From 026583e60e9ec2b83049e8ceee16f182560d2562 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 26 Feb 2014 14:34:10 -0800 Subject: [PATCH 001/486] Make hypervisor_version an int in fakeVirt driver This is a follow up to 9e770e62135fe9c2c8ac0121a5a79245b25a7847, which missed the change in a duplicate copy of host_status. Add regression test to test_virt_drivers. This requires changes to the API samples, because they were wrong. virt drivers use convert_version_to_int which converts a version string to a 4 digit number ("1.0" becomes to 1000) Change-Id: I28ce23509e3c9feae183a49a8fc5bf3c7c601295 Closes-Bug: #1285035 --- .../hypervisors-show-with-ip-resp.json | 2 +- .../os-extended-hypervisors/hypervisors-show-with-ip-resp.xml | 2 +- doc/api_samples/os-hypervisors/hypervisors-detail-resp.json | 4 ++-- doc/api_samples/os-hypervisors/hypervisors-detail-resp.xml | 4 ++-- doc/api_samples/os-hypervisors/hypervisors-show-resp.json | 2 +- doc/api_samples/os-hypervisors/hypervisors-show-resp.xml | 2 +- .../api_samples/os-hypervisors/hypervisors-detail-resp.json | 2 +- doc/v3/api_samples/os-hypervisors/hypervisors-show-resp.json | 2 +- doc/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json | 2 +- doc/v3/api_samples/os-pci/hypervisors-pci-show-resp.json | 2 +- .../hypervisors-show-with-ip-resp.json.tpl | 2 +- .../hypervisors-show-with-ip-resp.xml.tpl | 2 +- .../os-hypervisors/hypervisors-detail-resp.json.tpl | 2 +- .../os-hypervisors/hypervisors-detail-resp.xml.tpl | 2 +- .../api_samples/os-hypervisors/hypervisors-show-resp.json.tpl | 2 +- .../api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl | 2 +- nova/tests/integrated/test_api_samples.py | 3 ++- .../os-hypervisors/hypervisors-detail-resp.json.tpl | 2 +- .../api_samples/os-hypervisors/hypervisors-show-resp.json.tpl | 2 +- .../api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl | 2 +- .../v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl | 2 +- nova/tests/integrated/v3/test_migrate_server.py | 2 +- 
nova/tests/integrated/v3/test_pci.py | 2 +- nova/tests/virt/test_virt_drivers.py | 1 + nova/virt/fake.py | 2 +- 25 files changed, 28 insertions(+), 26 deletions(-) diff --git a/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json b/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json index 01b6428446..bb20f50afd 100644 --- a/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json +++ b/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json @@ -8,7 +8,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, diff --git a/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml b/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml index 244e899969..4fd6ea8f9e 100644 --- a/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml +++ b/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml @@ -1,4 +1,4 @@ - + diff --git a/doc/api_samples/os-hypervisors/hypervisors-detail-resp.json b/doc/api_samples/os-hypervisors/hypervisors-detail-resp.json index b124901ea8..8ee96284d1 100644 --- a/doc/api_samples/os-hypervisors/hypervisors-detail-resp.json +++ b/doc/api_samples/os-hypervisors/hypervisors-detail-resp.json @@ -8,7 +8,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, @@ -23,4 +23,4 @@ "vcpus_used": 0 } ] -} \ No newline at end of file +} diff --git a/doc/api_samples/os-hypervisors/hypervisors-detail-resp.xml b/doc/api_samples/os-hypervisors/hypervisors-detail-resp.xml index 709f4fcd6d..6904c089c8 100644 --- a/doc/api_samples/os-hypervisors/hypervisors-detail-resp.xml +++ b/doc/api_samples/os-hypervisors/hypervisors-detail-resp.xml @@ -1,6 
+1,6 @@ - + - \ No newline at end of file + diff --git a/doc/api_samples/os-hypervisors/hypervisors-show-resp.json b/doc/api_samples/os-hypervisors/hypervisors-show-resp.json index 59ac652331..02945469ab 100644 --- a/doc/api_samples/os-hypervisors/hypervisors-show-resp.json +++ b/doc/api_samples/os-hypervisors/hypervisors-show-resp.json @@ -7,7 +7,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, diff --git a/doc/api_samples/os-hypervisors/hypervisors-show-resp.xml b/doc/api_samples/os-hypervisors/hypervisors-show-resp.xml index 3b21782c07..471709fb24 100644 --- a/doc/api_samples/os-hypervisors/hypervisors-show-resp.xml +++ b/doc/api_samples/os-hypervisors/hypervisors-show-resp.xml @@ -1,4 +1,4 @@ - + diff --git a/doc/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json index 5fa4493ef6..e800c777ae 100644 --- a/doc/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json +++ b/doc/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json @@ -9,7 +9,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, diff --git a/doc/v3/api_samples/os-hypervisors/hypervisors-show-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-show-resp.json index 1ab1b99be8..0c4957bdae 100644 --- a/doc/v3/api_samples/os-hypervisors/hypervisors-show-resp.json +++ b/doc/v3/api_samples/os-hypervisors/hypervisors-show-resp.json @@ -8,7 +8,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, diff --git a/doc/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json 
b/doc/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json index aa0e92efb3..1ca293225e 100644 --- a/doc/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json +++ b/doc/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json @@ -9,7 +9,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, diff --git a/doc/v3/api_samples/os-pci/hypervisors-pci-show-resp.json b/doc/v3/api_samples/os-pci/hypervisors-pci-show-resp.json index 1750501621..2a6e41bf4d 100644 --- a/doc/v3/api_samples/os-pci/hypervisors-pci-show-resp.json +++ b/doc/v3/api_samples/os-pci/hypervisors-pci-show-resp.json @@ -8,7 +8,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, diff --git a/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl b/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl index 6b287a838c..a1e5f2080b 100644 --- a/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl +++ b/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl @@ -8,7 +8,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": %(hypervisor_id)s, "local_gb": 1028, "local_gb_used": 0, diff --git a/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl b/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl index 5b9f66416e..ed2a8b0829 100644 --- a/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl +++ 
b/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl @@ -1,4 +1,4 @@ - + diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl index db29146071..9ccda9c7e6 100644 --- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl +++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl @@ -9,7 +9,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl index e8d8a3f40a..1169ce1e01 100644 --- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl @@ -1,6 +1,6 @@ - + diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl index c9638423d2..356316d61f 100644 --- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl +++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl @@ -7,7 +7,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": %(hypervisor_id)s, "local_gb": 1028, "local_gb_used": 0, diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl index d7af1246c9..090f720398 100644 --- 
a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl @@ -1,4 +1,4 @@ - + diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index d9e1a44a03..eea6afba22 100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -1969,7 +1969,8 @@ def fake_get_compute(context, host): report_count=1, updated_at='foo', hypervisor_type='bar', - hypervisor_version='1', + hypervisor_version= + utils.convert_version_to_int('1.0'), disabled=False) return {'compute_node': [service]} self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute) diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl index 0678922114..fb473a03bb 100644 --- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl @@ -9,7 +9,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": %(hypervisor_id)s, "local_gb": 1028, "local_gb_used": 0, diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl index 6b287a838c..a1e5f2080b 100644 --- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl @@ -8,7 +8,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": %(hypervisor_id)s, "local_gb": 1028, "local_gb_used": 0, diff --git 
a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl index 9eb9e8c9c0..69c5df943f 100644 --- a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl @@ -9,7 +9,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, diff --git a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl index 8c626fd570..6a6fbe3d3b 100644 --- a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl @@ -8,7 +8,7 @@ "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, diff --git a/nova/tests/integrated/v3/test_migrate_server.py b/nova/tests/integrated/v3/test_migrate_server.py index 5c188aec21..456ee4f563 100644 --- a/nova/tests/integrated/v3/test_migrate_server.py +++ b/nova/tests/integrated/v3/test_migrate_server.py @@ -55,7 +55,7 @@ def fake_get_compute(context, host): report_count=1, updated_at='foo', hypervisor_type='bar', - hypervisor_version='1', + hypervisor_version='1000', disabled=False) return {'compute_node': [service]} self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute) diff --git a/nova/tests/integrated/v3/test_pci.py b/nova/tests/integrated/v3/test_pci.py index e6ba2b0484..30c664dbf2 100644 --- a/nova/tests/integrated/v3/test_pci.py +++ b/nova/tests/integrated/v3/test_pci.py @@ -87,7 +87,7 @@ def setUp(self): "free_ram_mb": 7680, "hypervisor_hostname": 
"fake-mini", "hypervisor_type": "fake", - "hypervisor_version": 1, + "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, diff --git a/nova/tests/virt/test_virt_drivers.py b/nova/tests/virt/test_virt_drivers.py index 12d3150601..de34b3d085 100644 --- a/nova/tests/virt/test_virt_drivers.py +++ b/nova/tests/virt/test_virt_drivers.py @@ -591,6 +591,7 @@ def _check_available_resource_fields(self, host_status): 'supported_instances'] for key in keys: self.assertIn(key, host_status) + self.assertIsInstance(host_status['hypervisor_version'], int) @catch_notimplementederror def test_get_host_stats(self): diff --git a/nova/virt/fake.py b/nova/virt/fake.py index ea175cb75d..3133f91b76 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -363,7 +363,7 @@ def get_available_resource(self, nodename): 'memory_mb_used': 0, 'local_gb_used': 0, 'hypervisor_type': 'fake', - 'hypervisor_version': '1.0', + 'hypervisor_version': utils.convert_version_to_int('1.0'), 'hypervisor_hostname': nodename, 'disk_available_least': 0, 'cpu_info': '?', From 2620a7c74e2d7e9c904af364a59efcd69cb5947f Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Thu, 8 May 2014 12:07:27 -0700 Subject: [PATCH 002/486] Fix security group race condition while creating rule Previously, it was possible for someone to create a security group rule where the rule references another security group. To do this nova would first create the security group rule and then look up the referenced group (group_id's) name in order to return that via the api. During this time it's possible for someone to delete this security group rule and the referenced group before the call returned resulting in a 404 error being raised. This patch addresses this issue by looking up the group name first and then creating the security group rule in order to avoid this from occuring. 
Note: this patch also adds a test to cover the case where an invalid group id was passed though this does not really add any real additional coverage to the change largely because the code is currently using global stubs. That said, the previous coverage should hopefully be sufficient Change-Id: If58ffa5629ba5166f260379ac47922974de31be0 Related-bug: 1262566 --- .../compute/contrib/security_groups.py | 30 +++++++++++-------- .../compute/contrib/test_security_groups.py | 10 ++++++- 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py index ee3e0ffea3..6d2856b096 100644 --- a/nova/api/openstack/compute/contrib/security_groups.py +++ b/nova/api/openstack/compute/contrib/security_groups.py @@ -208,7 +208,12 @@ def __init__(self): self.compute_api = compute.API( security_group_api=self.security_group_api) - def _format_security_group_rule(self, context, rule): + def _format_security_group_rule(self, context, rule, group_rule_data=None): + """Return a secuity group rule in desired API response format. + + If group_rule_data is passed in that is used rather than querying + for it. 
+ """ sg_rule = {} sg_rule['id'] = rule['id'] sg_rule['parent_group_id'] = rule['parent_group_id'] @@ -235,6 +240,8 @@ def _format_security_group_rule(self, context, rule): return sg_rule['group'] = {'name': source_group.get('name'), 'tenant_id': source_group.get('project_id')} + elif group_rule_data: + sg_rule['group'] = group_rule_data else: sg_rule['ip_range'] = {'cidr': rule['cidr']} return sg_rule @@ -394,23 +401,22 @@ def create(self, req, body): msg = _("Bad prefix for network in cidr %s") % new_rule['cidr'] raise exc.HTTPBadRequest(explanation=msg) + group_rule_data = None with translate_exceptions(): + if sg_rule.get('group_id'): + source_group = self.security_group_api.get( + context, id=sg_rule['group_id']) + group_rule_data = {'name': source_group.get('name'), + 'tenant_id': source_group.get('project_id')} + security_group_rule = ( self.security_group_api.create_security_group_rule( context, security_group, new_rule)) formatted_rule = self._format_security_group_rule(context, - security_group_rule) - if formatted_rule: - return {"security_group_rule": formatted_rule} - - # TODO(arosen): if we first look up the security group information for - # the group_id before creating the rule we can avoid the case that - # the remote group (group_id) has been deleted when we go to look - # up it's name. 
- with translate_exceptions(): - raise exception.SecurityGroupNotFound( - security_group_id=sg_rule['group_id']) + security_group_rule, + group_rule_data) + return {"security_group_rule": formatted_rule} def _rule_args_to_dict(self, context, to_port=None, from_port=None, ip_protocol=None, cidr=None, group_id=None): diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py index 7029de5a33..664b91e0bb 100644 --- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py +++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py @@ -982,13 +982,21 @@ def test_create_with_invalid_parent_group_id(self): req, {'security_group_rule': rule}) def test_create_with_non_existing_parent_group_id(self): - rule = security_group_rule_template(group_id='invalid', + rule = security_group_rule_template(group_id=None, parent_group_id=self.invalid_id) req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules') self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, {'security_group_rule': rule}) + def test_create_with_non_existing_group_id(self): + rule = security_group_rule_template(group_id='invalid', + parent_group_id=self.sg2['id']) + + req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules') + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, + req, {'security_group_rule': rule}) + def test_create_with_invalid_protocol(self): rule = security_group_rule_template(ip_protocol='invalid-protocol', cidr='10.2.2.0/24', From 8ff170dc95bf3101fe38a2624e941bfa3b7c1138 Mon Sep 17 00:00:00 2001 From: "Leandro I. Costantino" Date: Mon, 19 May 2014 19:58:47 -0300 Subject: [PATCH 003/486] VM in rescue state must have a restricted set of actions Right now it is possible to pause, suspend and stop a VM in state RESCUED, so after the state is changed, it's not possible to trigger unrescue anymore since the original state is lost. 
This patch remove vm_states.RESCUED as valid state from stop, pause and suspend actions. The vm_states devref is also updated to reflect this change including the current reboot flow.( vm_states.RESCUED cannot be rebooted as per today code) DocImpact Closes-Bug: #1319182 Co-Authored-By: Cyril Roelandt Change-Id: I531dea5a5499bf93c24bea37850d562134dee281 --- doc/source/devref/vmstates.rst | 7 ++-- nova/compute/api.py | 7 ++-- nova/tests/compute/test_compute_api.py | 46 ++++++++++++++++++++++++-- 3 files changed, 52 insertions(+), 8 deletions(-) diff --git a/doc/source/devref/vmstates.rst b/doc/source/devref/vmstates.rst index 80075124fd..4ab800ec69 100644 --- a/doc/source/devref/vmstates.rst +++ b/doc/source/devref/vmstates.rst @@ -88,6 +88,7 @@ task states for various commands issued by the user: rescue -> error active -> rescue stopped -> rescue + error -> rescue unrescue [shape="rectangle"] unrescue -> active @@ -139,7 +140,9 @@ task states for various commands issued by the user: reboot -> error active -> reboot stopped -> reboot - rescued -> reboot + paused -> reboot + suspended -> reboot + error -> reboot live_migrate [shape="rectangle"] live_migrate -> active @@ -159,4 +162,4 @@ The following diagram shows the sequence of VM states, task states, and power states when a new VM instance is created. -.. image:: /images/run_instance_walkthrough.png \ No newline at end of file +.. 
image:: /images/run_instance_walkthrough.png diff --git a/nova/compute/api.py b/nova/compute/api.py index 95da15e8ad..55496439eb 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1751,8 +1751,7 @@ def force_stop(self, context, instance, do_cast=True): @check_instance_lock @check_instance_host @check_instance_cell - @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED, - vm_states.ERROR]) + @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.ERROR]) def stop(self, context, instance, do_cast=True): """Stop an instance.""" self.force_stop(context, instance, do_cast) @@ -2509,7 +2508,7 @@ def remove_fixed_ip(self, context, instance, address): @wrap_check_policy @check_instance_lock @check_instance_cell - @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED]) + @check_instance_state(vm_state=[vm_states.ACTIVE]) def pause(self, context, instance): """Pause the given instance.""" instance.task_state = task_states.PAUSING @@ -2536,7 +2535,7 @@ def get_diagnostics(self, context, instance): @wrap_check_policy @check_instance_lock @check_instance_cell - @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED]) + @check_instance_state(vm_state=[vm_states.ACTIVE]) def suspend(self, context, instance): """Suspend the given instance.""" instance.task_state = task_states.SUSPENDING diff --git a/nova/tests/compute/test_compute_api.py b/nova/tests/compute/test_compute_api.py index 5a3105f65e..2c8cf9a503 100644 --- a/nova/tests/compute/test_compute_api.py +++ b/nova/tests/compute/test_compute_api.py @@ -67,6 +67,16 @@ def setUp(self): self.context = context.RequestContext(self.user_id, self.project_id) + def _get_vm_states(self, exclude_states=None): + vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED, + vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED, + vm_states.RESIZED, vm_states.SOFT_DELETED, + vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED, + vm_states.SHELVED_OFFLOADED]) + if 
not exclude_states: + exclude_states = set() + return vm_state - exclude_states + def _create_flavor(self, params=None): flavor = {'id': 1, 'flavorid': 1, @@ -204,6 +214,19 @@ def test_suspend(self): self.assertEqual(task_states.SUSPENDING, instance.task_state) + def _test_suspend_fails(self, vm_state): + params = dict(vm_state=vm_state) + instance = self._create_instance_obj(params=params) + self.assertIsNone(instance.task_state) + self.assertRaises(exception.InstanceInvalidState, + self.compute_api.suspend, + self.context, instance) + + def test_suspend_fails_invalid_states(self): + invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE])) + for state in invalid_vm_states: + self._test_suspend_fails(state) + def test_resume(self): # Ensure instance can be resumed (if suspended). instance = self._create_instance_obj( @@ -309,13 +332,19 @@ def test_stop(self): def test_stop_stopped_instance_with_bypass(self): self._test_stop(vm_states.STOPPED, force=True) - def test_stop_invalid_state(self): - params = dict(vm_state=vm_states.PAUSED) + def _test_stop_invalid_state(self, vm_state): + params = dict(vm_state=vm_state) instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceInvalidState, self.compute_api.stop, self.context, instance) + def test_stop_fails_invalid_states(self): + invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE, + vm_states.ERROR])) + for state in invalid_vm_states: + self._test_stop_invalid_state(state) + def test_stop_a_stopped_inst(self): params = {'vm_state': vm_states.STOPPED} instance = self._create_instance_obj(params=params) @@ -1203,6 +1232,19 @@ def test_pause(self): self.assertEqual(task_states.PAUSING, instance.task_state) + def _test_pause_fails(self, vm_state): + params = dict(vm_state=vm_state) + instance = self._create_instance_obj(params=params) + self.assertIsNone(instance.task_state) + self.assertRaises(exception.InstanceInvalidState, + self.compute_api.pause, + self.context, 
instance) + + def test_pause_fails_invalid_states(self): + invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE])) + for state in invalid_vm_states: + self._test_pause_fails(state) + def test_unpause(self): # Ensure instance can be unpaused. params = dict(vm_state=vm_states.PAUSED) From 002844d08ddca743ce7f83aaf5d5279a2d35e29e Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sat, 31 May 2014 00:37:53 -0700 Subject: [PATCH 004/486] xenapi: virt rescue method now supports objects Commit a37a96fd13b933123e80ad24e37fc37d73c16059 ensured that the compute API and manager are now getting objects for the rescue method. This patch adds support to the virt driver code. Change-Id: I06e5032526f791dfc0c523f2302d2edff5023f78 --- nova/virt/xenapi/vmops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 315d231cac..ccfaf300f9 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1407,11 +1407,11 @@ def rescue(self, context, instance, network_info, image_meta, - spawn a rescue VM (the vm name-label will be instance-N-rescue). """ - rescue_name_label = '%s-rescue' % instance['name'] + rescue_name_label = '%s-rescue' % instance.name rescue_vm_ref = vm_utils.lookup(self._session, rescue_name_label) if rescue_vm_ref: raise RuntimeError(_("Instance is already in Rescue Mode: %s") - % instance['name']) + % instance.name) vm_ref = self._get_vm_opaque_ref(instance) vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) From 9cd2f66ee83abeee3d07e2614b16d1f1d65ffb82 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sat, 31 May 2014 00:48:57 -0700 Subject: [PATCH 005/486] xenapi: virt unrescue method now supports objects Commit 65ad230af5ece020822fa963dd30fd641a54ae91 ensured that the compute API and manager are now getting objects for the unrescue method. This patch adds support to the virt driver code. 
Change-Id: Ie6b587d3f89202a6987291bfaea88290241651f2 --- nova/virt/xenapi/vmops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 315d231cac..b71821c446 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1436,10 +1436,10 @@ def unrescue(self, instance): """ rescue_vm_ref = vm_utils.lookup(self._session, - "%s-rescue" % instance['name']) + "%s-rescue" % instance.name) if not rescue_vm_ref: raise exception.InstanceNotInRescueMode( - instance_id=instance['uuid']) + instance_id=instance.uuid) original_vm_ref = self._get_vm_opaque_ref(instance) From 586c6686aaa9e1d0de6d920371f1f489da0a2a9e Mon Sep 17 00:00:00 2001 From: Irena Berezovsky Date: Mon, 28 Apr 2014 09:11:51 +0300 Subject: [PATCH 006/486] Use VIF details dictionary to get physical_network Modify mlnx_direct plug and unplug to retrieve physical_network from VIF details dictionary if network meta dictionary does not contain physical_network. This will serve ML2 case following the guidelines to populate vif_details dictionary with attributes required for port plugging. 
Change-Id: I0d97bc875be2fff18087c78accef3ec81c059c4b Closes-Bug: 1304872 --- nova/exception.py | 4 +++ nova/network/model.py | 7 ++++++ nova/tests/virt/libvirt/test_vif.py | 39 +++++++++++++++++++++++++++++ nova/virt/libvirt/vif.py | 13 ++++++---- 4 files changed, 58 insertions(+), 5 deletions(-) diff --git a/nova/exception.py b/nova/exception.py index a0c5cae879..cf796b24b0 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -634,6 +634,10 @@ class ExternalNetworkAttachForbidden(Forbidden): "external network %(network_uuid)s") +class NetworkMissingPhysicalNetwork(NovaException): + msg_fmt = _("Physical network is missing for network %(network_uuid)s") + + class DatastoreNotFound(NotFound): msg_fmt = _("Could not find the datastore reference(s) which the VM uses.") diff --git a/nova/network/model.py b/nova/network/model.py index 7207cba546..198132e580 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -43,6 +43,7 @@ def ensure_string_keys(d): # class VIF_DETAIL_PORT_FILTER = 'port_filter' VIF_DETAIL_OVS_HYBRID_PLUG = 'ovs_hybrid_plug' +VIF_DETAILS_PHYSICAL_NETWORK = 'physical_network' # Constants for the 'vif_model' values VIF_MODEL_VIRTIO = 'virtio' @@ -344,6 +345,12 @@ def is_hybrid_plug_enabled(self): def is_neutron_filtering_enabled(self): return self['details'].get(VIF_DETAIL_PORT_FILTER, False) + def get_physical_network(self): + phy_network = self['network']['meta'].get('physical_network') + if not phy_network: + phy_network = self['details'].get(VIF_DETAILS_PHYSICAL_NETWORK) + return phy_network + @classmethod def hydrate(cls, vif): vif = cls(**ensure_string_keys(vif)) diff --git a/nova/tests/virt/libvirt/test_vif.py b/nova/tests/virt/libvirt/test_vif.py index f6d41a3d05..f8aef28008 100644 --- a/nova/tests/virt/libvirt/test_vif.py +++ b/nova/tests/virt/libvirt/test_vif.py @@ -195,6 +195,14 @@ class LibvirtVifTestCase(test.TestCase): type=network_model.VIF_TYPE_MLNX_DIRECT, devname='tap-xxx-yyy-zzz') + vif_mlnx_net = 
network_model.VIF(id='vif-xxx-yyy-zzz', + address='ca:fe:de:ad:be:ef', + network=network_mlnx, + type=network_model.VIF_TYPE_MLNX_DIRECT, + details={'physical_network': + 'fake_phy_network'}, + devname='tap-xxx-yyy-zzz') + vif_midonet = network_model.VIF(id='vif-xxx-yyy-zzz', address='ca:fe:de:ad:be:ef', network=network_midonet, @@ -665,6 +673,37 @@ def test_plug_iovisor(self, device_exists): } d.plug_iovisor(instance, self.vif_ivs) + def test_unplug_mlnx_with_details(self): + d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010)) + with mock.patch.object(utils, 'execute') as execute: + execute.side_effect = processutils.ProcessExecutionError + d.unplug_mlnx_direct(None, self.vif_mlnx_net) + execute.assert_called_once_with('ebrctl', 'del-port', + 'fake_phy_network', + 'ca:fe:de:ad:be:ef', + run_as_root=True) + + def test_plug_mlnx_with_details(self): + d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010)) + with mock.patch.object(utils, 'execute') as execute: + d.plug_mlnx_direct(self.instance, self.vif_mlnx_net) + execute.assert_called_once_with('ebrctl', 'add-port', + 'ca:fe:de:ad:be:ef', + 'instance-uuid', + 'fake_phy_network', + 'mlnx_direct', + 'eth-xxx-yyy-zzz', + run_as_root=True) + + def test_plug_mlnx_no_physical_network(self): + d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010)) + with mock.patch.object(utils, 'execute') as execute: + self.assertRaises(exception.NovaException, + d.plug_mlnx_direct, + self.instance, + self.vif_mlnx) + self.assertEqual(0, execute.call_count) + def test_ivs_ethernet_driver(self): d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010)) self._check_ivs_ethernet_driver(d, diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 263866c38d..34201377cb 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -534,11 +534,12 @@ def plug_mlnx_direct(self, instance, vif): super(LibvirtGenericVIFDriver, self).plug(instance, vif) - network = vif['network'] vnic_mac = vif['address'] 
device_id = instance['uuid'] - fabric = network['meta']['physical_network'] - + fabric = vif.get_physical_network() + if not fabric: + raise exception.NetworkMissingPhysicalNetwork( + network_uuid=vif['network']['id']) dev_name = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH) try: utils.execute('ebrctl', 'add-port', vnic_mac, device_id, fabric, @@ -723,9 +724,11 @@ def unplug_mlnx_direct(self, instance, vif): super(LibvirtGenericVIFDriver, self).unplug(instance, vif) - network = vif['network'] vnic_mac = vif['address'] - fabric = network['meta']['physical_network'] + fabric = vif.get_physical_network() + if not fabric: + raise exception.NetworkMissingPhysicalNetwork( + network_uuid=vif['network']['id']) try: utils.execute('ebrctl', 'del-port', fabric, vnic_mac, run_as_root=True) From 77aea0e6a3d77f3c747a3f0d7126a8fbfae94263 Mon Sep 17 00:00:00 2001 From: Brian Elliott Date: Tue, 10 Jun 2014 16:46:15 +0000 Subject: [PATCH 007/486] xenapi: Do not retry snapshot upload on 500 If Glance returns a 500 response on an initial attempt to upload a snapshot image, it will set the image status to killed. Any retry attempts will fail with a 409 response. Hence, Nova should not attempt to retry the upload in this case, which will eliminate many pointless retries. 
Closes-Bug: 1349933 Change-Id: I1a8f2416923a368b02cf8963c747ebbb24d749a1 --- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 8f3c16ff18..0eafa65029 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -224,7 +224,8 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port, httplib.REQUEST_ENTITY_TOO_LARGE, httplib.PRECONDITION_FAILED, httplib.CONFLICT, - httplib.FORBIDDEN): + httplib.FORBIDDEN, + httplib.INTERNAL_SERVER_ERROR): # No point in retrying for these conditions raise PluginError("Got Error response [%i] while uploading " "image [%s] " From 0a7527c71228c8e776ad40cedd2cf137fd99f43d Mon Sep 17 00:00:00 2001 From: Lucian Petrut Date: Wed, 11 Jun 2014 16:24:06 +0300 Subject: [PATCH 008/486] Fixes hyper-v volume attach when host is AD member The domain name gets added to the initiator name used by the host if it's an Active Directory member. Currently the method which gets the initiator name does not take this into account when retrieving the default initiator name. Trying to use a wrong initiator name will lead to an exception when trying to log in to the according iSCSI target. This patch simply appends the domain name (when the host is an AD member) to the initiator name used to log in to iSCSI targets. Closes-Bug: #1328870 Change-Id: Ifbe762c685e46081059a01043431b2c4ac5473fc --- nova/virt/hyperv/basevolumeutils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py index d518e84667..176d9c7c9b 100644 --- a/nova/virt/hyperv/basevolumeutils.py +++ b/nova/virt/hyperv/basevolumeutils.py @@ -70,8 +70,9 @@ def get_iscsi_initiator(self): except Exception: LOG.info(_("The ISCSI initiator name can't be found. 
" "Choosing the default one")) - computer_system = self._conn_cimv2.Win32_ComputerSystem()[0] initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower() + if computer_system.PartofDomain: + initiator_name += '.' + computer_system.Domain.lower() return initiator_name def volume_in_mapping(self, mount_device, block_device_info): From acaf3bc419a10c02172d6ef17bc9c9e97e2d65a9 Mon Sep 17 00:00:00 2001 From: Sergey Nikitin Date: Wed, 18 Jun 2014 15:34:23 +0400 Subject: [PATCH 009/486] Replace assertTrue(not *) with assertFalse(*) Replacements were done in tests to have clearer messages in case of failure Change-Id: I7eedc62e0d570ee5818c4aa9b28ad8163c63b775 --- nova/tests/db/test_migrations.py | 2 +- nova/tests/test_service.py | 2 +- nova/tests/virt/xenapi/test_xenapi.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py index 587ce8af41..ded292d983 100644 --- a/nova/tests/db/test_migrations.py +++ b/nova/tests/db/test_migrations.py @@ -841,4 +841,4 @@ def test_all_migrations_have_downgrade(self): helpful_msg = (_("The following migrations are missing a downgrade:" "\n\t%s") % '\n\t'.join(sorted(missing_downgrade))) - self.assertTrue(not missing_downgrade, helpful_msg) + self.assertFalse(missing_downgrade, helpful_msg) diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index f0725e6b55..869f30f44c 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -104,7 +104,7 @@ def test_service_enabled_on_create_based_on_flag(self): app.stop() ref = db.service_get(context.get_admin_context(), app.service_id) db.service_destroy(context.get_admin_context(), app.service_id) - self.assertTrue(not ref['disabled']) + self.assertFalse(ref['disabled']) def test_service_disabled_on_create_based_on_flag(self): self.flags(enable_new_services=False) diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py index 
40cbaf0ee5..0f1240a0b1 100644 --- a/nova/tests/virt/xenapi/test_xenapi.py +++ b/nova/tests/virt/xenapi/test_xenapi.py @@ -526,7 +526,7 @@ def fake_image_upload(_self, ctx, session, inst, vdi_uuids, for vdi_ref in xenapi_fake.get_all('VDI'): vdi_rec = xenapi_fake.get_record('VDI', vdi_ref) name_label = vdi_rec["name_label"] - self.assertTrue(not name_label.endswith('snapshot')) + self.assertFalse(name_label.endswith('snapshot')) self.assertTrue(self.fake_upload_called) From 89cd2f9a4dc4c12aaf0ce2ed2c492ad60fd1f6d7 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Mon, 9 Jun 2014 04:13:18 +0800 Subject: [PATCH 010/486] Add supported file system type check at virt layer Currently nova doesn't check whether the file system type is supported by virt layer(hypervisor) before use it. This patch adds current support file system type definition then create a default check function at virt layer. Following patches will do the real check in different driver such as libvirt. Change-Id: Ie4d876a48b36c1a53b171dd521bdeef868a31486 Partial-Bug: #1293880 --- nova/virt/disk/api.py | 8 ++++++++ nova/virt/driver.py | 12 ++++++++++++ 2 files changed, 20 insertions(+) diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py index 034437a718..8b921912ce 100644 --- a/nova/virt/disk/api.py +++ b/nova/virt/disk/api.py @@ -80,6 +80,14 @@ _MKFS_COMMAND = {} _DEFAULT_MKFS_COMMAND = None + +FS_FORMAT_EXT2 = "ext2" +FS_FORMAT_EXT3 = "ext3" +FS_FORMAT_EXT4 = "ext4" +FS_FORMAT_XFS = "xfs" +FS_FORMAT_NTFS = "ntfs" +FS_FORMAT_VFAT = "vfat" + _DEFAULT_FS_BY_OSTYPE = {'linux': 'ext3', 'windows': 'ntfs'} diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 6689e7bc84..34c29ba7de 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -1273,6 +1273,18 @@ def default_device_names_for_instance(self, instance, root_device_name, """Default the missing device names in the block device mapping.""" raise NotImplementedError() + def is_supported_fs_format(self, fs_type): + """Check whether the file 
format is supported by this driver + + :param fs_type: the file system type to be checked, + the validate values are defined at disk API module. + """ + # NOTE(jichenjc): Return False here so that every hypervisor + # need to define their supported file system + # type and implement this function at their + # virt layer. + return False + def load_compute_driver(virtapi, compute_driver=None): """Load a compute driver module. From b26a90957a056823556e64b83536130d50d7eb7f Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Thu, 19 Jun 2014 03:01:03 -0700 Subject: [PATCH 011/486] Reduce unit test times for glance The class TestGlanceApiServers did not inherit test.NoDBTestCase. Change-Id: Ieafe300d793685f5b146999383695733eba3da19 --- nova/tests/image/test_glance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 2a28a87102..50c8480b7c 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -1225,7 +1225,7 @@ def test_generate_glance_https_url(self): self.assertEqual(generated_url, https_url) -class TestGlanceApiServers(test.TestCase): +class TestGlanceApiServers(test.NoDBTestCase): def test_get_ipv4_api_servers(self): self.flags(glance_api_servers=['10.0.1.1:9292', From 54997582296cc93acdcd7686bd8d52fca083afb1 Mon Sep 17 00:00:00 2001 From: Sergey Nikitin Date: Wed, 18 Jun 2014 15:00:53 +0400 Subject: [PATCH 012/486] Replace assertTrue/False with assertIn/NotIn The following replacements were done in tests to have clearer messages in case of failure: - assertTrue(* in *) with assertIn - assertTrue(* not in *) with assertNotIn - assertFalse(* in *) with assertNotIn assertTrue/False with custom error messages weren't replaced because assertIn/NotIn doesn't have argument 'message' and we'll lose error messages if we replace it. Also assertTrue/False with operator 'any' were replaced with more simpler asserts assertIn/NotIn. 
Change-Id: If5e78db21d5302259a4d69fd82c62baf39a5d8f8 --- .../compute/contrib/test_migrations.py | 6 ++--- .../compute/contrib/test_server_groups.py | 2 +- .../compute/plugins/v3/test_flavor_manage.py | 4 ++-- .../compute/plugins/v3/test_migrations.py | 6 ++--- .../compute/plugins/v3/test_server_actions.py | 2 +- .../compute/plugins/v3/test_servers.py | 4 ++-- .../openstack/compute/test_server_actions.py | 2 +- nova/tests/compute/test_compute.py | 3 +-- nova/tests/conductor/test_conductor.py | 6 ++--- nova/tests/network/test_neutronv2.py | 7 +++--- nova/tests/scheduler/test_weights.py | 2 +- nova/tests/test_block_device.py | 8 +++---- nova/tests/test_iptables_network.py | 9 ++++--- nova/tests/test_utils.py | 24 +++++++++---------- nova/tests/virt/libvirt/test_driver.py | 2 +- nova/tests/virt/xenapi/test_vm_utils.py | 3 +-- 16 files changed, 43 insertions(+), 47 deletions(-) diff --git a/nova/tests/api/openstack/compute/contrib/test_migrations.py b/nova/tests/api/openstack/compute/contrib/test_migrations.py index 73097bfaf8..b18bc741f0 100644 --- a/nova/tests/api/openstack/compute/contrib/test_migrations.py +++ b/nova/tests/api/openstack/compute/contrib/test_migrations.py @@ -87,9 +87,9 @@ def test_index(self): 'migrations': migrations.output(migrations_obj)} for mig in migrations_in_progress['migrations']: - self.assertTrue('id' in mig) - self.assertTrue('deleted' not in mig) - self.assertTrue('deleted_at' not in mig) + self.assertIn('id', mig) + self.assertNotIn('deleted', mig) + self.assertNotIn('deleted_at', mig) filters = {'host': 'host1', 'status': 'migrating', 'cell_name': 'ChildCell'} diff --git a/nova/tests/api/openstack/compute/contrib/test_server_groups.py b/nova/tests/api/openstack/compute/contrib/test_server_groups.py index d8308cb8a2..5da31e0dff 100644 --- a/nova/tests/api/openstack/compute/contrib/test_server_groups.py +++ b/nova/tests/api/openstack/compute/contrib/test_server_groups.py @@ -446,7 +446,7 @@ def _verify_server_group(self, raw_group, 
tree): for idx, gr_child in enumerate(child): self.assertEqual(self._tag(gr_child), 'meta') key = gr_child.get('key') - self.assertTrue(key in ['key1', 'key2']) + self.assertIn(key, ['key1', 'key2']) metas[key] = gr_child.text self.assertEqual(len(metas), len(metadata)) for k in metadata: diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_flavor_manage.py b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_manage.py index 833f1b87b3..7f05d77894 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_flavor_manage.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_manage.py @@ -314,8 +314,8 @@ def test_create_private_flavor_should_create_flavor_access(self): "tenant_id": "%s" % ctxt.project_id, "flavor_id": "%s" % body["flavor"]["id"] } - self.assertTrue(expected_flavor_access_body in - flavor_access_body["flavor_access"]) + self.assertIn(expected_flavor_access_body, + flavor_access_body["flavor_access"]) def test_create_public_flavor_should_not_create_flavor_access(self): req_body = { diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_migrations.py b/nova/tests/api/openstack/compute/plugins/v3/test_migrations.py index 47d7e9f7b4..6201af1c41 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_migrations.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_migrations.py @@ -86,9 +86,9 @@ def test_index(self): 'migrations': migrations.output(migrations_obj)} for mig in migrations_in_progress['migrations']: - self.assertTrue('id' in mig) - self.assertTrue('deleted' not in mig) - self.assertTrue('deleted_at' not in mig) + self.assertIn('id', mig) + self.assertNotIn('deleted', mig) + self.assertNotIn('deleted_at', mig) filters = {'host': 'host1', 'status': 'migrating', 'cell_name': 'ChildCell'} diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py index b02bb5f478..d15c8a007b 100644 --- 
a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py @@ -910,7 +910,7 @@ def fake_block_device_mapping_get_all_by_instance(context, inst_id, self.assertEqual(bdms[0]['snapshot_id'], snapshot['id']) for fld in ('connection_info', 'id', 'instance_uuid', 'device_name'): - self.assertTrue(fld not in bdms[0]) + self.assertNotIn(fld, bdms[0]) for k in extra_properties.keys(): self.assertEqual(properties[k], extra_properties[k]) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index 1ad97beb49..7a19936f48 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -721,7 +721,7 @@ def fake_get_all(context, filters=None, sort_key=None, req = fakes.HTTPRequestV3.blank('/servers?tenant_id=newfake') res = self.controller.index(req) - self.assertTrue('servers' in res) + self.assertIn('servers', res) def test_tenant_id_filter_implies_all_tenants(self): def fake_get_all(context, filters=None, sort_key=None, @@ -743,7 +743,7 @@ def fake_get_all(context, filters=None, sort_key=None, req = fakes.HTTPRequestV3.blank('/servers?tenant_id=newfake', use_admin_context=True) res = self.controller.index(req) - self.assertTrue('servers' in res) + self.assertIn('servers', res) def test_all_tenants_param_normal(self): def fake_get_all(context, filters=None, sort_key=None, diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py index 63d6379ca6..c5cd347211 100644 --- a/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/tests/api/openstack/compute/test_server_actions.py @@ -1090,7 +1090,7 @@ def fake_block_device_mapping_get_all_by_instance(context, inst_id, self.assertEqual(bdms[0]['snapshot_id'], snapshot['id']) for fld in ('connection_info', 'id', 
'instance_uuid', 'device_name'): - self.assertTrue(fld not in bdms[0]) + self.assertNotIn(fld, bdms[0]) for k in extra_properties.keys(): self.assertEqual(properties[k], extra_properties[k]) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index a2483b7187..48756f2562 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -5585,8 +5585,7 @@ def test_add_instance_fault_with_remote_error(self): exc_info = None def fake_db_fault_create(ctxt, values): - self.assertTrue('raise messaging.RemoteError' - in values['details']) + self.assertIn('raise messaging.RemoteError', values['details']) del values['details'] expected = { diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py index df0efd167a..15cadc6038 100644 --- a/nova/tests/conductor/test_conductor.py +++ b/nova/tests/conductor/test_conductor.py @@ -814,8 +814,7 @@ def _setup_aggregate_with_host(self): def test_aggregate_host_add(self): aggregate_ref = self._setup_aggregate_with_host() - self.assertTrue(any([host == 'bar' - for host in aggregate_ref['hosts']])) + self.assertIn('bar', aggregate_ref['hosts']) db.aggregate_delete(self.context.elevated(), aggregate_ref['id']) @@ -828,8 +827,7 @@ def test_aggregate_host_delete(self): aggregate_ref = db.aggregate_get(self.context.elevated(), aggregate_ref['id']) - self.assertFalse(any([host == 'bar' - for host in aggregate_ref['hosts']])) + self.assertNotIn('bar', aggregate_ref['hosts']) db.aggregate_delete(self.context.elevated(), aggregate_ref['id']) diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index 3d2924c031..cb4de32243 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -467,9 +467,10 @@ def _verify_nw_info(self, nw_inf, index=0): self.assertEqual('my_mac%s' % id_suffix, nw_inf[index]['address']) self.assertEqual('10.0.%s.0/24' % id_suffix, 
nw_inf[index]['network']['subnets'][0]['cidr']) - self.assertTrue(model.IP(address='8.8.%s.1' % id_suffix, - version=4, type='dns') in - nw_inf[index]['network']['subnets'][0]['dns']) + + ip_addr = model.IP(address='8.8.%s.1' % id_suffix, + version=4, type='dns') + self.assertIn(ip_addr, nw_inf[index]['network']['subnets'][0]['dns']) def _get_instance_nw_info(self, number): api = neutronapi.API() diff --git a/nova/tests/scheduler/test_weights.py b/nova/tests/scheduler/test_weights.py index ce2d1c5c4f..0f342bc0e4 100644 --- a/nova/tests/scheduler/test_weights.py +++ b/nova/tests/scheduler/test_weights.py @@ -207,7 +207,7 @@ def _check_parsing_result(self, weigher, setting, results): weigher._parse_setting() self.assertTrue(len(results) == len(weigher.setting)) for item in results: - self.assertTrue(item in weigher.setting) + self.assertIn(item, weigher.setting) def test_parse_setting(self): weigher = self.weight_classes[0]() diff --git a/nova/tests/test_block_device.py b/nova/tests/test_block_device.py index 1e86405a1a..103056d72c 100644 --- a/nova/tests/test_block_device.py +++ b/nova/tests/test_block_device.py @@ -333,7 +333,7 @@ def fake_validate(obj, dct): self.assertIn('field1', dev_dict) self.assertIn('field2', dev_dict) self.assertIn('db_field1', dev_dict) - self.assertFalse('db_field2'in dev_dict) + self.assertNotIn('db_field2', dev_dict) # Make sure all expected fields are defaulted dev_dict = block_device.BlockDeviceDict({'field1': 'foo'}) @@ -341,7 +341,7 @@ def fake_validate(obj, dct): self.assertIn('field2', dev_dict) self.assertIsNone(dev_dict['field2']) self.assertNotIn('db_field1', dev_dict) - self.assertFalse('db_field2'in dev_dict) + self.assertNotIn('db_field2', dev_dict) # Unless they are not meant to be dev_dict = block_device.BlockDeviceDict({'field1': 'foo'}, @@ -349,7 +349,7 @@ def fake_validate(obj, dct): self.assertIn('field1', dev_dict) self.assertNotIn('field2', dev_dict) self.assertNotIn('db_field1', dev_dict) - 
self.assertFalse('db_field2'in dev_dict) + self.assertNotIn('db_field2', dev_dict) def test_validate(self): self.assertRaises(exception.InvalidBDMFormat, @@ -497,7 +497,7 @@ def test_image_mapping(self): mapping_bdm = fake_block_device.FakeDbBlockDeviceDict( bdm).get_image_mapping() for fld in removed_fields: - self.assertTrue(fld not in mapping_bdm) + self.assertNotIn(fld, mapping_bdm) def _test_snapshot_from_bdm(self, template): snapshot = block_device.snapshot_from_bdm('new-snapshot-id', template) diff --git a/nova/tests/test_iptables_network.py b/nova/tests/test_iptables_network.py index 1d46bc1254..bd20b101bb 100644 --- a/nova/tests/test_iptables_network.py +++ b/nova/tests/test_iptables_network.py @@ -117,14 +117,13 @@ def test_filter_rules_are_wrapped(self): table = self.manager.ipv4['filter'] table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP') new_lines = self.manager._modify_rules(current_lines, table, 'filter') - self.assertTrue('[0:0] -A %s-FORWARD ' - '-s 1.2.3.4/5 -j DROP' % self.binary_name in new_lines) + self.assertIn('[0:0] -A %s-FORWARD ' + '-s 1.2.3.4/5 -j DROP' % self.binary_name, new_lines) table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP') new_lines = self.manager._modify_rules(current_lines, table, 'filter') - self.assertTrue('[0:0] -A %s-FORWARD ' - '-s 1.2.3.4/5 -j DROP' % self.binary_name - not in new_lines) + self.assertNotIn('[0:0] -A %s-FORWARD ' + '-s 1.2.3.4/5 -j DROP' % self.binary_name, new_lines) def test_remove_rules_regex(self): current_lines = self.sample_nat diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index 6969240cac..6955345f45 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -353,20 +353,20 @@ def test_monkey_patch(self): self.assertEqual(ret_b, 8) package_a = self.example_package + 'example_a.' 
- self.assertTrue(package_a + 'example_function_a' - in nova.tests.monkey_patch_example.CALLED_FUNCTION) + self.assertIn(package_a + 'example_function_a', + nova.tests.monkey_patch_example.CALLED_FUNCTION) - self.assertTrue(package_a + 'ExampleClassA.example_method' - in nova.tests.monkey_patch_example.CALLED_FUNCTION) - self.assertTrue(package_a + 'ExampleClassA.example_method_add' - in nova.tests.monkey_patch_example.CALLED_FUNCTION) + self.assertIn(package_a + 'ExampleClassA.example_method', + nova.tests.monkey_patch_example.CALLED_FUNCTION) + self.assertIn(package_a + 'ExampleClassA.example_method_add', + nova.tests.monkey_patch_example.CALLED_FUNCTION) package_b = self.example_package + 'example_b.' - self.assertFalse(package_b + 'example_function_b' - in nova.tests.monkey_patch_example.CALLED_FUNCTION) - self.assertFalse(package_b + 'ExampleClassB.example_method' - in nova.tests.monkey_patch_example.CALLED_FUNCTION) - self.assertFalse(package_b + 'ExampleClassB.example_method_add' - in nova.tests.monkey_patch_example.CALLED_FUNCTION) + self.assertNotIn(package_b + 'example_function_b', + nova.tests.monkey_patch_example.CALLED_FUNCTION) + self.assertNotIn(package_b + 'ExampleClassB.example_method', + nova.tests.monkey_patch_example.CALLED_FUNCTION) + self.assertNotIn(package_b + 'ExampleClassB.example_method_add', + nova.tests.monkey_patch_example.CALLED_FUNCTION) class MonkeyPatchDefaultTestCase(test.NoDBTestCase): diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index ccbb0a4cb5..4660588102 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -7102,7 +7102,7 @@ def fake_baselineCPU(cpu, flag): conn.post_live_migration_at_destination(self.context, instance, network_info, True, block_device_info=block_device_info) - self.assertTrue('fake' in self.resultXML) + self.assertIn('fake', self.resultXML) self.assertTrue( block_device_info['block_device_mapping'][0].save.called) 
diff --git a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py index b302ebb82c..9dd6288ef4 100644 --- a/nova/tests/virt/xenapi/test_vm_utils.py +++ b/nova/tests/virt/xenapi/test_vm_utils.py @@ -802,8 +802,7 @@ def test_exception_msg_contains_vm_name(self): try: vm_utils.vm_ref_or_raise('session', 'somename') except exception.InstanceNotFound as e: - self.assertTrue( - 'somename' in str(e)) + self.assertIn('somename', str(e)) mock.VerifyAll() From 800d3abf00ae98c87af372761acfc10f63f625c1 Mon Sep 17 00:00:00 2001 From: Haiwei Xu Date: Wed, 15 Jan 2014 04:15:06 +0900 Subject: [PATCH 013/486] Add API schema for v2.1/v3 hosts API By defining the API schema, it is possible to separate the validation code from the API method. The API method can be more simple. In addition, a response of API validation error can be consistent for whole of Nova APIs. Partially implements blueprint v3-api-schema Change-Id: Ie252dce67f8e9d3292d8dad767b6113f7affcffd --- .../api/openstack/compute/plugins/v3/hosts.py | 40 +++++------------ .../api/openstack/compute/schemas/v3/hosts.py | 43 ++++++++++++++++++ .../compute/plugins/v3/test_hosts.py | 44 ++++++++++++------- 3 files changed, 81 insertions(+), 46 deletions(-) create mode 100644 nova/api/openstack/compute/schemas/v3/hosts.py diff --git a/nova/api/openstack/compute/plugins/v3/hosts.py b/nova/api/openstack/compute/plugins/v3/hosts.py index 5087b0171e..cea2804290 100644 --- a/nova/api/openstack/compute/plugins/v3/hosts.py +++ b/nova/api/openstack/compute/plugins/v3/hosts.py @@ -17,8 +17,10 @@ import webob.exc +from nova.api.openstack.compute.schemas.v3 import hosts from nova.api.openstack import extensions from nova.api.openstack import wsgi +from nova.api import validation from nova import compute from nova import exception from nova.openstack.common.gettextutils import _ @@ -92,49 +94,29 @@ def index(self, req): return {'hosts': hosts} @extensions.expected_errors((400, 404, 501)) + 
@validation.schema(hosts.update) def update(self, req, id, body): """:param body: example format {'host': {'status': 'enable', 'maintenance_mode': 'enable'}} :returns: """ - def read_enabled(orig_val, msg): + def read_enabled(orig_val): """:param orig_val: A string with either 'enable' or 'disable'. May be surrounded by whitespace, and case doesn't matter - :param msg: The message to be passed to HTTPBadRequest. A single - %s will be replaced with orig_val. :returns: True for 'enabled' and False for 'disabled' """ val = orig_val.strip().lower() - if val == "enable": - return True - elif val == "disable": - return False - else: - raise webob.exc.HTTPBadRequest(explanation=msg % orig_val) + return val == "enable" context = req.environ['nova.context'] authorize(context) # See what the user wants to 'update' - if not self.is_valid_body(body, 'host'): - raise webob.exc.HTTPBadRequest( - explanation=_("The request body invalid")) - params = dict([(k.strip().lower(), v) - for k, v in body['host'].iteritems()]) - orig_status = status = params.pop('status', None) - orig_maint_mode = maint_mode = params.pop('maintenance_mode', None) - # Validate the request - if len(params) > 0: - # Some extra param was passed. Fail. 
- explanation = _("Invalid update setting: '%s'") % params.keys()[0] - raise webob.exc.HTTPBadRequest(explanation=explanation) - if orig_status is not None: - status = read_enabled(orig_status, _("Invalid status: '%s'")) - if orig_maint_mode is not None: - maint_mode = read_enabled(orig_maint_mode, _("Invalid mode: '%s'")) - if status is None and maint_mode is None: - explanation = _("'status' or 'maintenance_mode' needed for " - "host update") - raise webob.exc.HTTPBadRequest(explanation=explanation) + status = body['host'].get('status') + maint_mode = body['host'].get('maintenance_mode') + if status is not None: + status = read_enabled(status) + if maint_mode is not None: + maint_mode = read_enabled(maint_mode) # Make the calls and merge the results result = {'host': id} if status is not None: diff --git a/nova/api/openstack/compute/schemas/v3/hosts.py b/nova/api/openstack/compute/schemas/v3/hosts.py new file mode 100644 index 0000000000..30ec09f40a --- /dev/null +++ b/nova/api/openstack/compute/schemas/v3/hosts.py @@ -0,0 +1,43 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +update = { + 'type': 'object', + 'properties': { + 'host': { + 'type': 'object', + 'properties': { + 'status': { + 'type': 'string', + 'enum': ['enable', 'disable', + 'Enable', 'Disable', + 'ENABLE', 'DISABLE'], + }, + 'maintenance_mode': { + 'type': 'string', + 'enum': ['enable', 'disable', + 'Enable', 'Disable', + 'ENABLE', 'DISABLE'], + }, + }, + 'anyOf': [ + {'required': ['status']}, + {'required': ['maintenance_mode']} + ], + 'additionalProperties': False, + }, + }, + 'required': ['host'], + 'additionalProperties': False, +} diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_hosts.py b/nova/tests/api/openstack/compute/plugins/v3/test_hosts.py index 2029ae9002..d57f536116 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_hosts.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_hosts.py @@ -167,7 +167,7 @@ def setUp(self): def _test_host_update(self, host, key, val, expected_value): body = {'host': {key: val}} - result = self.controller.update(self.req, host, body) + result = self.controller.update(self.req, host, body=body) self.assertEqual(result['host'][key], expected_value) def test_list_hosts(self): @@ -211,7 +211,7 @@ def _test_host_update_service_unavailable(self, key, val): body = {'host': {key: val}} host = "serviceunavailable" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, host, body) + self.req, host, body=body) def test_enable_host_service_unavailable(self): self._test_host_update_service_unavailable('status', 'enable') @@ -307,45 +307,55 @@ def test_host_power_action_bad_host(self): def test_bad_status_value(self): bad_body = {"host": {"status": "bad"}} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, "host_c1", bad_body) + self.assertRaises(exception.ValidationError, self.controller.update, + self.req, "host", body=bad_body) bad_body2 = {"host": {"status": "disablabc"}} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, 
"host_c1", bad_body2) + self.assertRaises(exception.ValidationError, self.controller.update, + self.req, "host", body=bad_body2) def test_bad_update_key(self): bad_body = {"host": {"crazy": "bad"}} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, "host_c1", bad_body) + self.assertRaises(exception.ValidationError, self.controller.update, + self.req, "host", body=bad_body) def test_bad_update_key_type(self): bad_body = {"host": "abc"} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, "host_c1", bad_body) + self.assertRaises(exception.ValidationError, self.controller.update, + self.req, "host", body=bad_body) bad_body = {"host": None} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, "host_c1", bad_body) + self.assertRaises(exception.ValidationError, self.controller.update, + self.req, "host", body=bad_body) def test_bad_update_empty(self): bad_body = {} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, "host_c1", bad_body) + self.assertRaises(exception.ValidationError, self.controller.update, + self.req, "host", body=bad_body) def test_bad_update_key_and_correct_update_key(self): bad_body = {"host": {"status": "disable", "crazy": "bad"}} - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - self.req, "host_c1", bad_body) + self.assertRaises(exception.ValidationError, self.controller.update, + self.req, "host", body=bad_body) def test_good_update_keys(self): body = {"host": {"status": "disable", "maintenance_mode": "enable"}} - result = self.controller.update(self.req, 'host_c1', body) + result = self.controller.update(self.req, 'host_c1', body=body) self.assertEqual(result["host"]["host"], "host_c1") self.assertEqual(result["host"]["status"], "disabled") self.assertEqual(result["host"]["maintenance_mode"], "on_maintenance") + def test_update_with_status_key_only(self): + body = {"host": {"status": "enable"}} + result = 
self.controller.update(self.req, 'host_c1', body=body) + self.assertEqual("enabled", result["host"]["status"]) + + def test_update_with_maintenance_mode_key_only(self): + body = {"host": {"maintenance_mode": "enable"}} + result = self.controller.update(self.req, 'host_c1', body=body) + self.assertEqual("on_maintenance", result["host"]["maintenance_mode"]) + def test_show_forbidden(self): self.req.environ["nova.context"].is_admin = False dest = 'dummydest' From 40128bb71bca5d4c54120d34313de8c5d9c71e0c Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Tue, 8 Apr 2014 13:46:49 +0900 Subject: [PATCH 014/486] Add API schema for v2.1/v3 flavors_extraspecs API By defining the API schema, it is possible to separate the validation code from the API method. The API method can be more simple. In addition, a response of API validation error can be consistent for the whole Nova API. Partially implements blueprint v3-api-schema Change-Id: I71f89b59dd4e542d2befa971182cc587b6fec269 --- .../compute/plugins/v3/flavors_extraspecs.py | 28 ++---- .../compute/schemas/v3/flavors_extraspecs.py | 33 +++++++ nova/api/validation/parameter_types.py | 11 +++ nova/api/validation/validators.py | 7 +- .../plugins/v3/test_flavors_extra_specs.py | 93 +++++++++++++------ nova/tests/test_api_validation.py | 42 +++++++++ 6 files changed, 165 insertions(+), 49 deletions(-) create mode 100644 nova/api/openstack/compute/schemas/v3/flavors_extraspecs.py diff --git a/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py b/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py index cb0580aa04..ff2e3beb49 100644 --- a/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py +++ b/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py @@ -15,9 +15,10 @@ import webob +from nova.api.openstack.compute.schemas.v3 import flavors_extraspecs from nova.api.openstack import extensions from nova.api.openstack import wsgi -from nova.compute import flavors +from nova.api import validation from 
nova import exception from nova import objects from nova.openstack.common.db import exception as db_exc @@ -37,17 +38,6 @@ def _get_extra_specs(self, context, flavor_id): flavor = objects.Flavor.get_by_flavor_id(context, flavor_id) return dict(extra_specs=flavor.extra_specs) - def _check_body(self, body): - if body is None or body == "": - expl = _('No Request Body') - raise webob.exc.HTTPBadRequest(explanation=expl) - - def _check_key_names(self, keys): - try: - flavors.validate_extra_spec_keys(keys) - except exception.InvalidInput as error: - raise webob.exc.HTTPBadRequest(explanation=error.format_message()) - @extensions.expected_errors(()) def index(self, req, flavor_id): """Returns the list of extra specs for a given flavor.""" @@ -57,14 +47,12 @@ def index(self, req, flavor_id): @extensions.expected_errors((400, 404, 409)) @wsgi.response(201) + @validation.schema(flavors_extraspecs.create) def create(self, req, flavor_id, body): context = req.environ['nova.context'] self.authorize(context, action='create') - self._check_body(body) - specs = body.get('extra_specs', {}) - if not specs or type(specs) is not dict: - raise webob.exc.HTTPBadRequest(_('No or bad extra_specs provided')) - self._check_key_names(specs.keys()) + + specs = body['extra_specs'] try: flavor = objects.Flavor.get_by_flavor_id(context, flavor_id) flavor.extra_specs = dict(flavor.extra_specs, **specs) @@ -77,16 +65,14 @@ def create(self, req, flavor_id, body): return body @extensions.expected_errors((400, 404, 409)) + @validation.schema(flavors_extraspecs.update) def update(self, req, flavor_id, id, body): context = req.environ['nova.context'] self.authorize(context, action='update') - self._check_body(body) + if id not in body: expl = _('Request body and URI mismatch') raise webob.exc.HTTPBadRequest(explanation=expl) - if len(body) > 1: - expl = _('Request body contains too many items') - raise webob.exc.HTTPBadRequest(explanation=expl) try: flavor = objects.Flavor.get_by_flavor_id(context, 
flavor_id) flavor.extra_specs = dict(flavor.extra_specs, **body) diff --git a/nova/api/openstack/compute/schemas/v3/flavors_extraspecs.py b/nova/api/openstack/compute/schemas/v3/flavors_extraspecs.py new file mode 100644 index 0000000000..0f702b78e1 --- /dev/null +++ b/nova/api/openstack/compute/schemas/v3/flavors_extraspecs.py @@ -0,0 +1,33 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from nova.api.validation import parameter_types + +create = { + 'type': 'object', + 'properties': { + 'extra_specs': parameter_types.metadata + }, + 'required': ['extra_specs'], + 'additionalProperties': False, +} + + +update = copy.deepcopy(parameter_types.metadata) +update.update({ + 'minProperties': 1, + 'maxProperties': 1 +}) diff --git a/nova/api/validation/parameter_types.py b/nova/api/validation/parameter_types.py index 8c425df43e..48829a7f7c 100644 --- a/nova/api/validation/parameter_types.py +++ b/nova/api/validation/parameter_types.py @@ -78,3 +78,14 @@ image_ref = { 'type': 'string', } + + +metadata = { + 'type': 'object', + 'patternProperties': { + '^[a-zA-Z0-9-_:. 
]{1,255}$': { + 'type': 'string', 'maxLength': 255 + } + }, + 'additionalProperties': False +} diff --git a/nova/api/validation/validators.py b/nova/api/validation/validators.py index 3e5e82accb..c34f1ee689 100644 --- a/nova/api/validation/validators.py +++ b/nova/api/validation/validators.py @@ -17,6 +17,7 @@ """ import jsonschema +import six from nova import exception from nova.openstack.common.gettextutils import _ @@ -65,7 +66,11 @@ def validate(self, *args, **kwargs): } else: detail = ex.message - + raise exception.ValidationError(detail=detail) + except TypeError as ex: + # NOTE: If passing non string value to patternProperties parameter, + # TypeError happens. Here is for catching the TypeError. + detail = six.text_type(ex) raise exception.ValidationError(detail=detail) def _number_from_str(self, instance): diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_flavors_extra_specs.py b/nova/tests/api/openstack/compute/plugins/v3/test_flavors_extra_specs.py index 54a6eba7d4..967467f09f 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_flavors_extra_specs.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_flavors_extra_specs.py @@ -111,7 +111,7 @@ def test_not_found_because_flavor(self): self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, 1, 'key5') self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, - req, 1, 'key5', {'key5': 'value5'}) + req, 1, 'key5', body={'key5': 'value5'}) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1, 'key5') @@ -120,7 +120,7 @@ def test_not_found_because_flavor(self): with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get: mock_get.side_effect = exception.FlavorNotFound(flavor_id='1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, - req, 1, {'extra_specs': {'key5': 'value5'}}) + req, 1, body={'extra_specs': {'key5': 'value5'}}) def test_delete(self): flavor = dict(test_flavor.fake_flavor, @@ -154,7 +154,7 @@ def 
test_create(self): req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs', use_admin_context=True) - res_dict = self.controller.create(req, 1, body) + res_dict = self.controller.create(req, 1, body=body) self.assertEqual('value1', res_dict['extra_specs']['key1']) self.assertEqual(self.controller.create.wsgi_code, 201) @@ -167,17 +167,40 @@ def test_create_no_admin(self): req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs') self.assertRaises(exception.Forbidden, self.controller.create, - req, 1, body) + req, 1, body=body) - def test_create_empty_body(self): + def _test_create_bad_request(self, body): self.stubs.Set(nova.db, 'flavor_extra_specs_update_or_create', return_create_flavor_extra_specs) req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs', use_admin_context=True) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - req, 1, '') + self.assertRaises(exception.ValidationError, self.controller.create, + req, 1, body=body) + + def test_create_empty_body(self): + self._test_create_bad_request('') + + def test_create_non_dict_extra_specs(self): + self._test_create_bad_request({"extra_specs": "non_dict"}) + + def test_create_non_string_key(self): + self._test_create_bad_request({"extra_specs": {None: "value1"}}) + + def test_create_non_string_value(self): + self._test_create_bad_request({"extra_specs": {"key1": None}}) + + def test_create_zero_length_key(self): + self._test_create_bad_request({"extra_specs": {"": "value1"}}) + + def test_create_long_key(self): + key = "a" * 256 + self._test_create_bad_request({"extra_specs": {key: "value1"}}) + + def test_create_long_value(self): + value = "a" * 256 + self._test_create_bad_request({"extra_specs": {"key1": value}}) def test_create_flavor_not_found(self): def fake_instance_type_extra_specs_update_or_create(*args, **kwargs): @@ -190,7 +213,7 @@ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs): req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs', 
use_admin_context=True) self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, - req, 1, body) + req, 1, body=body) def test_create_flavor_db_duplicate(self): def fake_instance_type_extra_specs_update_or_create(*args, **kwargs): @@ -203,7 +226,7 @@ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs): req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs', use_admin_context=True) self.assertRaises(webob.exc.HTTPConflict, self.controller.create, - req, 1, body) + req, 1, body=body) @mock.patch('nova.db.flavor_extra_specs_update_or_create') def test_create_invalid_specs_key(self, mock_flavor_extra_specs): @@ -215,8 +238,8 @@ def test_create_invalid_specs_key(self, mock_flavor_extra_specs): req = fakes.HTTPRequest.blank('/flavors/1/extra-specs', use_admin_context=True) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - req, 1, body) + self.assertRaises(exception.ValidationError, + self.controller.create, req, 1, body=body) @mock.patch('nova.db.flavor_extra_specs_update_or_create') def test_create_valid_specs_key(self, mock_flavor_extra_specs): @@ -228,7 +251,7 @@ def test_create_valid_specs_key(self, mock_flavor_extra_specs): req = fakes.HTTPRequest.blank('/flavors/1/extra-specs', use_admin_context=True) - res_dict = self.controller.create(req, 1, body) + res_dict = self.controller.create(req, 1, body=body) self.assertEqual('value1', res_dict['extra_specs'][key]) self.assertEqual(self.controller.create.wsgi_code, 201) @@ -240,7 +263,7 @@ def test_update_item(self): req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/key1', use_admin_context=True) - res_dict = self.controller.update(req, 1, 'key1', body) + res_dict = self.controller.update(req, 1, 'key1', body=body) self.assertEqual('value1', res_dict['key1']) @@ -252,28 +275,44 @@ def test_update_item_no_admin(self): req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/key1') self.assertRaises(exception.Forbidden, self.controller.update, - req, 1, 'key1', body) 
+ req, 1, 'key1', body=body) - def test_update_item_empty_body(self): + def _test_update_item_bad_request(self, body): self.stubs.Set(nova.db, 'flavor_extra_specs_update_or_create', return_create_flavor_extra_specs) req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/key1', use_admin_context=True) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - req, 1, 'key1', '') + self.assertRaises(exception.ValidationError, self.controller.update, + req, 1, 'key1', body=body) + + def test_update_item_empty_body(self): + self._test_update_item_bad_request('') def test_update_item_too_many_keys(self): - self.stubs.Set(nova.db, - 'flavor_extra_specs_update_or_create', - return_create_flavor_extra_specs) body = {"key1": "value1", "key2": "value2"} + self._test_update_item_bad_request(body) - req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/key1', - use_admin_context=True) - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - req, 1, 'key1', body) + def test_update_item_non_dict_extra_specs(self): + self._test_update_item_bad_request("non_dict") + + def test_update_item_non_string_key(self): + self._test_update_item_bad_request({None: "value1"}) + + def test_update_item_non_string_value(self): + self._test_update_item_bad_request({"key1": None}) + + def test_update_item_zero_length_key(self): + self._test_update_item_bad_request({"": "value1"}) + + def test_update_item_long_key(self): + key = "a" * 256 + self._test_update_item_bad_request({key: "value1"}) + + def test_update_item_long_value(self): + value = "a" * 256 + self._test_update_item_bad_request({"key1": value}) def test_update_item_body_uri_mismatch(self): self.stubs.Set(nova.db, @@ -284,7 +323,7 @@ def test_update_item_body_uri_mismatch(self): req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/bad', use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, - req, 1, 'bad', body) + req, 1, 'bad', body=body) def 
test_update_flavor_not_found(self): def fake_instance_type_extra_specs_update_or_create(*args, **kwargs): @@ -298,7 +337,7 @@ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs): req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/key1', use_admin_context=True) self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, - req, 1, 'key1', body) + req, 1, 'key1', body=body) def test_update_flavor_db_duplicate(self): def fake_instance_type_extra_specs_update_or_create(*args, **kwargs): @@ -312,4 +351,4 @@ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs): req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/key1', use_admin_context=True) self.assertRaises(webob.exc.HTTPConflict, self.controller.update, - req, 1, 'key1', body) + req, 1, 'key1', body=body) diff --git a/nova/tests/test_api_validation.py b/nova/tests/test_api_validation.py index fc1b4598a1..7544c8d7b3 100644 --- a/nova/tests/test_api_validation.py +++ b/nova/tests/test_api_validation.py @@ -144,6 +144,48 @@ def test_validate_additionalProperties_disable_fails(self): expected_detail=detail) +class PatternPropertiesTestCase(APIValidationTestCase): + + def setUp(self): + super(PatternPropertiesTestCase, self).setUp() + schema = { + 'patternProperties': { + '^[a-zA-Z0-9]{1,10}$': { + 'type': 'string' + }, + }, + 'additionalProperties': False, + } + + @validation.schema(request_body_schema=schema) + def post(body): + return 'Validation succeeded.' 
+ + self.post = post + + def test_validate_patternProperties(self): + self.assertEqual('Validation succeeded.', + self.post(body={'foo': 'bar'})) + + def test_validate_patternProperties_fails(self): + detail = "Additional properties are not allowed ('__' was unexpected)" + self.check_validation_error(self.post, body={'__': 'bar'}, + expected_detail=detail) + + detail = "Additional properties are not allowed ('' was unexpected)" + self.check_validation_error(self.post, body={'': 'bar'}, + expected_detail=detail) + + detail = ("Additional properties are not allowed ('0123456789a' was" + " unexpected)") + self.check_validation_error(self.post, body={'0123456789a': 'bar'}, + expected_detail=detail) + + detail = "expected string or buffer" + self.check_validation_error(self.post, body={None: 'bar'}, + expected_detail=detail) + + class StringTestCase(APIValidationTestCase): def setUp(self): From e2087e483f082b581f1111023596936d9a55f543 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sun, 29 Jun 2014 05:51:47 -0700 Subject: [PATCH 015/486] Baremetal driver: remove unused states NULL and INIT were not used since commit c20110d15be37948ddd9ef5f38001328aabf5b1d Change-Id: I25c34872b100807d691e7e15f7b70be7ad1912b9 --- nova/virt/baremetal/baremetal_states.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/virt/baremetal/baremetal_states.py b/nova/virt/baremetal/baremetal_states.py index e48382f5b6..82e41fb3ce 100644 --- a/nova/virt/baremetal/baremetal_states.py +++ b/nova/virt/baremetal/baremetal_states.py @@ -24,8 +24,6 @@ """ -NULL = None -INIT = 'initializing' ACTIVE = 'active' BUILDING = 'building' DEPLOYING = 'deploying' From 68e008b21b463a261461a0f7bbfaa92f4e9a7e92 Mon Sep 17 00:00:00 2001 From: Feodor Tersin Date: Tue, 27 May 2014 00:00:07 +0400 Subject: [PATCH 016/486] Skip none value attributes for ec2 image bdm output. Fix crash in _format_block_device_mapping when there are None values for formatting attributes in the mapping. 
Change-Id: I0ebdb844d75cdf1580cc88e1ff40fc21aff96f21 Closes-Bug: #1323403 --- nova/api/ec2/cloud.py | 2 +- nova/tests/api/ec2/test_cloud.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 1b22d280b0..4f21c68581 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -175,7 +175,7 @@ def _format_block_device_mapping(bdm): ('deleteOnTermination', 'delete_on_termination')) ebs = {} for name, k in ebs_keys: - if k in bdm: + if bdm.get(k) is not None: if k == 'snapshot_id': ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k]) elif k == 'volume_id': diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 7b802e850a..76670b7612 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -1486,7 +1486,8 @@ def _setUpImageSet(self, create_volumes_and_snapshots=False): mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}] block_device_mapping2 = [{'device_name': '/dev/sdb1', - 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e7'}] + 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e7', + 'volume_id': None}] image2 = { 'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', 'name': 'fake_name', From 4001a160277f1716d41ea9d720ef18e5c6c6111d Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Tue, 1 Jul 2014 12:07:53 +0300 Subject: [PATCH 017/486] VMware: remove unused parameter 'network_info' The parameter 'network_info' is not used for destroying an instance and doesn't need to be passed through the whole stack. 
Change-Id: I0a8b912ff7693478917e849e17093d1541adeeeb --- nova/tests/virt/vmwareapi/test_driver_api.py | 2 -- nova/tests/virt/vmwareapi/test_vmops.py | 2 +- nova/virt/vmwareapi/driver.py | 4 ++-- nova/virt/vmwareapi/vmops.py | 11 +++++------ 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index 2d98a462a8..cbd6710ed0 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -1386,7 +1386,6 @@ def test_destroy_non_existent(self): self.network_info, None, self.destroy_disks) mock_destroy.assert_called_once_with(self.instance, - self.network_info, self.destroy_disks) def test_destroy_instance_without_compute(self): @@ -2352,7 +2351,6 @@ def test_destroy_non_existent(self): self.network_info, None, self.destroy_disks) mock_destroy.assert_called_once_with(self.instance, - self.network_info, self.destroy_disks) def test_destroy_instance_without_compute(self): diff --git a/nova/tests/virt/vmwareapi/test_vmops.py b/nova/tests/virt/vmwareapi/test_vmops.py index 7cd1748bd2..06ad426e02 100644 --- a/nova/tests/virt/vmwareapi/test_vmops.py +++ b/nova/tests/virt/vmwareapi/test_vmops.py @@ -428,7 +428,7 @@ def fake_call_method(module, method, *args, **kwargs): _get_vm_ref_from_name.assert_called_once_with(self._session, 'fake_uuid-rescue') _power_off.assert_called_once_with(vm_rescue_ref) - _destroy_instance.assert_called_once_with(r_instance, None, + _destroy_instance.assert_called_once_with(r_instance, instance_name='fake_uuid-rescue') def _test_finish_migration(self, power_on=True, resize_instance=False): diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index 1e5f773213..2013733b93 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -191,7 +191,7 @@ def destroy(self, context, instance, network_info, block_device_info=None, if not instance['node']: return - 
self._vmops.destroy(instance, network_info, destroy_disks) + self._vmops.destroy(instance, destroy_disks) def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True): @@ -670,7 +670,7 @@ def destroy(self, context, instance, network_info, block_device_info=None, return _vmops = self._get_vmops_for_compute_node(instance['node']) - _vmops.destroy(instance, network_info, destroy_disks) + _vmops.destroy(instance, destroy_disks) def pause(self, instance): """Pause VM instance.""" diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 8cdaaaf469..056852ca83 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -828,7 +828,7 @@ def reboot(self, instance, network_info): self._session._wait_for_task(reset_task) LOG.debug("Did hard reboot of VM", instance=instance) - def _destroy_instance(self, instance, network_info, destroy_disks=True, + def _destroy_instance(self, instance, destroy_disks=True, instance_name=None): # Destroy a VM instance # Get the instance name. In some cases this may differ from the 'uuid', @@ -898,7 +898,7 @@ def _destroy_instance(self, instance, network_info, destroy_disks=True, finally: vm_util.vm_ref_cache_delete(instance_name) - def destroy(self, instance, network_info, destroy_disks=True): + def destroy(self, instance, destroy_disks=True): """Destroy a VM instance. 
Steps followed for each VM are: @@ -915,11 +915,10 @@ def destroy(self, instance, network_info, destroy_disks=True): LOG.debug("Rescue VM destroyed", instance=instance) except Exception: rescue_name = instance['uuid'] + self._rescue_suffix - self._destroy_instance(instance, network_info, + self._destroy_instance(instance, destroy_disks=destroy_disks, instance_name=rescue_name) - self._destroy_instance(instance, network_info, - destroy_disks=destroy_disks) + self._destroy_instance(instance, destroy_disks=destroy_disks) LOG.debug("Instance destroyed", instance=instance) def pause(self, instance): @@ -1022,7 +1021,7 @@ def unrescue(self, instance, power_on=True): device = vm_util.get_vmdk_volume_disk(hardware_devices, path=vmdk_path) self._power_off_vm_ref(vm_rescue_ref) self._volumeops.detach_disk_from_vm(vm_rescue_ref, r_instance, device) - self._destroy_instance(r_instance, None, instance_name=instance_name) + self._destroy_instance(r_instance, instance_name=instance_name) if power_on: vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref) From ebe02b5f92ae5f5743caa9699f699c1cb3ae54ee Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Tue, 1 Jul 2014 14:04:32 +0300 Subject: [PATCH 018/486] VMware: cleanup the constructors of the compute drivers The 'read_only' parameter was never used by the compute drivers. The 'scheme' parameter should be passed to the base constructor. Change-Id: I3abd40a50896664ee274f057ded8ec2460679c82 --- nova/virt/vmwareapi/driver.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index 1e5f773213..e63c230377 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -104,7 +104,7 @@ def _do_deprecation_warning(self): 'removed in the Juno release. 
The VC driver will remain ' 'and continue to be supported.')) - def __init__(self, virtapi, read_only=False, scheme="https"): + def __init__(self, virtapi, scheme="https"): super(VMwareESXDriver, self).__init__(virtapi) self._do_deprecation_warning() @@ -387,8 +387,8 @@ def _do_deprecation_warning(self): # Driver validated by VMware's Minesweeper CI pass - def __init__(self, virtapi, read_only=False, scheme="https"): - super(VMwareVCDriver, self).__init__(virtapi) + def __init__(self, virtapi, scheme="https"): + super(VMwareVCDriver, self).__init__(virtapi, scheme) # Get the list of clusters to be used self._cluster_names = CONF.vmware.cluster_name From d6ca1cc02ab58615a6bb1b337db34dd06525112e Mon Sep 17 00:00:00 2001 From: "Leandro I. Costantino" Date: Fri, 30 May 2014 09:09:07 -0300 Subject: [PATCH 019/486] Move rebuild to conductor and add find host logic [*] Move rebuild instance to conductor task api will help in consolidating the logic for both evacuate instance and rebuild instance. Rebuild instance and evacuate instance uses the conductor task api for rebuild instance workflow. [*] Add logic required to support rebuild without host. (used by evacuate) [*] Refactor rebuild action to pass the instance host as argument, instead of None. Evacuate and Rebuild will behave the same way as today. Modifications for the new feature in API will be implemented in new patches. Partial Implements: blueprint find-host-and-evacuate-instance Change-Id: I4b1a9c2227574db7179441d531a49145f213ea87 Co-Authored-By: Juan M. 
Olle Co-Authored-By: Anuj Mathur Co-Authored-By: Navneet Kumar Co-Authored-By: Claxton Correya --- nova/compute/api.py | 33 +++++---- nova/conductor/api.py | 36 ++++++++++ nova/conductor/manager.py | 43 +++++++++++- nova/conductor/rpcapi.py | 15 ++++ nova/tests/compute/test_compute.py | 4 +- nova/tests/compute/test_compute_api.py | 8 +-- nova/tests/conductor/test_conductor.py | 95 ++++++++++++++++++++++++++ 7 files changed, 214 insertions(+), 20 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index f7ae2a8b57..cf973c3e49 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -2173,11 +2173,12 @@ def _reset_image_metadata(): self._record_action_start(context, instance, instance_actions.REBUILD) - self.compute_rpcapi.rebuild_instance(context, instance=instance, + self.compute_task_api.rebuild_instance(context, instance=instance, new_pass=admin_password, injected_files=files_to_inject, image_ref=image_href, orig_image_ref=orig_image_ref, orig_sys_metadata=orig_sys_metadata, bdms=bdms, - preserve_ephemeral=preserve_ephemeral, kwargs=kwargs) + preserve_ephemeral=preserve_ephemeral, host=instance.host, + kwargs=kwargs) @wrap_check_policy @check_instance_lock @@ -3011,6 +3012,12 @@ def evacuate(self, context, instance, host, on_shared_storage, Checking vm compute host state, if the host not in expected_state, raising an exception. + + :param instance: The instance to evacuate + :param host: Target host. 
if not set, the scheduler will pick up one + :param on_shared_storage: True if instance files on shared storage + :param admin_password: password to set on rebuilt instance + """ LOG.debug('vm evacuation scheduled') inst_host = instance.host @@ -3025,17 +3032,17 @@ def evacuate(self, context, instance, host, on_shared_storage, instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.EVACUATE) - return self.compute_rpcapi.rebuild_instance(context, - instance=instance, - new_pass=admin_password, - injected_files=None, - image_ref=None, - orig_image_ref=None, - orig_sys_metadata=None, - bdms=None, - recreate=True, - on_shared_storage=on_shared_storage, - host=host) + return self.compute_task_api.rebuild_instance(context, + instance=instance, + new_pass=admin_password, + injected_files=None, + image_ref=None, + orig_image_ref=None, + orig_sys_metadata=None, + bdms=None, + recreate=True, + on_shared_storage=on_shared_storage, + host=host) def get_migrations(self, context, filters): """Get all migrations for the given filters.""" diff --git a/nova/conductor/api.py b/nova/conductor/api.py index 576364c198..24600eee37 100644 --- a/nova/conductor/api.py +++ b/nova/conductor/api.py @@ -260,6 +260,24 @@ def unshelve_instance(self, context, instance): utils.spawn_n(self._manager.unshelve_instance, context, instance=instance) + def rebuild_instance(self, context, instance, orig_image_ref, image_ref, + injected_files, new_pass, orig_sys_metadata, + bdms, recreate=False, on_shared_storage=False, + preserve_ephemeral=False, host=None, kwargs=None): + # kwargs unused but required for cell compatibility. 
+ utils.spawn_n(self._manager.rebuild_instance, context, + instance=instance, + new_pass=new_pass, + injected_files=injected_files, + image_ref=image_ref, + orig_image_ref=orig_image_ref, + orig_sys_metadata=orig_sys_metadata, + bdms=bdms, + recreate=recreate, + on_shared_storage=on_shared_storage, + host=host, + preserve_ephemeral=preserve_ephemeral) + class API(LocalAPI): """Conductor API that does updates via RPC to the ConductorManager.""" @@ -351,3 +369,21 @@ def build_instances(self, context, instances, image, filter_properties, def unshelve_instance(self, context, instance): self.conductor_compute_rpcapi.unshelve_instance(context, instance=instance) + + def rebuild_instance(self, context, instance, orig_image_ref, image_ref, + injected_files, new_pass, orig_sys_metadata, + bdms, recreate=False, on_shared_storage=False, + preserve_ephemeral=False, host=None, kwargs=None): + # kwargs unused but required for cell compatibility + self.conductor_compute_rpcapi.rebuild_instance(context, + instance=instance, + new_pass=new_pass, + injected_files=injected_files, + image_ref=image_ref, + orig_image_ref=orig_image_ref, + orig_sys_metadata=orig_sys_metadata, + bdms=bdms, + recreate=recreate, + on_shared_storage=on_shared_storage, + preserve_ephemeral=preserve_ephemeral, + host=host) diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py index f909efd2db..f076e13149 100644 --- a/nova/conductor/manager.py +++ b/nova/conductor/manager.py @@ -452,7 +452,7 @@ class ComputeTaskManager(base.Base): may involve coordinating activities on multiple compute nodes. 
""" - target = messaging.Target(namespace='compute_task', version='1.7') + target = messaging.Target(namespace='compute_task', version='1.8') def __init__(self): super(ComputeTaskManager, self).__init__() @@ -708,3 +708,44 @@ def safe_image_show(ctx, image_id): del(sys_meta[key]) instance.system_metadata = sys_meta instance.save() + + def rebuild_instance(self, context, instance, orig_image_ref, image_ref, + injected_files, new_pass, orig_sys_metadata, + bdms, recreate, on_shared_storage, + preserve_ephemeral=False, host=None): + + with compute_utils.EventReporter(context, 'rebuild_server', + instance.uuid): + if not host: + # NOTE(lcostantino): Retrieve scheduler filters for the + # instance when the feature is available + filter_properties = {'ignore_hosts': [instance.host]} + request_spec = scheduler_utils.build_request_spec(context, + image_ref, + [instance]) + try: + hosts = self.scheduler_rpcapi.select_destinations(context, + request_spec, + filter_properties) + host = hosts.pop(0)['host'] + except exception.NoValidHost as ex: + with excutils.save_and_reraise_exception(): + self._set_vm_state_and_notify(context, + 'rebuild_server', + {'vm_state': instance.vm_state, + 'task_state': None}, ex, request_spec) + LOG.warning(_("No valid host found for rebuild"), + instance=instance) + + self.compute_rpcapi.rebuild_instance(context, + instance=instance, + new_pass=new_pass, + injected_files=injected_files, + image_ref=image_ref, + orig_image_ref=orig_image_ref, + orig_sys_metadata=orig_sys_metadata, + bdms=bdms, + recreate=recreate, + on_shared_storage=on_shared_storage, + preserve_ephemeral=preserve_ephemeral, + host=host) diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py index 94974582b7..0972f89e9e 100644 --- a/nova/conductor/rpcapi.py +++ b/nova/conductor/rpcapi.py @@ -367,6 +367,7 @@ class ComputeTaskAPI(object): 1.5 - Added the leagacy_bdm parameter to build_instances 1.6 - Made migrate_server use instance objects 1.7 - Do not send 
block_device_mapping and legacy_bdm to build_instances + 1.8 - Add rebuild_instance """ def __init__(self): @@ -418,3 +419,17 @@ def build_instances(self, context, instances, image, filter_properties, def unshelve_instance(self, context, instance): cctxt = self.client.prepare(version='1.3') cctxt.cast(context, 'unshelve_instance', instance=instance) + + def rebuild_instance(self, ctxt, instance, new_pass, injected_files, + image_ref, orig_image_ref, orig_sys_metadata, bdms, + recreate=False, on_shared_storage=False, host=None, + preserve_ephemeral=False, kwargs=None): + cctxt = self.client.prepare(version='1.8') + cctxt.cast(ctxt, 'rebuild_instance', + instance=instance, new_pass=new_pass, + injected_files=injected_files, image_ref=image_ref, + orig_image_ref=orig_image_ref, + orig_sys_metadata=orig_sys_metadata, bdms=bdms, + recreate=recreate, on_shared_storage=on_shared_storage, + preserve_ephemeral=preserve_ephemeral, + host=host) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index daa81a7d91..67b397892a 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -7178,7 +7178,7 @@ def fake_rpc_rebuild(context, **kwargs): info['image_ref'] = kwargs['instance'].image_ref info['clean'] = kwargs['instance'].obj_what_changed() == set() - self.stubs.Set(self.compute_api.compute_rpcapi, 'rebuild_instance', + self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance', fake_rpc_rebuild) image_ref = instance["image_ref"] + '-new_image_ref' @@ -9151,7 +9151,7 @@ def fake_rebuild_instance(*args, **kwargs): self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up', fake_service_is_up) - self.stubs.Set(self.compute_api.compute_rpcapi, 'rebuild_instance', + self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance', fake_rebuild_instance) self.compute_api.evacuate(self.context.elevated(), instance, diff --git a/nova/tests/compute/test_compute_api.py 
b/nova/tests/compute/test_compute_api.py index db58a3732e..b0026e31c1 100644 --- a/nova/tests/compute/test_compute_api.py +++ b/nova/tests/compute/test_compute_api.py @@ -1795,7 +1795,7 @@ def test_rebuild(self, _record_action_start, _get_image.return_value = (None, image) bdm_get_by_instance_uuid.return_value = bdms - with mock.patch.object(self.compute_api.compute_rpcapi, + with mock.patch.object(self.compute_api.compute_task_api, 'rebuild_instance') as rebuild_instance: self.compute_api.rebuild(self.context, instance, image_href, admin_pass, files_to_inject) @@ -1805,7 +1805,7 @@ def test_rebuild(self, _record_action_start, injected_files=files_to_inject, image_ref=image_href, orig_image_ref=image_href, orig_sys_metadata=orig_system_metadata, bdms=bdms, - preserve_ephemeral=False, kwargs={}) + preserve_ephemeral=False, host=instance.host, kwargs={}) _check_auto_disk_config.assert_called_once_with(image=image) _checks_for_create_and_rebuild.assert_called_once_with(self.context, @@ -1853,7 +1853,7 @@ def get_image(context, image_href): _get_image.side_effect = get_image bdm_get_by_instance_uuid.return_value = bdms - with mock.patch.object(self.compute_api.compute_rpcapi, + with mock.patch.object(self.compute_api.compute_task_api, 'rebuild_instance') as rebuild_instance: self.compute_api.rebuild(self.context, instance, new_image_href, admin_pass, files_to_inject) @@ -1863,7 +1863,7 @@ def get_image(context, image_href): injected_files=files_to_inject, image_ref=new_image_href, orig_image_ref=orig_image_href, orig_sys_metadata=orig_system_metadata, bdms=bdms, - preserve_ephemeral=False, kwargs={}) + preserve_ephemeral=False, host=instance.host, kwargs={}) _check_auto_disk_config.assert_called_once_with(image=new_image) _checks_for_create_and_rebuild.assert_called_once_with(self.context, diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py index cea9866281..b79db734a2 100644 --- a/nova/tests/conductor/test_conductor.py +++ 
b/nova/tests/conductor/test_conductor.py @@ -51,6 +51,7 @@ from nova.tests import fake_instance from nova.tests import fake_notifier from nova.tests import fake_server_actions +from nova.tests import fake_utils from nova import utils @@ -84,6 +85,8 @@ def fake_deserialize_context(serializer, ctxt_dict): self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context', fake_deserialize_context) + fake_utils.stub_out_utils_spawn_n(self.stubs) + def _create_fake_instance(self, params=None, type_name='m1.tiny'): if not params: params = {} @@ -1115,6 +1118,21 @@ def fake_deserialize_context(serializer, ctxt_dict): self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context', fake_deserialize_context) + def _prepare_rebuild_args(self, update_args=None): + rebuild_args = {'new_pass': 'admin_password', + 'injected_files': 'files_to_inject', + 'image_ref': 'image_ref', + 'orig_image_ref': 'orig_image_ref', + 'orig_sys_metadata': 'orig_sys_meta', + 'bdms': {}, + 'recreate': False, + 'on_shared_storage': False, + 'preserve_ephemeral': False, + 'host': 'compute-host'} + if update_args: + rebuild_args.update(update_args) + return rebuild_args + def test_live_migrate(self): inst = fake_instance.fake_db_instance() inst_obj = objects.Instance._from_db_object( @@ -1469,6 +1487,83 @@ def test_unshelve_instance_schedule_and_rebuild_volume_backed(self): system_metadata['shelved_host'] = 'fake-mini' self.conductor_manager.unshelve_instance(self.context, instance) + def test_rebuild_instance(self): + db_instance = jsonutils.to_primitive(self._create_fake_instance()) + inst_obj = objects.Instance.get_by_uuid(self.context, + db_instance['uuid']) + rebuild_args = self._prepare_rebuild_args({'host': inst_obj.host}) + + with contextlib.nested( + mock.patch.object(self.conductor_manager.compute_rpcapi, + 'rebuild_instance'), + mock.patch.object(self.conductor_manager.scheduler_rpcapi, + 'select_destinations') + ) as (rebuild_mock, select_dest_mock): + 
self.conductor_manager.rebuild_instance(context=self.context, + instance=inst_obj, + **rebuild_args) + self.assertFalse(select_dest_mock.called) + rebuild_mock.assert_called_once_with(self.context, + instance=inst_obj, + **rebuild_args) + + def test_rebuild_instance_with_scheduler(self): + db_instance = jsonutils.to_primitive(self._create_fake_instance()) + inst_obj = objects.Instance.get_by_uuid(self.context, + db_instance['uuid']) + inst_obj.host = 'noselect' + rebuild_args = self._prepare_rebuild_args({'host': None}) + expected_host = 'thebesthost' + request_spec = {} + filter_properties = {'ignore_hosts': [(inst_obj.host)]} + + with contextlib.nested( + mock.patch.object(self.conductor_manager.compute_rpcapi, + 'rebuild_instance'), + mock.patch.object(self.conductor_manager.scheduler_rpcapi, + 'select_destinations', + return_value=[{'host': expected_host}]), + mock.patch('nova.scheduler.utils.build_request_spec', + return_value=request_spec) + ) as (rebuild_mock, select_dest_mock, bs_mock): + self.conductor_manager.rebuild_instance(context=self.context, + instance=inst_obj, + **rebuild_args) + select_dest_mock.assert_called_once_with(self.context, + request_spec, + filter_properties) + rebuild_args['host'] = expected_host + rebuild_mock.assert_called_once_with(self.context, + instance=inst_obj, + **rebuild_args) + + def test_rebuild_instance_with_scheduler_no_host(self): + db_instance = jsonutils.to_primitive(self._create_fake_instance()) + inst_obj = objects.Instance.get_by_uuid(self.context, + db_instance['uuid']) + inst_obj.host = 'noselect' + rebuild_args = self._prepare_rebuild_args({'host': None}) + request_spec = {} + filter_properties = {'ignore_hosts': [(inst_obj.host)]} + + with contextlib.nested( + mock.patch.object(self.conductor_manager.compute_rpcapi, + 'rebuild_instance'), + mock.patch.object(self.conductor_manager.scheduler_rpcapi, + 'select_destinations', + side_effect=exc.NoValidHost(reason='')), + 
mock.patch('nova.scheduler.utils.build_request_spec', + return_value=request_spec) + ) as (rebuild_mock, select_dest_mock, bs_mock): + self.assertRaises(exc.NoValidHost, + self.conductor_manager.rebuild_instance, + context=self.context, instance=inst_obj, + **rebuild_args) + select_dest_mock.assert_called_once_with(self.context, + request_spec, + filter_properties) + self.assertFalse(rebuild_mock.called) + class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase): """ComputeTaskManager Tests.""" From 6ca97b1cc1019e5f43e72001f89a1305f8317557 Mon Sep 17 00:00:00 2001 From: "Leandro I. Costantino" Date: Thu, 27 Feb 2014 09:08:32 -0300 Subject: [PATCH 020/486] Add APIv3 support to make host optional on evacuate. DocImpact: The evacuate target host is now optional. If 'host' field is not sent in the request, the scheduler will determine the target host. This will include nova client changes ( on the proper commit ) to support this new optional parameter. Implements: blueprint find-host-and-evacuate-instance Change-Id: I907ea4ef39cfad50a48cb80bceba217fdf482f6e Co-Authored-By: Juan M. 
Olle Co-Authored-By: Andres Buraschi Co-Authored-By: Anuj Mathur Co-Authored-By: Navneet Kumar Co-Authored-By: Claxton Correya --- .../server-evacuate-find-host-req.json | 6 ++ .../server-evacuate-find-host-resp.json | 3 + .../openstack/compute/plugins/v3/evacuate.py | 13 ++-- .../openstack/compute/schemas/v3/evacuate.py | 2 +- .../compute/plugins/v3/test_evacuate.py | 8 ++- .../server-evacuate-find-host-req.json.tpl | 6 ++ .../server-evacuate-find-host-resp.json.tpl | 3 + nova/tests/integrated/v3/test_evacuate.py | 72 ++++++++++++------- 8 files changed, 80 insertions(+), 33 deletions(-) create mode 100644 doc/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json create mode 100644 doc/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json create mode 100644 nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl create mode 100644 nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl diff --git a/doc/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json b/doc/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json new file mode 100644 index 0000000000..a8a2162381 --- /dev/null +++ b/doc/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json @@ -0,0 +1,6 @@ +{ + "evacuate": { + "admin_password": "MySecretPass", + "on_shared_storage": "False" + } +} diff --git a/doc/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json b/doc/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json new file mode 100644 index 0000000000..fcd865c043 --- /dev/null +++ b/doc/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json @@ -0,0 +1,3 @@ +{ + "admin_password": "MySecretPass" +} \ No newline at end of file diff --git a/nova/api/openstack/compute/plugins/v3/evacuate.py b/nova/api/openstack/compute/plugins/v3/evacuate.py index dc0f39e6ad..500e5f07da 100644 --- a/nova/api/openstack/compute/plugins/v3/evacuate.py +++ 
b/nova/api/openstack/compute/plugins/v3/evacuate.py @@ -55,7 +55,7 @@ def _evacuate(self, req, id, body): authorize(context) evacuate_body = body["evacuate"] - host = evacuate_body["host"] + host = evacuate_body.get("host") on_shared_storage = strutils.bool_from_string( evacuate_body["on_shared_storage"]) @@ -71,11 +71,12 @@ def _evacuate(self, req, id, body): elif not on_shared_storage: password = utils.generate_password() - try: - self.host_api.service_get_by_compute_host(context, host) - except exception.NotFound: - msg = _("Compute host %s not found.") % host - raise exc.HTTPNotFound(explanation=msg) + if host is not None: + try: + self.host_api.service_get_by_compute_host(context, host) + except exception.NotFound: + msg = _("Compute host %s not found.") % host + raise exc.HTTPNotFound(explanation=msg) instance = common.get_instance(self.compute_api, context, id, want_objects=True) diff --git a/nova/api/openstack/compute/schemas/v3/evacuate.py b/nova/api/openstack/compute/schemas/v3/evacuate.py index c48b45560a..a71e995dc6 100644 --- a/nova/api/openstack/compute/schemas/v3/evacuate.py +++ b/nova/api/openstack/compute/schemas/v3/evacuate.py @@ -25,7 +25,7 @@ 'on_shared_storage': parameter_types.boolean, 'admin_password': parameter_types.admin_password, }, - 'required': ['host', 'on_shared_storage'], + 'required': ['on_shared_storage'], 'additionalProperties': False, }, }, diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_evacuate.py b/nova/tests/api/openstack/compute/plugins/v3/test_evacuate.py index 2f1f1ffc4d..cc168739ef 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_evacuate.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_evacuate.py @@ -14,6 +14,7 @@ import uuid +import mock from oslo.config import cfg import webob @@ -82,11 +83,14 @@ def _gen_request_with_app(self, json_load, is_admin=True): return req, app - def test_evacuate_instance_with_no_target(self): + @mock.patch('nova.compute.api.API.evacuate') + def 
test_evacuate_instance_with_no_target(self, evacuate_mock): req, app = self._gen_request_with_app({'on_shared_storage': 'False', 'admin_password': 'MyNewPass'}) res = req.get_response(app) - self.assertEqual(400, res.status_int) + self.assertEqual(202, res.status_int) + evacuate_mock.assert_called_once_with(mock.ANY, mock.ANY, None, + mock.ANY, mock.ANY) def test_evacuate_instance_with_empty_host(self): req, app = self._gen_request_with_app({'host': '', diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl new file mode 100644 index 0000000000..7ba9398ba6 --- /dev/null +++ b/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl @@ -0,0 +1,6 @@ +{ + "evacuate": { + "admin_password": "%(adminPass)s", + "on_shared_storage": "%(onSharedStorage)s" + } +} diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl new file mode 100644 index 0000000000..e6d6ad9ed1 --- /dev/null +++ b/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl @@ -0,0 +1,3 @@ +{ + "admin_password": "%(password)s" +} diff --git a/nova/tests/integrated/v3/test_evacuate.py b/nova/tests/integrated/v3/test_evacuate.py index e7a5931697..b4666b0263 100644 --- a/nova/tests/integrated/v3/test_evacuate.py +++ b/nova/tests/integrated/v3/test_evacuate.py @@ -13,8 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. 
+import mock + from nova.compute import api as compute_api -from nova.compute import rpcapi as compute_rpcapi +from nova.compute import manager as compute_manager from nova.servicegroup import api as service_group_api from nova.tests.integrated.v3 import test_servers @@ -22,15 +24,9 @@ class EvacuateJsonTest(test_servers.ServersSampleBase): extension_name = "os-evacuate" - def test_server_evacuate(self): - uuid = self._post_server() - - # Note (wingwj): The host can't be the same one. - req_subs = { - 'host': 'testHost', - "adminPass": "MySecretPass", - "onSharedStorage": 'False' - } + def _test_evacuate(self, req_subs, server_req, server_resp, + expected_resp_code): + self.uuid = self._post_server() def fake_service_is_up(self, service): """Simulate validation of instance host is down.""" @@ -44,24 +40,52 @@ def fake_service_get_by_compute_host(self, context, host): 'zone': 'nova' } - def fake_rebuild_instance(_self, ctxt, instance, new_pass, - injected_files, image_ref, orig_image_ref, - orig_sys_metadata, bdms, recreate=False, - on_shared_storage=False, host=None, - preserve_ephemeral=False, kwargs=None): - """Simulate that given parameters are correct.""" - self.assertEqual(uuid, instance["uuid"]) - self.assertEqual(new_pass, "MySecretPass") - self.assertEqual(host, "testHost") + def fake_check_instance_exists(self, context, instance): + """Simulate validation of instance does not exist.""" + return False self.stubs.Set(service_group_api.API, 'service_is_up', fake_service_is_up) self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host', fake_service_get_by_compute_host) - self.stubs.Set(compute_rpcapi.ComputeAPI, 'rebuild_instance', - fake_rebuild_instance) + self.stubs.Set(compute_manager.ComputeManager, + '_check_instance_exists', + fake_check_instance_exists) - response = self._do_post('servers/%s/action' % uuid, - 'server-evacuate-req', req_subs) + response = self._do_post('servers/%s/action' % self.uuid, + server_req, req_subs) subs = 
self._get_regexes() - self._verify_response('server-evacuate-resp', subs, response, 202) + self._verify_response(server_resp, subs, response, expected_resp_code) + + @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance') + def test_server_evacuate(self, rebuild_mock): + # Note (wingwj): The host can't be the same one + req_subs = { + 'host': 'testHost', + "adminPass": "MySecretPass", + "onSharedStorage": 'False' + } + self._test_evacuate(req_subs, 'server-evacuate-req', + 'server-evacuate-resp', 202) + rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY, + orig_image_ref=mock.ANY, image_ref=mock.ANY, + injected_files=mock.ANY, new_pass="MySecretPass", + orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY, + on_shared_storage=False, preserve_ephemeral=mock.ANY, + host='testHost') + + @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance') + def test_server_evacuate_find_host(self, rebuild_mock): + req_subs = { + "adminPass": "MySecretPass", + "onSharedStorage": 'False' + } + self._test_evacuate(req_subs, 'server-evacuate-find-host-req', + 'server-evacuate-find-host-resp', 202) + + rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY, + orig_image_ref=mock.ANY, image_ref=mock.ANY, + injected_files=mock.ANY, new_pass="MySecretPass", + orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY, + on_shared_storage=False, preserve_ephemeral=mock.ANY, + host=None) From 0d781d41fef329294891fdda0c1dac4add5ce7c3 Mon Sep 17 00:00:00 2001 From: Shraddha Pandhe Date: Wed, 25 Jun 2014 20:56:33 +0000 Subject: [PATCH 021/486] Libvirt: Added suffix to configdrive_path required for rescue It was observed that during nova rescue, nova failed to create a rescue disk for disk.config. The reason being that .rescue suffix was missing from the configdrive path. It was working till Havana but the functionality broke in Icehouse. This commit fixes the suffix problem. 
Unittests have been added to verify following scenarios: 1. Make sure that .rescue disks are created when not using config drive 2. Make sure that .rescue disks are created when using config drive Closes-Bug: #1334024 Change-Id: I87449ffddd047cb84b7b881757ea4c29927b95da --- nova/tests/virt/libvirt/fake_imagebackend.py | 15 ++ nova/tests/virt/libvirt/test_driver.py | 154 ++++++++++++++++++- nova/virt/libvirt/driver.py | 6 +- 3 files changed, 171 insertions(+), 4 deletions(-) diff --git a/nova/tests/virt/libvirt/fake_imagebackend.py b/nova/tests/virt/libvirt/fake_imagebackend.py index ef09fdcc52..0946b1a6db 100644 --- a/nova/tests/virt/libvirt/fake_imagebackend.py +++ b/nova/tests/virt/libvirt/fake_imagebackend.py @@ -56,3 +56,18 @@ def snapshot(self, path, image_type=''): #NOTE(bfilippov): this is done in favor for # snapshot tests in test_libvirt.LibvirtConnTestCase return imagebackend.Backend(True).snapshot(path, image_type) + + +class Raw(imagebackend.Image): + # NOTE(spandhe) Added for test_rescue and test_rescue_config_drive + def __init__(self, instance=None, disk_name=None, path=None): + pass + + def _get_driver_format(self): + pass + + def correct_format(self): + pass + + def create_image(self, prepare_template, base, size, *args, **kwargs): + pass diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index af133df03a..c128b21ad2 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -32,6 +32,7 @@ from xml.dom import minidom from nova.api.ec2 import cloud +from nova.api.metadata import base as instance_metadata from nova.compute import flavors from nova.compute import manager from nova.compute import power_state @@ -61,6 +62,7 @@ import nova.tests.image.fake from nova.tests import matchers from nova.tests.objects import test_pci_device +from nova.tests.virt.libvirt import fake_imagebackend from nova.tests.virt.libvirt import fake_libvirt_utils from nova.tests.virt.libvirt 
import fakelibvirt from nova import utils @@ -5199,7 +5201,7 @@ def _test_destroy_removes_disk(self, volume_fail=False): ).AndReturn(instance) self.mox.StubOutWithMock(driver, "block_device_info_get_mapping") driver.block_device_info_get_mapping(vol - ).AndReturn(vol['block_device_mapping']) + ).AndReturn(vol['block_device_mapping']) self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, "volume_driver_method") if volume_fail: @@ -9281,6 +9283,156 @@ def test_detach_interface_with_shutdown_instance(self): 'detach_interface', power_state.SHUTDOWN, expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG)) + def test_rescue(self): + instance = self._create_instance() + instance.config_drive = False + dummyxml = ("instance-0000000a" + "" + "" + "" + "" + "" + "" + "" + "") + network_info = _fake_network_info(self.stubs, 1) + + self.mox.StubOutWithMock(self.libvirtconnection, + '_get_existing_domain_xml') + self.mox.StubOutWithMock(libvirt_utils, 'write_to_file') + self.mox.StubOutWithMock(imagebackend.Backend, 'image') + self.mox.StubOutWithMock(imagebackend.Image, 'cache') + self.mox.StubOutWithMock(self.libvirtconnection, '_get_guest_xml') + self.mox.StubOutWithMock(self.libvirtconnection, '_destroy') + self.mox.StubOutWithMock(self.libvirtconnection, '_create_domain') + + self.libvirtconnection._get_existing_domain_xml(mox.IgnoreArg(), + mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml) + libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) + libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(), + mox.IgnoreArg()) + imagebackend.Backend.image(instance, 'kernel.rescue', 'raw' + ).AndReturn(fake_imagebackend.Raw()) + imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw' + ).AndReturn(fake_imagebackend.Raw()) + imagebackend.Backend.image(instance, 'disk.rescue', 'default' + ).AndReturn(fake_imagebackend.Raw()) + imagebackend.Image.cache(context=mox.IgnoreArg(), + fetch_func=mox.IgnoreArg(), + filename=mox.IgnoreArg(), + image_id=mox.IgnoreArg(), + 
project_id=mox.IgnoreArg(), + user_id=mox.IgnoreArg()).MultipleTimes() + + imagebackend.Image.cache(context=mox.IgnoreArg(), + fetch_func=mox.IgnoreArg(), + filename=mox.IgnoreArg(), + image_id=mox.IgnoreArg(), + project_id=mox.IgnoreArg(), + size=None, user_id=mox.IgnoreArg()) + + image_meta = {'id': 'fake', 'name': 'fake'} + self.libvirtconnection._get_guest_xml(mox.IgnoreArg(), instance, + network_info, mox.IgnoreArg(), + image_meta, rescue=mox.IgnoreArg(), + write_to_disk=mox.IgnoreArg() + ).AndReturn(dummyxml) + + self.libvirtconnection._destroy(instance) + self.libvirtconnection._create_domain(mox.IgnoreArg()) + + self.mox.ReplayAll() + + rescue_password = 'fake_password' + + self.libvirtconnection.rescue(self.context, instance, + network_info, image_meta, rescue_password) + self.mox.VerifyAll() + + def test_rescue_config_drive(self): + instance = self._create_instance() + uuid = instance.uuid + configdrive_path = uuid + '/disk.config.rescue' + dummyxml = ("instance-0000000a" + "" + "" + "" + "" + "" + "" + "" + "") + network_info = _fake_network_info(self.stubs, 1) + + self.mox.StubOutWithMock(self.libvirtconnection, + '_get_existing_domain_xml') + self.mox.StubOutWithMock(libvirt_utils, 'write_to_file') + self.mox.StubOutWithMock(imagebackend.Backend, 'image') + self.mox.StubOutWithMock(imagebackend.Image, 'cache') + self.mox.StubOutWithMock(instance_metadata.InstanceMetadata, + '__init__') + self.mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder') + self.mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive') + self.mox.StubOutWithMock(self.libvirtconnection, '_get_guest_xml') + self.mox.StubOutWithMock(self.libvirtconnection, '_destroy') + self.mox.StubOutWithMock(self.libvirtconnection, '_create_domain') + + self.libvirtconnection._get_existing_domain_xml(mox.IgnoreArg(), + mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml) + libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) + libvirt_utils.write_to_file(mox.IgnoreArg(), 
mox.IgnoreArg(), + mox.IgnoreArg()) + + imagebackend.Backend.image(instance, 'kernel.rescue', 'raw' + ).AndReturn(fake_imagebackend.Raw()) + imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw' + ).AndReturn(fake_imagebackend.Raw()) + imagebackend.Backend.image(instance, 'disk.rescue', 'default' + ).AndReturn(fake_imagebackend.Raw()) + + imagebackend.Image.cache(context=mox.IgnoreArg(), + fetch_func=mox.IgnoreArg(), + filename=mox.IgnoreArg(), + image_id=mox.IgnoreArg(), + project_id=mox.IgnoreArg(), + user_id=mox.IgnoreArg()).MultipleTimes() + + imagebackend.Image.cache(context=mox.IgnoreArg(), + fetch_func=mox.IgnoreArg(), + filename=mox.IgnoreArg(), + image_id=mox.IgnoreArg(), + project_id=mox.IgnoreArg(), + size=None, user_id=mox.IgnoreArg()) + + instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(), + content=mox.IgnoreArg(), + extra_md=mox.IgnoreArg(), + network_info=mox.IgnoreArg()) + cdb = self.mox.CreateMockAnything() + m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg()) + m.AndReturn(cdb) + # __enter__ and __exit__ are required by "with" + cdb.__enter__().AndReturn(cdb) + cdb.make_drive(mox.Regex(configdrive_path)) + cdb.__exit__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg() + ).AndReturn(None) + image_meta = {'id': 'fake', 'name': 'fake'} + self.libvirtconnection._get_guest_xml(mox.IgnoreArg(), instance, + network_info, mox.IgnoreArg(), + image_meta, rescue=mox.IgnoreArg(), + write_to_disk=mox.IgnoreArg() + ).AndReturn(dummyxml) + self.libvirtconnection._destroy(instance) + self.libvirtconnection._create_domain(mox.IgnoreArg()) + + self.mox.ReplayAll() + + rescue_password = 'fake_password' + + self.libvirtconnection.rescue(self.context, instance, network_info, + image_meta, rescue_password) + self.mox.VerifyAll() + class LibvirtVolumeUsageTestCase(test.TestCase): """Test for LibvirtDriver.get_all_volume_usage.""" diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 033d96f7e2..0680717ace 100644 --- 
a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -2457,9 +2457,9 @@ def _get_console_log_path(instance): 'console.log') @staticmethod - def _get_disk_config_path(instance): + def _get_disk_config_path(instance, suffix=''): return os.path.join(libvirt_utils.get_instance_path(instance), - 'disk.config') + 'disk.config' + suffix) def _chown_console_log_for_instance(self, instance): console_log = self._get_console_log_path(instance) @@ -2682,7 +2682,7 @@ def raw(fname): inst_md = instance_metadata.InstanceMetadata(instance, content=files, extra_md=extra_md, network_info=network_info) with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb: - configdrive_path = self._get_disk_config_path(instance) + configdrive_path = self._get_disk_config_path(instance, suffix) LOG.info(_LI('Creating config drive at %(path)s'), {'path': configdrive_path}, instance=instance) From 4f8185549dfe11eb1ce405711593baa1528045ea Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Sat, 12 Apr 2014 13:19:11 +0800 Subject: [PATCH 022/486] Update port binding when unshelve instance When unshelve instance, it should update the attached port also. This patch invoke network_api.migrate_instance_finish to update the port's binding info. 
Change-Id: Iaa77163dde5494154d67ef392142669d70c3e643 Closes-Bug: #1306922 --- nova/compute/manager.py | 4 +++- nova/tests/compute/test_shelve.py | 10 ++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 28ce879a5e..fa0260c95f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -3960,7 +3960,6 @@ def _unshelve_instance(self, context, instance, image, filter_properties, instance.task_state = task_states.SPAWNING instance.save() - network_info = self._get_instance_nw_info(context, instance) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) block_device_info = self._prep_block_device(context, instance, bdms) @@ -3978,6 +3977,9 @@ def _unshelve_instance(self, context, instance, image, filter_properties, shelved_image_ref = instance.image_ref instance.image_ref = image['id'] + self.network_api.migrate_instance_finish(context, instance, + {'source_compute': '', 'dest_compute': self.host}) + network_info = self._get_instance_nw_info(context, instance) try: with rt.instance_claim(context, instance, limits): self.driver.spawn(context, instance, image, injected_files=[], diff --git a/nova/tests/compute/test_shelve.py b/nova/tests/compute/test_shelve.py index ffae1f87a8..88ad6aba93 100644 --- a/nova/tests/compute/test_shelve.py +++ b/nova/tests/compute/test_shelve.py @@ -194,6 +194,8 @@ def test_unshelve(self): self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.rt, 'instance_claim') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') + self.mox.StubOutWithMock(self.compute.network_api, + 'migrate_instance_finish') self.deleted_image_id = None @@ -218,6 +220,9 @@ def fake_claim(context, instance, limits): mox.IgnoreArg()).AndReturn('fake_bdm') db_instance['key_data'] = None db_instance['auto_disk_config'] = None + self.compute.network_api.migrate_instance_finish( + self.context, instance, 
{'source_compute': '', + 'dest_compute': self.compute.host}) self.compute.driver.spawn(self.context, instance, image, injected_files=[], admin_password=None, network_info=[], @@ -276,6 +281,8 @@ def test_unshelve_volume_backed(self): self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.rt, 'instance_claim') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') + self.mox.StubOutWithMock(self.compute.network_api, + 'migrate_instance_finish') self.compute._notify_about_instance_usage(self.context, instance, 'unshelve.start') @@ -288,6 +295,9 @@ def test_unshelve_volume_backed(self): mox.IgnoreArg()).AndReturn('fake_bdm') db_instance['key_data'] = None db_instance['auto_disk_config'] = None + self.compute.network_api.migrate_instance_finish( + self.context, instance, {'source_compute': '', + 'dest_compute': self.compute.host}) self.rt.instance_claim(self.context, instance, limits).AndReturn( claims.Claim(db_instance, self.rt, _fake_resources())) self.compute.driver.spawn(self.context, instance, None, From 1b35a3b263f47558e2e7791e27810741881eea43 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Mon, 12 May 2014 20:27:43 +0800 Subject: [PATCH 023/486] Change the comments of SOFT_DELETED race condition _reclaim_queued_deletes will only find instances in SOFT_DELETED state and delete them if they are only old enough. The quotas will be committed when the instance was soft-deleted so that resource can be used right after the soft-delete operation. There are some concern about the quota inconsistency for the operation, The only case that the quota might be inconsistent is the compute node died between set instance state to SOFT_DELETED and quota commit to DB; when compute node start again it will have no idea the reservation is committed or not or even expired, since it's a rare case, so marked as todo. There are some alternatives if the problem need to be fixed, e.g. 
able to find reservation when nova compute restart, so we can double check whether the quota is committed or not. Partial-Bug: #1296414 Change-Id: Idf9c179b2dd439462a646568ffd5098cd5d7851f --- nova/compute/manager.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index a3f2ceedfe..fe978cfe97 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -5490,11 +5490,12 @@ def _reclaim_queued_deletes(self, context): LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...") return - # FIXME(comstud): Dummy quota object for now. See bug 1296414. - # We have potential for inconsistency if we decide here to not - # update quotas. _delete_instance() should determine whether or - # not to update quotas based on if instance is in a SOFT_DELETED - # state or not. + # TODO(comstud, jichenjc): Dummy quota object for now See bug 1296414. + # The only case that the quota might be inconsistent is + # the compute node died between set instance state to SOFT_DELETED + # and quota commit to DB. When compute node starts again + # it will have no idea the reservation is committed or not or even + # expired, since it's a rare case, so marked as todo. quotas = quotas_obj.Quotas.from_reservations(context, None) filters = {'vm_state': vm_states.SOFT_DELETED, From 284c9dc28e70afcee056a12d0116d0da06f0d0f9 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Thu, 3 Jul 2014 00:53:49 +0800 Subject: [PATCH 024/486] Add debug log for core_filter Sometimes operator need information why the host doesn't pass the check of scheduler, this patch adds information for core filter if virtual cpu is not enough on the host. 
Change-Id: Ie05d9184435a4fc4cff1af26812c01a3cc226fff Partial-Bug: #1301830 --- nova/scheduler/filters/core_filter.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/nova/scheduler/filters/core_filter.py b/nova/scheduler/filters/core_filter.py index 45d97b9ba7..ebcaa92c3e 100644 --- a/nova/scheduler/filters/core_filter.py +++ b/nova/scheduler/filters/core_filter.py @@ -61,7 +61,17 @@ def host_passes(self, host_state, filter_properties): if vcpus_total > 0: host_state.limits['vcpu'] = vcpus_total - return (vcpus_total - host_state.vcpus_used) >= instance_vcpus + free_vcpus = vcpus_total - host_state.vcpus_used + if free_vcpus < instance_vcpus: + LOG.debug("%(host_state)s does not have %(instance_vcpus)d " + "usable vcpus, it only has %(free_vcpus)d usable " + "vcpus", + {'host_state': host_state, + 'instance_vcpus': instance_vcpus, + 'free_vcpus': free_vcpus}) + return False + + return True class CoreFilter(BaseCoreFilter): From b1cde33175bc555e1af822894e8c3f981fbb8ef6 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Thu, 19 Jun 2014 17:44:31 +0800 Subject: [PATCH 025/486] check ephemeral disk format at libvirt before use Add ephemeral disk format at libvirt. There is no check before so it will be used directly. This patch adds valid check function and validate it before eph disk was used in virt layer. 
Change-Id: I1934573e79f126e725cef4848b15c11a595161c9 Closes-Bug: #1293880 --- nova/tests/virt/libvirt/test_driver.py | 40 ++++++++++++++++++++++++++ nova/virt/libvirt/driver.py | 10 +++++++ 2 files changed, 50 insertions(+) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 88a7126871..57c54668c1 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -4904,6 +4904,34 @@ def fake_get_info(instance): ] self.assertEqual(gotFiles, wantFiles) + def test_create_ephemeral_specified_fs_not_valid(self): + CONF.set_override('default_ephemeral_format', 'ext4') + ephemerals = [{'device_type': 'disk', + 'disk_bus': 'virtio', + 'device_name': '/dev/vdb', + 'guest_format': 'dummy', + 'size': 1}] + block_device_info = { + 'ephemerals': ephemerals} + instance_ref = self.test_instance + instance_ref['image_ref'] = 1 + instance = db.instance_create(self.context, instance_ref) + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + image_meta = {'id': instance['image_ref']} + disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, + instance, + None, + image_meta) + disk_info['mapping'].pop('disk.local') + + with contextlib.nested( + mock.patch.object(utils, 'execute'), + mock.patch.object(conn, 'get_info'), + mock.patch.object(conn, '_create_domain_and_network')): + self.assertRaises(exception.InvalidBDMFormat, conn._create_image, + context, instance, disk_info['mapping'], + block_device_info=block_device_info) + def test_create_ephemeral_default(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(utils, 'execute') @@ -6919,6 +6947,18 @@ def test_default_device_names_for_instance(self): ephemerals, swap, block_device_mapping) + def test_is_supported_fs_format(self): + supported_fs = [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3, + disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS] + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + 
for fs in supported_fs: + self.assertTrue(conn.is_supported_fs_format(fs)) + + supported_fs = ['', 'dummy'] + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + for fs in supported_fs: + self.assertFalse(conn.is_supported_fs_format(fs)) + def test_hypervisor_hostname_caching(self): # Make sure that the first hostname is always returned class FakeConn(object): diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 33077d1cd8..425b2efa69 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -2662,6 +2662,12 @@ def raw(fname): for idx, eph in enumerate(driver.block_device_info_get_ephemerals( block_device_info)): disk_image = image(blockinfo.get_eph_disk(idx)) + + specified_fs = eph.get('guest_format') + if specified_fs and not self.is_supported_fs_format(specified_fs): + msg = _("%s format is not supported") % specified_fs + raise exception.InvalidBDMFormat(details=msg) + fn = functools.partial(self._create_ephemeral, fs_label='ephemeral%d' % idx, os_type=instance["os_type"], @@ -5287,6 +5293,10 @@ def default_device_names_for_instance(self, instance, root_device_name, ephemerals, swap, block_device_mapping) + def is_supported_fs_format(self, fs_type): + return fs_type in [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3, + disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS] + class HostState(object): """Manages information about the compute node through libvirt.""" From 54458334136b284bb0c45373e7cacf5c1fa0ab99 Mon Sep 17 00:00:00 2001 From: Brad Pokorny Date: Fri, 16 May 2014 03:59:36 +0000 Subject: [PATCH 026/486] Mask node.session.auth.password in volume.py _run_iscsiadm debug logs The iscsi_command object passed to _run_iscsiadm can contain passwords that get logged at debug level, so we need to sanitize the message getting logged. Adds a test to ensure the logged message is properly sanitized. 
Closes-Bug: #1320028 Change-Id: I33f1a5b698368504721b41e56266162a713b3ce6 --- nova/tests/virt/libvirt/test_volume.py | 20 ++++++++++++++++++++ nova/virt/libvirt/volume.py | 7 +++++-- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/nova/tests/virt/libvirt/test_volume.py b/nova/tests/virt/libvirt/test_volume.py index 65c3456b88..e022dc3fb6 100644 --- a/nova/tests/virt/libvirt/test_volume.py +++ b/nova/tests/virt/libvirt/test_volume.py @@ -343,6 +343,26 @@ def test_libvirt_iscsi_driver_disconnect_multipath_error(self): ['-f', 'fake-multipath-devname'], check_exit_code=[0, 1]) + def test_sanitize_log_run_iscsiadm(self): + # Tests that the parameters to the _run_iscsiadm function are sanitized + # for passwords when logged. + def fake_debug(*args, **kwargs): + self.assertIn('node.session.auth.password', args[0]) + self.assertNotIn('scrubme', args[0]) + + libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn) + connection_info = self.iscsi_connection(self.vol, self.location, + self.iqn) + iscsi_properties = connection_info['data'] + with mock.patch.object(volume.LOG, 'debug', + side_effect=fake_debug) as debug_mock: + libvirt_driver._iscsiadm_update(iscsi_properties, + 'node.session.auth.password', + 'scrubme') + # we don't care what the log message is, we just want to make sure + # our stub method is called which asserts the password is scrubbed + self.assertTrue(debug_mock.called) + def iser_connection(self, volume, location, iqn): return { 'driver_volume_type': 'iser', diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py index 1f4f85cb03..775822d4ef 100644 --- a/nova/virt/libvirt/volume.py +++ b/nova/virt/libvirt/volume.py @@ -231,8 +231,11 @@ def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs): '-p', iscsi_properties['target_portal'], *iscsi_command, run_as_root=True, check_exit_code=check_exit_code) - LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s", - {'command': iscsi_command, 'out': out, 
'err': err}) + msg = ('iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s' % + {'command': iscsi_command, 'out': out, 'err': err}) + # NOTE(bpokorny): iscsi_command can contain passwords so we need to + # sanitize the password in the message. + LOG.debug(logging.mask_password(msg)) return (out, err) def _iscsiadm_update(self, iscsi_properties, property_key, property_value, From 9cea7ad36c3512b854beda5bd226a57e65884d7a Mon Sep 17 00:00:00 2001 From: Nikola Dipanov Date: Thu, 3 Jul 2014 12:38:39 +0200 Subject: [PATCH 027/486] Fix virt BDM __setattr__ and __getattr__ Previously we would only allow these to work on fields that are found in the _proxy_as_attr collection of the object and we would unconditionally raise an AttributeError otherwise. This behaviour is too restrictive as it prevents us from using mock on these objects properly, Instead we now delegate back to the parent class for a more natural behaviour. Change-Id: I5c221eef8f05c66ac2e2fb4abf645748407462c0 --- nova/tests/virt/test_block_device.py | 2 -- nova/virt/block_device.py | 6 ++---- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/nova/tests/virt/test_block_device.py b/nova/tests/virt/test_block_device.py index ae6d16ed1e..f34d18b120 100644 --- a/nova/tests/virt/test_block_device.py +++ b/nova/tests/virt/test_block_device.py @@ -195,8 +195,6 @@ def _test_driver_device(self, name): for passthru in test_bdm._proxy_as_attr: self.assertEqual(getattr(test_bdm, passthru), getattr(test_bdm._bdm_obj, passthru)) - for no_pass in set(db_bdm.keys()) - test_bdm._proxy_as_attr: - self.assertRaises(AttributeError, getattr, test_bdm, no_pass) # Make sure that all others raise _invalidType for other_name, cls in self.driver_classes.iteritems(): diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py index 67b3064021..098335d57b 100644 --- a/nova/virt/block_device.py +++ b/nova/virt/block_device.py @@ -101,15 +101,13 @@ def __getattr__(self, name): if name in self._proxy_as_attr: return 
getattr(self._bdm_obj, name) else: - raise AttributeError("Cannot access %s on DriverBlockDevice " - "class" % name) + super(DriverBlockDevice, self).__getattr__(name) def __setattr__(self, name, value): if name in self._proxy_as_attr: return setattr(self._bdm_obj, name, value) else: - raise AttributeError("Cannot access %s on DriverBlockDevice " - "class" % name) + super(DriverBlockDevice, self).__setattr__(name, value) def _transform(self): """Transform bdm to the format that is passed to drivers.""" From d19c75c19d2de8b20e82e6de9413ba53671ad7fb Mon Sep 17 00:00:00 2001 From: Thang Pham Date: Thu, 5 Jun 2014 11:43:18 -0400 Subject: [PATCH 028/486] libvirt: Save device_path in connection_info when booting from volume If you boot an instance from a volume and later terminate it, the libvirt volume driver disconnect_volume method does not have the 'device_path' key in connection_info['data']. However, if you attach a volume to an existing instance and then detach it, the disconnect_volume method would have the 'device_path' key in connection_info['data']. Having the 'device_path' key would be useful for some volume drivers to determine the device path of the volume. This patch saves the 'device_path' in connection_info['data'] when _create_domain_and_network is called, so it could be later used. 
Change-Id: I8ebb5f3c2e7a81b11d776f8c0a15f3491ed273be Closes-Bug: #1291007 --- nova/tests/virt/libvirt/test_driver.py | 82 ++++++++++++++++++++++++++ nova/virt/libvirt/driver.py | 5 +- 2 files changed, 85 insertions(+), 2 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index d2e5a95b13..bf457adb2a 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -7319,6 +7319,88 @@ def test_create_with_network_events_neutron_failed_fatal_error( def test_create_with_network_events_non_neutron(self, is_neutron): self._test_create_with_network_events() + @mock.patch('nova.volume.encryptors.get_encryption_metadata') + @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm') + def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata): + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = fake_instance.fake_instance_obj(mock.sentinel.ctx) + mock_dom = mock.MagicMock() + mock_encryption_meta = mock.MagicMock() + get_encryption_metadata.return_value = mock_encryption_meta + + fake_xml = """ + + instance-00000001 + 1048576 + 1 + + + + + + + + + """ + fake_volume_id = "fake-volume-id" + connection_info = {"driver_volume_type": "fake", + "data": {"access_mode": "rw", + "volume_id": fake_volume_id}} + + def fake_getitem(*args, **kwargs): + fake_bdm = {'connection_info': connection_info, + 'mount_device': '/dev/vda'} + return fake_bdm.get(args[0]) + + mock_volume = mock.MagicMock() + mock_volume.__getitem__.side_effect = fake_getitem + bdi = {'block_device_mapping': [mock_volume]} + network_info = [network_model.VIF(id='1'), + network_model.VIF(id='2', active=True)] + disk_info = {'bus': 'virtio', 'type': 'file', + 'dev': 'vda'} + get_info_from_bdm.return_value = disk_info + + with contextlib.nested( + mock.patch.object(conn, '_connect_volume'), + mock.patch.object(conn, '_get_volume_encryptor'), + mock.patch.object(conn, 'plug_vifs'), + 
mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'), + mock.patch.object(conn.firewall_driver, + 'prepare_instance_filter'), + mock.patch.object(conn, '_create_domain'), + mock.patch.object(conn.firewall_driver, 'apply_instance_filter'), + ) as (connect_volume, get_volume_encryptor, plug_vifs, + setup_basic_filtering, prepare_instance_filter, create_domain, + apply_instance_filter): + connect_volume.return_value = mock.MagicMock( + source_path='/path/fake-volume1') + create_domain.return_value = mock_dom + + domain = conn._create_domain_and_network(self.context, fake_xml, + instance, network_info, + block_device_info=bdi) + + get_info_from_bdm.assert_called_once_with(CONF.libvirt.virt_type, + mock_volume) + connect_volume.assert_called_once_with(connection_info, disk_info) + self.assertEqual(connection_info['data']['device_path'], + '/path/fake-volume1') + mock_volume.save.assert_called_once_with(self.context) + get_encryption_metadata.assert_called_once_with(self.context, + conn._volume_api, fake_volume_id, connection_info) + get_volume_encryptor.assert_called_once_with(connection_info, + mock_encryption_meta) + plug_vifs.assert_called_once_with(instance, network_info) + setup_basic_filtering.assert_called_once_with(instance, + network_info) + prepare_instance_filter.assert_called_once_with(instance, + network_info) + create_domain.assert_called_once_with(fake_xml, instance=instance, + launch_flags=0, + power_on=True) + self.assertEqual(mock_dom, domain) + def test_get_neutron_events(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) network_info = [network_model.VIF(id='1'), diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 7d337a0b69..06f94e6955 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -3604,12 +3604,13 @@ def _create_domain_and_network(self, context, xml, instance, network_info, conf = self._connect_volume(connection_info, disk_info) # cache device_path in 
connection_info -- required by encryptors - if (not reboot and 'data' in connection_info and - 'volume_id' in connection_info['data']): + if 'data' in connection_info: connection_info['data']['device_path'] = conf.source_path vol['connection_info'] = connection_info vol.save(context) + if (not reboot and 'data' in connection_info and + 'volume_id' in connection_info['data']): volume_id = connection_info['data']['volume_id'] encryption = encryptors.get_encryption_metadata( context, self._volume_api, volume_id, connection_info) From 206e7a34c63d17198ffbc0fa929f00e18c29fb11 Mon Sep 17 00:00:00 2001 From: Phil Day Date: Thu, 1 May 2014 14:23:47 +0000 Subject: [PATCH 029/486] Add support for user_id based authentication with Neutron Keystone v3 supports non-unique project/user names, so until the Neutron client provides support for the V3 API and domain based authentication Nova should allow user ID for admin authentication. Change-Id: Idad27ff4206d6ba0bb9aedbbff5172c3144ad3c6 DocImpact: Adds new flag, neutron.admin_user_id --- nova/network/neutronv2/__init__.py | 5 ++++- nova/network/neutronv2/api.py | 8 +++++--- nova/tests/network/test_neutronv2.py | 4 +++- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/nova/network/neutronv2/__init__.py b/nova/network/neutronv2/__init__.py index 4a0b76adbb..e442ae8e44 100644 --- a/nova/network/neutronv2/__init__.py +++ b/nova/network/neutronv2/__init__.py @@ -49,7 +49,10 @@ def _get_client(token=None, admin=False): } if admin: - params['username'] = CONF.neutron.admin_username + if CONF.neutron.admin_user_id: + params['user_id'] = CONF.neutron.admin_user_id + else: + params['username'] = CONF.neutron.admin_username if CONF.neutron.admin_tenant_id: params['tenant_id'] = CONF.neutron.admin_tenant_id else: diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index 78a6a87bc0..85ca7f65df 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -45,6 +45,8 @@ help='Timeout 
value for connecting to neutron in seconds', deprecated_group='DEFAULT', deprecated_name='neutron_url_timeout'), + cfg.StrOpt('admin_user_id', + help='User id for connecting to neutron in admin context'), cfg.StrOpt('admin_username', help='Username for connecting to neutron in admin context', deprecated_group='DEFAULT', @@ -60,9 +62,9 @@ deprecated_name='neutron_admin_tenant_id'), cfg.StrOpt('admin_tenant_name', help='Tenant name for connecting to neutron in admin context. ' - 'This option is mutually exclusive with ' - 'admin_tenant_id. Note that with Keystone V3 ' - 'tenant names are only unique within a domain.', + 'This option will be ignored if neutron_admin_tenant_id ' + 'is set. Note that with Keystone V3 tenant names are ' + 'only unique within a domain.', deprecated_group='DEFAULT', deprecated_name='neutron_admin_tenant_name'), cfg.StrOpt('region_name', diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index fc87055723..bcf880fa17 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -2441,6 +2441,7 @@ def client_mock(*args, **kwargs): self.flags(url_timeout=30, group='neutron') if use_id: self.flags(admin_tenant_id='admin_tenant_id', group='neutron') + self.flags(admin_user_id='admin_user_id', group='neutron') if admin_context: my_context = context.get_admin_context() @@ -2451,7 +2452,6 @@ def client_mock(*args, **kwargs): kwargs = { 'auth_url': CONF.neutron.admin_auth_url, 'password': CONF.neutron.admin_password, - 'username': CONF.neutron.admin_username, 'endpoint_url': CONF.neutron.url, 'auth_strategy': None, 'timeout': CONF.neutron.url_timeout, @@ -2460,8 +2460,10 @@ def client_mock(*args, **kwargs): 'token': None} if use_id: kwargs['tenant_id'] = CONF.neutron.admin_tenant_id + kwargs['user_id'] = CONF.neutron.admin_user_id else: kwargs['tenant_name'] = CONF.neutron.admin_tenant_name + kwargs['username'] = CONF.neutron.admin_username 
client.Client.__init__(**kwargs).WithSideEffects(client_mock) self.mox.ReplayAll() From 5856c3e585891103767eda87035dee8ecaee32ab Mon Sep 17 00:00:00 2001 From: jichenjc Date: Thu, 19 Jun 2014 18:07:52 +0800 Subject: [PATCH 030/486] Format eph disk with specified format in libvirt novaclient has following command parameters: --ephemeral size=[,format=] Create and attach a local ephemeral block device of GB and format it to . so mkfs should use the specified format instead of using default format. Change-Id: I7a8753284d7b1da1a1203e85b430bd0c5012937a Closes-Bug: #1280132 --- nova/tests/virt/libvirt/test_driver.py | 11 +++++++++++ nova/virt/disk/api.py | 16 +++++++++------- nova/virt/libvirt/driver.py | 8 +++++--- 3 files changed, 25 insertions(+), 10 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 57c54668c1..abb9a01f2e 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -4904,6 +4904,17 @@ def fake_get_info(instance): ] self.assertEqual(gotFiles, wantFiles) + @mock.patch.object(utils, 'execute') + def test_create_ephemeral_specified_fs(self, mock_exec): + self.flags(default_ephemeral_format='ext3') + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux', + is_block_dev=True, max_size=20, + specified_fs='ext4') + mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L', + 'myVol', '/dev/something', + run_as_root=True) + def test_create_ephemeral_specified_fs_not_valid(self): CONF.set_override('default_ephemeral_format', 'ext4') ephemerals = [{'device_type': 'disk', diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py index 8b921912ce..7bb4b23fbd 100644 --- a/nova/virt/disk/api.py +++ b/nova/virt/disk/api.py @@ -88,8 +88,8 @@ FS_FORMAT_NTFS = "ntfs" FS_FORMAT_VFAT = "vfat" -_DEFAULT_FS_BY_OSTYPE = {'linux': 'ext3', - 'windows': 'ntfs'} +_DEFAULT_FS_BY_OSTYPE = {'linux': 
FS_FORMAT_EXT3, + 'windows': FS_FORMAT_NTFS} for s in CONF.virt_mkfs: # NOTE(yamahata): mkfs command may includes '=' for its options. @@ -105,7 +105,7 @@ def get_fs_type_for_os_type(os_type): return os_type if _MKFS_COMMAND.get(os_type) else 'default' -def mkfs(os_type, fs_label, target, run_as_root=True): +def mkfs(os_type, fs_label, target, run_as_root=True, specified_fs=None): """Format a file or block device using a user provided command for each os type. If user has not provided any configuration, @@ -119,10 +119,12 @@ def mkfs(os_type, fs_label, target, run_as_root=True): if mkfs_command: utils.execute(*mkfs_command.split(), run_as_root=run_as_root) else: - default_fs = CONF.default_ephemeral_format - if not default_fs: - default_fs = _DEFAULT_FS_BY_OSTYPE.get(os_type, 'ext3') - utils.mkfs(default_fs, target, fs_label, run_as_root=run_as_root) + if not specified_fs: + specified_fs = CONF.default_ephemeral_format + if not specified_fs: + specified_fs = _DEFAULT_FS_BY_OSTYPE.get(os_type, 'ext3') + + utils.mkfs(specified_fs, target, fs_label, run_as_root=run_as_root) def resize2fs(image, check_exit_code=False, run_as_root=False): diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 425b2efa69..6351feb728 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -2459,12 +2459,13 @@ def _create_local(target, local_size, unit='G', def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type, is_block_dev=False, - max_size=None): + max_size=None, specified_fs=None): if not is_block_dev: self._create_local(target, ephemeral_size) # Run as root only for block devices. 
- disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev) + disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev, + specified_fs=specified_fs) @staticmethod def _create_swap(target, swap_mb, max_size=None): @@ -2678,7 +2679,8 @@ def raw(fname): fetch_func=fn, filename=fname, size=size, - ephemeral_size=eph['size']) + ephemeral_size=eph['size'], + specified_fs=specified_fs) if 'disk.swap' in disk_mapping: mapping = disk_mapping['disk.swap'] From 47898ba8f9526c88a03209dbc35a59d90b79e809 Mon Sep 17 00:00:00 2001 From: Vladik Romanovsky Date: Mon, 12 May 2014 17:24:48 -0400 Subject: [PATCH 031/486] Do not fail cell's instance deletion, if it's missing info_cache Currently the methods in cell messaging are trying to refresh the instance. However, in some corner cases info_cache is not being created for instances in ERROR state. This makes the delete operation, of such instances, to fail, while it should not. Handling the InstanceInfoCacheNotFound exception and not re-raising it, for delete operations. 
Closes-Bug: #1316373 Change-Id: I33c33e3ac1180e8293d950d60fb126e325a2c0cf --- nova/cells/messaging.py | 4 ++++ nova/tests/cells/test_cells_messaging.py | 27 ++++++++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py index d73de85490..1e352695f4 100644 --- a/nova/cells/messaging.py +++ b/nova/cells/messaging.py @@ -836,6 +836,10 @@ def _call_compute_api_with_obj(self, ctxt, instance, method, *args, instance = {'uuid': instance.uuid} self.msg_runner.instance_destroy_at_top(ctxt, instance) + except exception.InstanceInfoCacheNotFound: + if method != 'delete': + raise + fn = getattr(self.compute_api, method, None) return fn(ctxt, instance, *args, **kwargs) diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py index 3afba4a200..0ebf7ada79 100644 --- a/nova/tests/cells/test_cells_messaging.py +++ b/nova/tests/cells/test_cells_messaging.py @@ -17,6 +17,8 @@ Tests For Cells Messaging module """ +import contextlib + import mock import mox from oslo.config import cfg @@ -1103,6 +1105,31 @@ def test_call_compute_api_with_obj(self): extra_properties='props') self.assertEqual('foo', result) + def test_call_compute_api_with_obj_no_cache(self): + instance = objects.Instance() + instance.uuid = uuidutils.generate_uuid() + error = exception.InstanceInfoCacheNotFound( + instance_uuid=instance.uuid) + with mock.patch.object(instance, 'refresh', side_effect=error): + self.assertRaises(exception.InstanceInfoCacheNotFound, + self.tgt_methods_cls._call_compute_api_with_obj, + self.ctxt, instance, 'snapshot') + + def test_call_delete_compute_api_with_obj_no_cache(self): + instance = objects.Instance() + instance.uuid = uuidutils.generate_uuid() + error = exception.InstanceInfoCacheNotFound( + instance_uuid=instance.uuid) + with contextlib.nested( + mock.patch.object(instance, 'refresh', + side_effect=error), + mock.patch.object(self.tgt_compute_api, 'delete')) as (inst, + delete): + 
self.tgt_methods_cls._call_compute_api_with_obj(self.ctxt, + instance, + 'delete') + delete.assert_called_once_with(self.ctxt, instance) + def test_call_compute_with_obj_unknown_instance(self): instance = objects.Instance() instance.uuid = uuidutils.generate_uuid() From 65341b2547c5eedef720f6a6742cdfe436141907 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 8 Jul 2014 17:31:27 -0500 Subject: [PATCH 032/486] libvirt: Avoid Glance.show on hard_reboot When a Libvirt host is rebooted, we can optionally choose to automatically spin back up the instances using the `resume_state_on_host_boot` call. This, in turn, uses `_hard_reboot` to bring back the instances. The problem is that `_get_guest_xml` which `_hard_reboot` is using is always making a call to Glance.show since `image_meta` isn't being passed into it. In addition to this being an extra latency-heavy call, this is big problem because the request is *server-generated* not *user-generated* so we won't have the necessary user-request context to make the Glance call. In the absense of a general user-impersonation mechanism, the current workaround is to use cached image-metadata, which works in this case as well. So the fix is to pass that `image_meta` that we already pull from instance metadata in `_hard_reboot` and pass it into `_get_guest_xml` and thus avoid the extra request to Glance. 
Change-Id: I2203ac709405ee784ee5ec017aa475575a46a0df Closes-Bug: 1339386 --- nova/tests/virt/libvirt/test_driver.py | 58 +++++++++++++++++++++++--- nova/virt/libvirt/driver.py | 3 +- 2 files changed, 54 insertions(+), 7 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 7975dd2447..e68d6b4a93 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -5664,7 +5664,12 @@ def fake_get_info(instance_name): conn._destroy(instance) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, block_device_info) + + system_meta = utils.instance_sys_meta(instance) + image_meta = utils.get_image_from_system_metadata(system_meta) + conn._get_guest_xml(self.context, instance, network_info, disk_info, + image_meta=image_meta, block_device_info=block_device_info, write_to_disk=True).AndReturn(dummyxml) disk_info_json = '[{"virt_disk_size": 2}]' @@ -5681,6 +5686,50 @@ def fake_get_info(instance_name): conn._hard_reboot(self.context, instance, network_info, block_device_info) + @mock.patch('nova.openstack.common.loopingcall.FixedIntervalLoopingCall') + @mock.patch('nova.pci.pci_manager.get_instance_pci_devs') + @mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use') + @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network') + @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing') + @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info') + @mock.patch('nova.virt.libvirt.utils.write_to_file') + @mock.patch('nova.virt.libvirt.utils.get_instance_path') + @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config') + @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info') + @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy') + def test_hard_reboot_doesnt_call_glance_show(self, + mock_destroy, mock_get_disk_info, mock_get_guest_config, + mock_get_instance_path, mock_write_to_file, + 
mock_get_instance_disk_info, mock_create_images_and_backing, + mock_create_domand_and_network, mock_prepare_pci_devices_for_use, + mock_get_instance_pci_devs, mock_looping_call): + """For a hard reboot, we shouldn't need an additional call to glance + to get the image metadata. + + This is important for automatically spinning up instances on a + host-reboot, since we won't have a user request context that'll allow + the Glance request to go through. We have to rely on the cached image + metadata, instead. + + https://bugs.launchpad.net/nova/+bug/1339386 + """ + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + + instance = db.instance_create(self.context, self.test_instance) + + network_info = mock.MagicMock() + block_device_info = mock.MagicMock() + mock_get_disk_info.return_value = {} + mock_get_guest_config.return_value = mock.MagicMock() + mock_get_instance_path.return_value = '/foo' + mock_looping_call.return_value = mock.MagicMock() + conn._image_api = mock.MagicMock() + + conn._hard_reboot(self.context, instance, network_info, + block_device_info) + + self.assertFalse(conn._image_api.get.called) + def test_power_on(self): def _check_xml_bus(name, xml, block_info): @@ -5733,12 +5782,9 @@ def _get_inst(with_meta=True): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with contextlib.nested( - mock.patch.object(conn, '_destroy', return_value=None), - mock.patch.object(conn, '_create_images_and_backing'), - mock.patch.object(conn, '_create_domain_and_network'), - mock.patch('nova.image.glance.get_remote_image_service', - return_value=(image_service_mock, - instance['image_ref']))): + mock.patch.object(conn, '_destroy', return_value=None), + mock.patch.object(conn, '_create_images_and_backing'), + mock.patch.object(conn, '_create_domain_and_network')): conn.get_info = fake_get_info conn._get_instance_disk_info = _check_xml_bus conn._hard_reboot(self.context, instance, network_info, diff --git a/nova/virt/libvirt/driver.py 
b/nova/virt/libvirt/driver.py index 9bd75fa0f3..be30c0c22c 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -2057,6 +2057,7 @@ def _hard_reboot(self, context, instance, network_info, # does we need to (re)generate the xml after the images # are in place. xml = self._get_guest_xml(context, instance, network_info, disk_info, + image_meta=image_meta, block_device_info=block_device_info, write_to_disk=True) @@ -3433,7 +3434,7 @@ def _get_guest_config(self, instance, network_info, image_meta, def _get_guest_xml(self, context, instance, network_info, disk_info, image_meta=None, rescue=None, block_device_info=None, write_to_disk=False): - # We should get image metadata every time for generating xml + if image_meta is None: image_ref = instance['image_ref'] image_meta = compute_utils.get_image_metadata( From a167654eb3c67bd8b2d0c31614da013dfbfdf183 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Tue, 24 Jun 2014 23:08:20 +0000 Subject: [PATCH 033/486] add get_by_metadata_key to AggregateList object This change adds the get_by_metadata_key function to the AggregateList object and the aggregate_get_by_metadata_key function to the DB API. Previously, the only DB API function available for finding aggregates matching a metadata key returned a dictionary of aggregated metadata. The aggregate_get_by_metadata_key function returns all rows matching the specified metadata key, and is called by the existing aggregate_host_get_by_metadata_key DB API function. The get_by_metadata_key function returns a list of Aggregate objects matching the specified metadata key. 
Related to blueprint compute-manager-objects-juno Change-Id: If50c9f613dd192ab44577f26a81dd5b40e3a7af7 --- nova/db/api.py | 4 ++++ nova/db/sqlalchemy/api.py | 21 ++++++++++++++------- nova/objects/aggregate.py | 24 +++++++++++++++++++++++- nova/tests/objects/test_aggregate.py | 25 +++++++++++++++++++++++++ nova/tests/objects/test_objects.py | 2 +- 5 files changed, 67 insertions(+), 9 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index ee7ad22209..9bc59a38d2 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -1769,6 +1769,10 @@ def aggregate_host_get_by_metadata_key(context, key): return IMPL.aggregate_host_get_by_metadata_key(context, key) +def aggregate_get_by_metadata_key(context, key): + return IMPL.aggregate_get_by_metadata_key(context, key) + + def aggregate_update(context, aggregate_id, values): """Update the attributes of an aggregates. diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 39a76d6a22..b12b35a56d 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -5094,13 +5094,7 @@ def aggregate_metadata_get_by_metadata_key(context, aggregate_id, key): def aggregate_host_get_by_metadata_key(context, key): - query = model_query(context, models.Aggregate) - query = query.join("_metadata") - query = query.filter(models.AggregateMetadata.key == key) - query = query.options(contains_eager("_metadata")) - query = query.options(joinedload("_hosts")) - rows = query.all() - + rows = aggregate_get_by_metadata_key(context, key) metadata = collections.defaultdict(set) for agg in rows: for agghost in agg._hosts: @@ -5108,6 +5102,19 @@ def aggregate_host_get_by_metadata_key(context, key): return dict(metadata) +def aggregate_get_by_metadata_key(context, key): + """Return rows that match metadata key. + + :param key Matches metadata key. 
+ """ + query = model_query(context, models.Aggregate) + query = query.join("_metadata") + query = query.filter(models.AggregateMetadata.key == key) + query = query.options(contains_eager("_metadata")) + query = query.options(joinedload("_hosts")) + return query.all() + + def aggregate_update(context, aggregate_id, values): session = get_session() diff --git a/nova/objects/aggregate.py b/nova/objects/aggregate.py index 383f6b51cd..0d58cb85ee 100644 --- a/nova/objects/aggregate.py +++ b/nova/objects/aggregate.py @@ -151,7 +151,8 @@ class AggregateList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added key argument to get_by_host() # Aggregate <= version 1.1 - VERSION = '1.1' + # Version 1.2: Added get_by_metadata_key + VERSION = '1.2' fields = { 'objects': fields.ListOfObjectsField('Aggregate'), @@ -160,8 +161,21 @@ class AggregateList(base.ObjectListBase, base.NovaObject): '1.0': '1.1', '1.1': '1.1', # NOTE(danms): Aggregate was at 1.1 before we added this + '1.2': '1.1', } + @classmethod + def _filter_db_aggregates(cls, db_aggregates, hosts): + if not isinstance(hosts, set): + hosts = set(hosts) + filtered_aggregates = [] + for db_aggregate in db_aggregates: + for host in db_aggregate['hosts']: + if host in hosts: + filtered_aggregates.append(db_aggregate) + break + return filtered_aggregates + @base.remotable_classmethod def get_all(cls, context): db_aggregates = db.aggregate_get_all(context) @@ -173,3 +187,11 @@ def get_by_host(cls, context, host, key=None): db_aggregates = db.aggregate_get_by_host(context, host, key=key) return base.obj_make_list(context, cls(context), objects.Aggregate, db_aggregates) + + @base.remotable_classmethod + def get_by_metadata_key(cls, context, key, hosts=None): + db_aggregates = db.aggregate_get_by_metadata_key(context, key=key) + if hosts: + db_aggregates = cls._filter_db_aggregates(db_aggregates, hosts) + return base.obj_make_list(context, cls(context), objects.Aggregate, + db_aggregates) 
diff --git a/nova/tests/objects/test_aggregate.py b/nova/tests/objects/test_aggregate.py index ca665474a4..a1d016bb2b 100644 --- a/nova/tests/objects/test_aggregate.py +++ b/nova/tests/objects/test_aggregate.py @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. +import mock + from nova import db from nova import exception from nova.objects import aggregate @@ -163,6 +165,29 @@ def test_by_host(self): self.assertEqual(1, len(aggs)) self.compare_obj(aggs[0], fake_aggregate, subs=SUBS) + @mock.patch('nova.db.aggregate_get_by_metadata_key') + def test_get_by_metadata_key(self, get_by_metadata_key): + get_by_metadata_key.return_value = [fake_aggregate] + aggs = aggregate.AggregateList.get_by_metadata_key( + self.context, 'this') + self.assertEqual(1, len(aggs)) + self.compare_obj(aggs[0], fake_aggregate, subs=SUBS) + + @mock.patch('nova.db.aggregate_get_by_metadata_key') + def test_get_by_metadata_key_and_hosts_no_match(self, get_by_metadata_key): + get_by_metadata_key.return_value = [fake_aggregate] + aggs = aggregate.AggregateList.get_by_metadata_key( + self.context, 'this', hosts=['baz']) + self.assertEqual(0, len(aggs)) + + @mock.patch('nova.db.aggregate_get_by_metadata_key') + def test_get_by_metadata_key_and_hosts_match(self, get_by_metadata_key): + get_by_metadata_key.return_value = [fake_aggregate] + aggs = aggregate.AggregateList.get_by_metadata_key( + self.context, 'this', hosts=['foo', 'bar']) + self.assertEqual(1, len(aggs)) + self.compare_obj(aggs[0], fake_aggregate, subs=SUBS) + class TestAggregateObject(test_objects._LocalTest, _TestAggregateObject): diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index 5a84555649..38a5c3fda3 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -895,7 +895,7 @@ def test_object_serialization_iterables(self): 'Agent': '1.0-c4ff8a833aee8ae44ab8aed1a171273d', 'AgentList': 
'1.0-f8b860e1f2ce80e676ba1a37ddf86e4f', 'Aggregate': '1.1-f5d477be06150529a9b2d27cc49030b5', - 'AggregateList': '1.1-3e67b6a4840b19c797504cc6056b27ff', + 'AggregateList': '1.2-504137b7ec3855b00d01f165dcebc23e', 'BlockDeviceMapping': '1.1-9968ffe513e7672484b0f528b034cd0f', 'BlockDeviceMappingList': '1.2-d6d7df540ca149dda78b22b4b10bdef3', 'ComputeNode': '1.3-b3b8935a99ca48621dc9ba271d5ed668', From a15fa17efc9d0b0a92bce9665cb3c95a4790d3eb Mon Sep 17 00:00:00 2001 From: melanie witt Date: Wed, 25 Jun 2014 04:33:40 +0000 Subject: [PATCH 034/486] object-ify availability_zones This change converts availability_zones to use the Aggregate and Service objects instead of direct database access. Related to blueprint compute-manager-objects-juno Change-Id: I2ec7373f063ed728dde2af3b7c8259f4391885bc --- nova/availability_zones.py | 40 +++++++++++++++---- .../compute/contrib/test_availability_zone.py | 16 ++++---- nova/tests/objects/test_service.py | 14 +++++-- 3 files changed, 52 insertions(+), 18 deletions(-) diff --git a/nova/availability_zones.py b/nova/availability_zones.py index 12b99409c9..a9483192da 100644 --- a/nova/availability_zones.py +++ b/nova/availability_zones.py @@ -15,9 +15,11 @@ """Availability zone helper functions.""" +import collections + from oslo.config import cfg -from nova import db +from nova import objects from nova.openstack.common import memorycache # NOTE(vish): azs don't change that often, so cache them for an hour to @@ -61,11 +63,34 @@ def _make_cache_key(host): return "azcache-%s" % host.encode('utf-8') +def _build_metadata_by_host(aggregates, hosts=None): + if hosts and not isinstance(hosts, set): + hosts = set(hosts) + metadata = collections.defaultdict(set) + for aggregate in aggregates: + for host in aggregate.hosts: + if hosts and host not in hosts: + continue + metadata[host].add(aggregate.metadata.values()[0]) + return metadata + + +def _build_metadata_by_key(aggregates): + metadata = collections.defaultdict(set) + for aggregate in 
aggregates: + for key, value in aggregate.metadata.iteritems(): + metadata[key].add(value) + return metadata + + def set_availability_zones(context, services): # Makes sure services isn't a sqlalchemy object services = [dict(service.iteritems()) for service in services] - metadata = db.aggregate_host_get_by_metadata_key(context, - key='availability_zone') + hosts = set([service['host'] for service in services]) + aggregates = objects.AggregateList.get_by_metadata_key(context, + 'availability_zone', hosts=hosts) + metadata = _build_metadata_by_host(aggregates, hosts=hosts) + # gather all of the availability zones associated with a service host for service in services: az = CONF.internal_service_availability_zone if service['topic'] == "compute": @@ -85,8 +110,9 @@ def get_host_availability_zone(context, host, conductor_api=None): metadata = conductor_api.aggregate_metadata_get_by_host( context, host, key='availability_zone') else: - metadata = db.aggregate_metadata_get_by_host( - context, host, key='availability_zone') + aggregates = objects.AggregateList.get_by_host(context, host, + key='availability_zone') + metadata = _build_metadata_by_key(aggregates) if 'availability_zone' in metadata: az = list(metadata['availability_zone'])[0] else: @@ -114,7 +140,7 @@ def get_availability_zones(context, get_only_available=False, :param with_hosts: whether to return hosts part of the AZs :type with_hosts: bool """ - enabled_services = db.service_get_all(context, False) + enabled_services = objects.ServiceList.get_all(context, disabled=False) enabled_services = set_availability_zones(context, enabled_services) available_zones = [] @@ -130,7 +156,7 @@ def get_availability_zones(context, get_only_available=False, available_zones = list(_available_zones.items()) if not get_only_available: - disabled_services = db.service_get_all(context, True) + disabled_services = objects.ServiceList.get_all(context, disabled=True) disabled_services = set_availability_zones(context, 
disabled_services) not_available_zones = [] azs = available_zones if not with_hosts else dict(available_zones) diff --git a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py index e8d3adf5be..4d0515f4a4 100644 --- a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py +++ b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py @@ -25,18 +25,20 @@ from nova import test from nova.tests.api.openstack import fakes from nova.tests import matchers +from nova.tests.objects import test_service def fake_service_get_all(context, disabled=None): def __fake_service(binary, availability_zone, created_at, updated_at, host, disabled): - return {'binary': binary, - 'availability_zone': availability_zone, - 'available_zones': availability_zone, - 'created_at': created_at, - 'updated_at': updated_at, - 'host': host, - 'disabled': disabled} + return dict(test_service.fake_service, + binary=binary, + availability_zone=availability_zone, + available_zones=availability_zone, + created_at=created_at, + updated_at=updated_at, + host=host, + disabled=disabled) if disabled: return [__fake_service("nova-compute", "zone-2", diff --git a/nova/tests/objects/test_service.py b/nova/tests/objects/test_service.py index 365c298dd2..4fdfe0c962 100644 --- a/nova/tests/objects/test_service.py +++ b/nova/tests/objects/test_service.py @@ -16,6 +16,7 @@ from nova import db from nova import exception +from nova.objects import aggregate from nova.objects import service from nova.openstack.common import timeutils from nova.tests.objects import test_compute_node @@ -163,12 +164,17 @@ def test_get_all(self): def test_get_all_with_az(self): self.mox.StubOutWithMock(db, 'service_get_all') - self.mox.StubOutWithMock(db, 'aggregate_host_get_by_metadata_key') + self.mox.StubOutWithMock(aggregate.AggregateList, + 'get_by_metadata_key') db.service_get_all(self.context, disabled=None).AndReturn( 
[dict(fake_service, topic='compute')]) - db.aggregate_host_get_by_metadata_key( - self.context, key='availability_zone').AndReturn( - {fake_service['host']: ['test-az']}) + agg = aggregate.Aggregate() + agg.name = 'foo' + agg.metadata = {'availability_zone': 'test-az'} + agg.create(self.context) + agg.hosts = [fake_service['host']] + aggregate.AggregateList.get_by_metadata_key(self.context, + 'availability_zone', hosts=set(agg.hosts)).AndReturn([agg]) self.mox.ReplayAll() services = service.ServiceList.get_all(self.context, set_zones=True) self.assertEqual(1, len(services)) From 59ceb686f2b3b93c51171d0d8e0b059b3a644906 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Mon, 23 Jun 2014 20:48:50 +0000 Subject: [PATCH 035/486] object-ify API v2 availability_zone extension This change converts the availability_zone extension to use the Service object instead of direct database access. Related to blueprint compute-manager-objects-juno Change-Id: If5baafd8ec85be1ea1cd6580ae10ef3d51366c9c --- nova/api/openstack/compute/contrib/availability_zone.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/compute/contrib/availability_zone.py b/nova/api/openstack/compute/contrib/availability_zone.py index 136cd2355c..688f0602f3 100644 --- a/nova/api/openstack/compute/contrib/availability_zone.py +++ b/nova/api/openstack/compute/contrib/availability_zone.py @@ -19,7 +19,7 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import availability_zones -from nova import db +from nova import objects from nova import servicegroup CONF = cfg.CONF @@ -103,7 +103,7 @@ def _describe_availability_zones_verbose(self, context, **kwargs): availability_zones.get_availability_zones(ctxt) # Available services - enabled_services = db.service_get_all(context, False) + enabled_services = objects.ServiceList.get_all(context, disabled=False) enabled_services = availability_zones.set_availability_zones(context, enabled_services) 
zone_hosts = {} From 8e0e61c971d82137ff870908ce360c9991a001d8 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Tue, 4 Mar 2014 08:25:44 +0800 Subject: [PATCH 036/486] Make compute api use util.check_string_length Make some string variables check in compute use util.check_string_length instead of checking by itself Change-Id: I9a5d4f647611aa00bed44afdeda5763f8fa9c35b --- nova/compute/api.py | 33 ++++++++----------- .../compute/contrib/test_keypairs.py | 6 ++-- nova/tests/compute/test_keypairs.py | 6 ++-- nova/tests/test_utils.py | 13 ++++++++ nova/utils.py | 10 ++++-- 5 files changed, 43 insertions(+), 25 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index f7cbfd5e52..d63b82f272 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -411,16 +411,14 @@ def _check_metadata_properties_quota(self, context, metadata=None): # In future, we may support more variable length strings, so we act # as if this is quota-controlled for forwards compatibility for k, v in metadata.iteritems(): - if not isinstance(k, six.string_types): - msg = _("Metadata property key '%s' is not a string.") % k - raise exception.InvalidMetadata(reason=msg) - if not isinstance(v, six.string_types): - msg = (_("Metadata property value '%(v)s' for key '%(k)s' is " - "not a string.") % {'v': v, 'k': k}) - raise exception.InvalidMetadata(reason=msg) - if len(k) == 0: - msg = _("Metadata property key blank") - raise exception.InvalidMetadata(reason=msg) + try: + utils.check_string_length(v) + utils.check_string_length(k, min_length=1) + except exception.InvalidInput as e: + raise exception.InvalidMetadata(reason=e.format_message()) + + # For backward compatible we need raise HTTPRequestEntityTooLarge + # so we need to keep InvalidMetadataSize exception here if len(k) > 255: msg = _("Metadata property key greater than 255 characters") raise exception.InvalidMetadataSize(reason=msg) @@ -3480,9 +3478,11 @@ def _validate_new_key_pair(self, context, user_id, key_name): raise 
exception.InvalidKeypair( reason=_("Keypair name contains unsafe characters")) - if not 0 < len(key_name) < 256: + try: + utils.check_string_length(key_name, min_length=1, max_length=255) + except exception.InvalidInput: raise exception.InvalidKeypair( - reason=_('Keypair name must be between ' + reason=_('Keypair name must be string and between ' '1 and 255 characters long')) count = QUOTAS.count(context, 'key_pairs', user_id) @@ -3572,9 +3572,8 @@ def validate_property(self, value, property, allowed): except AttributeError: msg = _("Security group %s is not a string or unicode") % property self.raise_invalid_property(msg) - if not val: - msg = _("Security group %s cannot be empty.") % property - self.raise_invalid_property(msg) + + utils.check_string_length(val, min_length=1, max_length=255) if allowed and not re.match(allowed, val): # Some validation to ensure that values match API spec. @@ -3586,10 +3585,6 @@ def validate_property(self, value, property, allowed): {'value': value, 'allowed': allowed, 'property': property.capitalize()}) self.raise_invalid_property(msg) - if len(val) > 255: - msg = _("Security group %s should not be greater " - "than 255 characters.") % property - self.raise_invalid_property(msg) def ensure_default(self, context): """Ensure that a context has a security group. 
diff --git a/nova/tests/api/openstack/compute/contrib/test_keypairs.py b/nova/tests/api/openstack/compute/contrib/test_keypairs.py index dd1851f056..53294ecbc7 100644 --- a/nova/tests/api/openstack/compute/contrib/test_keypairs.py +++ b/nova/tests/api/openstack/compute/contrib/test_keypairs.py @@ -111,7 +111,8 @@ def test_keypair_create_with_empty_name(self): res_dict = jsonutils.loads(res.body) self.assertEqual( 'Keypair data is invalid: ' - 'Keypair name must be between 1 and 255 characters long', + 'Keypair name must be string and between 1 ' + 'and 255 characters long', res_dict['badRequest']['message']) def test_keypair_create_with_name_too_long(self): @@ -129,7 +130,8 @@ def test_keypair_create_with_name_too_long(self): res_dict = jsonutils.loads(res.body) self.assertEqual( 'Keypair data is invalid: ' - 'Keypair name must be between 1 and 255 characters long', + 'Keypair name must be string and between 1 ' + 'and 255 characters long', res_dict['badRequest']['message']) def test_keypair_create_with_non_alphanumeric_name(self): diff --git a/nova/tests/compute/test_keypairs.py b/nova/tests/compute/test_keypairs.py index 8b8f8c10b5..06482f24b9 100644 --- a/nova/tests/compute/test_keypairs.py +++ b/nova/tests/compute/test_keypairs.py @@ -124,11 +124,13 @@ def assertInvalidKeypair(self, expected_message, name): self.assertKeyNameRaises(exception.InvalidKeypair, msg, name) def test_name_too_short(self): - msg = _('Keypair name must be between 1 and 255 characters long') + msg = _('Keypair name must be string and between 1 ' + 'and 255 characters long') self.assertInvalidKeypair(msg, '') def test_name_too_long(self): - msg = _('Keypair name must be between 1 and 255 characters long') + msg = _('Keypair name must be string and between 1 ' + 'and 255 characters long') self.assertInvalidKeypair(msg, 'x' * 256) def test_invalid_chars(self): diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index d83c086a4a..ee061de844 100644 --- a/nova/tests/test_utils.py 
+++ b/nova/tests/test_utils.py @@ -754,6 +754,19 @@ def test_check_string_length(self): utils.check_string_length, 'a' * 256, 'name', max_length=255) + def test_check_string_length_noname(self): + self.assertIsNone(utils.check_string_length( + 'test', max_length=255)) + self.assertRaises(exception.InvalidInput, + utils.check_string_length, + 11, max_length=255) + self.assertRaises(exception.InvalidInput, + utils.check_string_length, + '', min_length=1) + self.assertRaises(exception.InvalidInput, + utils.check_string_length, + 'a' * 256, max_length=255) + class ValidateIntegerTestCase(test.NoDBTestCase): def test_valid_inputs(self): diff --git a/nova/utils.py b/nova/utils.py index bf5a522926..856d5f56e2 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -954,7 +954,7 @@ def wrapper(*args, **kwargs): return wrapper -def check_string_length(value, name, min_length=0, max_length=None): +def check_string_length(value, name=None, min_length=0, max_length=None): """Check the length of specified string :param value: the value of the string :param name: the name of the string @@ -962,9 +962,15 @@ def check_string_length(value, name, min_length=0, max_length=None): :param max_length: the max_length of the string """ if not isinstance(value, six.string_types): - msg = _("%s is not a string or unicode") % name + if name is None: + msg = _("The input is not a string or unicode") + else: + msg = _("%s is not a string or unicode") % name raise exception.InvalidInput(message=msg) + if name is None: + name = value + if len(value) < min_length: msg = _("%(name)s has a minimum character requirement of " "%(min_length)s.") % {'name': name, 'min_length': min_length} From 541aa57dbcc75f1a217a1bfa06daa1a1b5bc6142 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Tue, 17 Jun 2014 08:40:45 +0800 Subject: [PATCH 037/486] Remove pause/unpause NotImplementedError API layer There are 2 kinds of RPC call from API layer to compute layer, one is cast and another is call. 
For cast, the RPC message will be posted and the API service will not wait for the message to be processed. So it won't be able to catch the exception raised in compute layer, so catch and handle the exception is useless and error leading. This patch removes code in API layer for pause and unpause functions. Change-Id: I7cbb8179b3eec8496b90ff5cf07397522d52c5de --- .../compute/contrib/admin_actions.py | 6 ---- .../compute/contrib/test_admin_actions.py | 33 ------------------- 2 files changed, 39 deletions(-) diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py index e01939ff42..0df6c5ee78 100644 --- a/nova/api/openstack/compute/contrib/admin_actions.py +++ b/nova/api/openstack/compute/contrib/admin_actions.py @@ -62,9 +62,6 @@ def _pause(self, req, id, body): except exception.InstanceNotFound: msg = _("Server not found") raise exc.HTTPNotFound(explanation=msg) - except NotImplementedError: - msg = _("Virt driver does not implement pause function.") - raise exc.HTTPNotImplemented(explanation=msg) except Exception: readable = traceback.format_exc() LOG.exception(_("Compute.api::pause %s"), readable) @@ -87,9 +84,6 @@ def _unpause(self, req, id, body): except exception.InstanceNotFound: msg = _("Server not found") raise exc.HTTPNotFound(explanation=msg) - except NotImplementedError: - msg = _("Virt driver does not implement unpause function.") - raise exc.HTTPNotImplemented(explanation=msg) except Exception: readable = traceback.format_exc() LOG.exception(_("Compute.api::unpause %s"), readable) diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py index 9a1a05a427..13b811c7f2 100644 --- a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py +++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py @@ -138,28 +138,6 @@ def _test_invalid_state(self, action, method=None, body_map=None, 
self.mox.VerifyAll() self.mox.UnsetStubs() - def _test_not_implemented_state(self, action, method=None, - error_text=None): - if method is None: - method = action - body_map = {} - compute_api_args_map = {} - instance = self._stub_instance_get() - args, kwargs = compute_api_args_map.get(action, ((), {})) - getattr(self.compute_api, method)(self.context, instance, - *args, **kwargs).AndRaise( - NotImplementedError()) - self.mox.ReplayAll() - - res = self._make_request('/servers/%s/action' % instance['uuid'], - {action: body_map.get(action)}) - self.assertEqual(501, res.status_int) - self.assertIn(error_text, res.body) - # Do these here instead of tearDown because this method is called - # more than once for the same test case - self.mox.VerifyAll() - self.mox.UnsetStubs() - def _test_locked_instance(self, action, method=None, body_map=None, compute_api_args_map=None): if method is None: @@ -218,17 +196,6 @@ def test_actions_raise_conflict_on_invalid_state(self): # Re-mock this. self.mox.StubOutWithMock(self.compute_api, 'get') - def test_actions_raise_on_not_implemented(self): - tests = [ - ('pause', 'Virt driver does not implement pause function.'), - ('unpause', 'Virt driver does not implement unpause function.') - ] - for (action, error_text) in tests: - self.mox.StubOutWithMock(self.compute_api, action) - self._test_not_implemented_state(action, error_text=error_text) - # Re-mock this. - self.mox.StubOutWithMock(self.compute_api, 'get') - def test_actions_with_non_existed_instance(self): actions = ['pause', 'unpause', 'suspend', 'resume', 'resetNetwork', 'injectNetworkInfo', 'lock', From 28b37c1a707f5e958221b4ee28c4832d081eb706 Mon Sep 17 00:00:00 2001 From: Nikola Dipanov Date: Wed, 9 Jul 2014 11:59:15 +0200 Subject: [PATCH 038/486] Prepend '/dev/' to supplied dev names in the API Make sure that all user input device names end up in the database with the '/dev/' prefix. 
This will make things more consistent and avoid issues when attempting to parse device names. This includes two main sources - block device mapping that can be passed as part of a request or as image metadata, and root device name, passed as image metadata. Closes-bug: #1337821 Change-Id: Ibda82f511be99f1a68f2f77c72601a1b006be7a0 --- nova/block_device.py | 2 ++ nova/compute/api.py | 5 +++-- nova/tests/db/test_db_api.py | 22 +++++++++++----------- nova/tests/test_block_device.py | 18 ++++++++++++++++++ 4 files changed, 34 insertions(+), 13 deletions(-) diff --git a/nova/block_device.py b/nova/block_device.py index fbc87099db..b6dd12b64d 100644 --- a/nova/block_device.py +++ b/nova/block_device.py @@ -82,6 +82,8 @@ def __init__(self, bdm_dict=None, do_not_default=None): do_not_default = do_not_default or set() self._validate(bdm_dict) + if bdm_dict.get('device_name'): + bdm_dict['device_name'] = prepend_dev(bdm_dict['device_name']) # NOTE (ndipanov): Never default db fields self.update( dict((field, None) diff --git a/nova/compute/api.py b/nova/compute/api.py index c025616711..b8c30999b2 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -757,8 +757,9 @@ def _validate_and_build_base_options(self, context, instance_type, key_name) key_data = key_pair.public_key - root_device_name = block_device.properties_root_device_name( - boot_meta.get('properties', {})) + root_device_name = block_device.prepend_dev( + block_device.properties_root_device_name( + boot_meta.get('properties', {}))) system_metadata = flavors.save_flavor_info( dict(), instance_type) diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py index ad7398be66..1b6273d0ce 100644 --- a/nova/tests/db/test_db_api.py +++ b/nova/tests/db/test_db_api.py @@ -4617,18 +4617,18 @@ def test_block_device_mapping_get_all_by_instance(self): uuid2 = db.instance_create(self.ctxt, {})['uuid'] bmds_values = [{'instance_uuid': uuid1, - 'device_name': 'first'}, + 'device_name': '/dev/vda'}, 
{'instance_uuid': uuid2, - 'device_name': 'second'}, + 'device_name': '/dev/vdb'}, {'instance_uuid': uuid2, - 'device_name': 'third'}] + 'device_name': '/dev/vdc'}] for bdm in bmds_values: self._create_bdm(bdm) bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1) self.assertEqual(len(bmd), 1) - self.assertEqual(bmd[0]['device_name'], 'first') + self.assertEqual(bmd[0]['device_name'], '/dev/vda') bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2) self.assertEqual(len(bmd), 2) @@ -4644,27 +4644,27 @@ def test_block_device_mapping_destroy_by_instance_and_volumne(self): vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f' vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f' - self._create_bdm({'device_name': 'fake1', 'volume_id': vol_id1}) - self._create_bdm({'device_name': 'fake2', 'volume_id': vol_id2}) + self._create_bdm({'device_name': '/dev/vda', 'volume_id': vol_id1}) + self._create_bdm({'device_name': '/dev/vdb', 'volume_id': vol_id2}) uuid = self.instance['uuid'] db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt, uuid, vol_id1) bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) self.assertEqual(len(bdms), 1) - self.assertEqual(bdms[0]['device_name'], 'fake2') + self.assertEqual(bdms[0]['device_name'], '/dev/vdb') def test_block_device_mapping_destroy_by_instance_and_device(self): - self._create_bdm({'device_name': 'fake1'}) - self._create_bdm({'device_name': 'fake2'}) + self._create_bdm({'device_name': '/dev/vda'}) + self._create_bdm({'device_name': '/dev/vdb'}) uuid = self.instance['uuid'] - params = (self.ctxt, uuid, 'fake1') + params = (self.ctxt, uuid, '/dev/vdb') db.block_device_mapping_destroy_by_instance_and_device(*params) bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) self.assertEqual(len(bdms), 1) - self.assertEqual(bdms[0]['device_name'], 'fake2') + self.assertEqual(bdms[0]['device_name'], '/dev/vda') def test_block_device_mapping_get_by_volume_id(self): 
self._create_bdm({'volume_id': 'fake_id'}) diff --git a/nova/tests/test_block_device.py b/nova/tests/test_block_device.py index 2f0792a202..0296b71c99 100644 --- a/nova/tests/test_block_device.py +++ b/nova/tests/test_block_device.py @@ -307,6 +307,24 @@ def fake_validate(obj, dct): self.assertNotIn('db_field1', dev_dict) self.assertFalse('db_field2'in dev_dict) + def test_init_prepend_dev_to_device_name(self): + bdm = {'id': 3, 'instance_uuid': 'fake-instance', + 'device_name': 'vda', + 'source_type': 'volume', + 'destination_type': 'volume', + 'volume_id': 'fake-volume-id-1', + 'boot_index': 0} + bdm_dict = block_device.BlockDeviceDict(bdm) + self.assertEqual('/dev/vda', bdm_dict['device_name']) + + bdm['device_name'] = '/dev/vdb' + bdm_dict = block_device.BlockDeviceDict(bdm) + self.assertEqual('/dev/vdb', bdm_dict['device_name']) + + bdm['device_name'] = None + bdm_dict = block_device.BlockDeviceDict(bdm) + self.assertIsNone(bdm_dict['device_name']) + def test_validate(self): self.assertRaises(exception.InvalidBDMFormat, block_device.BlockDeviceDict, From e686131fc4b8724328f0922067569120c90eb261 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Tue, 25 Mar 2014 06:17:01 +0800 Subject: [PATCH 039/486] Restore instance flavor info when driver finish_migration fails when instance resize, it will call finish_migration at last to create new instance and destroy old instance. If driver layer has problem in create new instance, the instance will be set to 'ERROR' state. 
we are able to use reset-state --active to reset the instance and use it but the instance information is set to new flavor and not reverted to old one Change-Id: I5961260f50e7893c7bf03e329e06edfd4f295640 Closes-Bug: #1296519 --- nova/compute/manager.py | 37 +++++++++++++++++++----------- nova/tests/compute/test_compute.py | 31 +++++++++++++++++++++---- 2 files changed, 51 insertions(+), 17 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 93a752c9c2..273e3e4217 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -3564,6 +3564,17 @@ def _terminate_volume_connections(self, context, instance, bdms): self.volume_api.terminate_connection(context, bdm.volume_id, connector) + @staticmethod + def _save_instance_info(instance, instance_type, sys_meta): + flavors.save_flavor_info(sys_meta, instance_type) + instance.instance_type_id = instance_type['id'] + instance.memory_mb = instance_type['memory_mb'] + instance.vcpus = instance_type['vcpus'] + instance.root_gb = instance_type['root_gb'] + instance.ephemeral_gb = instance_type['ephemeral_gb'] + instance.system_metadata = sys_meta + instance.save() + def _finish_resize(self, context, instance, migration, disk_info, image): resize_instance = False @@ -3581,14 +3592,7 @@ def _finish_resize(self, context, instance, migration, disk_info, if old_instance_type_id != new_instance_type_id: instance_type = flavors.extract_flavor(instance, prefix='new_') - flavors.save_flavor_info(sys_meta, instance_type) - instance.instance_type_id = instance_type['id'] - instance.memory_mb = instance_type['memory_mb'] - instance.vcpus = instance_type['vcpus'] - instance.root_gb = instance_type['root_gb'] - instance.ephemeral_gb = instance_type['ephemeral_gb'] - instance.system_metadata = sys_meta - instance.save() + self._save_instance_info(instance, instance_type, sys_meta) resize_instance = True # NOTE(tr3buchet): setup networks on destination host @@ -3617,11 +3621,18 @@ def _finish_resize(self, 
context, instance, migration, disk_info, # NOTE(mriedem): If the original vm_state was STOPPED, we don't # automatically power on the instance after it's migrated power_on = old_vm_state != vm_states.STOPPED - self.driver.finish_migration(context, migration, instance, - disk_info, - network_info, - image, resize_instance, - block_device_info, power_on) + + try: + self.driver.finish_migration(context, migration, instance, + disk_info, + network_info, + image, resize_instance, + block_device_info, power_on) + except Exception: + with excutils.save_and_reraise_exception(): + if resize_instance: + self._save_instance_info(instance, + old_instance_type, sys_meta) migration.status = 'finished' migration.save(context.elevated()) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index f9de8e610c..7e681591d7 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -4397,10 +4397,11 @@ def fake(*args, **kwargs): self._stub_out_resize_network_methods() - instance = self._create_fake_instance_obj() + old_flavor_name = 'm1.tiny' + instance = self._create_fake_instance_obj(type_name=old_flavor_name) reservations = self._ensure_quota_reservations_rolledback(instance) - instance_type = flavors.get_default_flavor() + instance_type = flavors.get_flavor_by_name('m1.small') self.compute.prep_resize(self.context, instance=instance, instance_type=instance_type, @@ -4420,11 +4421,33 @@ def fake(*args, **kwargs): migration=migration, disk_info={}, image={}, instance=instance, reservations=reservations) - # NOTE(comstud): error path doesn't use objects, so our object - # is not updated. Refresh and compare against the DB. 
instance.refresh() self.assertEqual(vm_states.ERROR, instance.vm_state) + old_flavor = flavors.get_flavor_by_name(old_flavor_name) + self.assertEqual(old_flavor['memory_mb'], instance.memory_mb) + self.assertEqual(old_flavor['vcpus'], instance.vcpus) + self.assertEqual(old_flavor['root_gb'], instance.root_gb) + self.assertEqual(old_flavor['ephemeral_gb'], instance.ephemeral_gb) + self.assertEqual(old_flavor['id'], instance.instance_type_id) + self.assertNotEqual(instance_type['id'], instance.instance_type_id) + + def test_save_instance_info(self): + old_flavor_name = 'm1.tiny' + new_flavor_name = 'm1.small' + instance = self._create_fake_instance_obj(type_name=old_flavor_name) + new_flavor = flavors.get_flavor_by_name(new_flavor_name) + + self.compute._save_instance_info(instance, new_flavor, + instance.system_metadata) + + self.assertEqual(new_flavor['memory_mb'], instance.memory_mb) + self.assertEqual(new_flavor['vcpus'], instance.vcpus) + self.assertEqual(new_flavor['root_gb'], instance.root_gb) + self.assertEqual(new_flavor['ephemeral_gb'], instance.ephemeral_gb) + self.assertEqual(new_flavor['id'], instance.instance_type_id) + self.assertEqual(new_flavor['id'], instance.instance_type_id) + def test_rebuild_instance_notification(self): # Ensure notifications on instance migrate/resize. old_time = datetime.datetime(2012, 4, 1) From ebd520e331e88aeac302de86628c7c3e80c1308c Mon Sep 17 00:00:00 2001 From: jichenjc Date: Fri, 11 Jul 2014 16:09:13 +0800 Subject: [PATCH 040/486] Fix error status code for multinic When passing bad body in a request, most APIs return BadRequest response. However, multinic API doesn't do it. This patch fixes the error status code and adds a unit test related to this change. 
This patch's idea most came from following patch: https://review.openstack.org/#/c/107266/ Change-Id: If1ef2077a0e9823dfc04e4ef5a6dfb02daf1a8dc --- nova/api/openstack/compute/contrib/multinic.py | 4 ++-- nova/tests/api/openstack/compute/contrib/test_multinic_xs.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/compute/contrib/multinic.py b/nova/api/openstack/compute/contrib/multinic.py index 6887c2ef8e..c0822e4add 100644 --- a/nova/api/openstack/compute/contrib/multinic.py +++ b/nova/api/openstack/compute/contrib/multinic.py @@ -52,7 +52,7 @@ def _add_fixed_ip(self, req, id, body): # Validate the input entity if 'networkId' not in body['addFixedIp']: msg = _("Missing 'networkId' argument for addFixedIp") - raise exc.HTTPUnprocessableEntity(explanation=msg) + raise exc.HTTPBadRequest(explanation=msg) instance = self._get_instance(context, id, want_objects=True) network_id = body['addFixedIp']['networkId'] @@ -68,7 +68,7 @@ def _remove_fixed_ip(self, req, id, body): # Validate the input entity if 'address' not in body['removeFixedIp']: msg = _("Missing 'address' argument for removeFixedIp") - raise exc.HTTPUnprocessableEntity(explanation=msg) + raise exc.HTTPBadRequest(explanation=msg) instance = self._get_instance(context, id, want_objects=True) diff --git a/nova/tests/api/openstack/compute/contrib/test_multinic_xs.py b/nova/tests/api/openstack/compute/contrib/test_multinic_xs.py index f6786686c3..9a6cea0407 100644 --- a/nova/tests/api/openstack/compute/contrib/test_multinic_xs.py +++ b/nova/tests/api/openstack/compute/contrib/test_multinic_xs.py @@ -90,7 +90,7 @@ def test_add_fixed_ip_no_network(self): req.headers['content-type'] = 'application/json' resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 422) + self.assertEqual(resp.status_int, 400) self.assertEqual(last_add_fixed_ip, (None, None)) def test_remove_fixed_ip(self): @@ -118,5 +118,5 @@ def test_remove_fixed_ip_no_address(self): 
req.headers['content-type'] = 'application/json' resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 422) + self.assertEqual(resp.status_int, 400) self.assertEqual(last_remove_fixed_ip, (None, None)) From 60c90f73261efb8c73ecc02152307c81265cab13 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 24 Jun 2014 14:03:17 -0500 Subject: [PATCH 041/486] libvirt+lxc: Unmount guest FS from host on error If an error occurs during `_create_domain`, we need to ensure that the guest's FS is unmounted from the host in all cases. This is necessary because, if we leave the FS mounted to the host, the `lvremove` triggered by `delete` won't work because the filesystem will 'still be in use'. The solution is to wrap the code in a `try/finally` to ensure the cleanup routines (which unmount) are always called. Change-Id: If863cf813dddc1e4554fb87b945c68b75b25f9a2 Closes-Bug: 1333827 --- nova/tests/virt/libvirt/test_driver.py | 33 ++++++++ nova/virt/libvirt/driver.py | 108 +++++++++++++------------ 2 files changed, 89 insertions(+), 52 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 026461c582..a6971a22d3 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -1728,6 +1728,39 @@ def test_get_guest_config_with_video_driver_vram(self): self.assertEqual(cfg.devices[6].type, "qxl") self.assertEqual(cfg.devices[6].vram, 64) + @mock.patch('nova.virt.disk.api.teardown_container') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info') + @mock.patch('nova.virt.disk.api.setup_container') + @mock.patch('nova.openstack.common.fileutils.ensure_tree') + @mock.patch.object(fake_libvirt_utils, 'get_instance_path') + def test_unmount_fs_if_error_during_lxc_create_domain(self, + mock_get_inst_path, mock_ensure_tree, mock_setup_container, + mock_get_info, mock_teardown): + """If we hit an error during a `_create_domain` call to `libvirt+lxc` + we need to ensure the 
guest FS is unmounted from the host so that any + future `lvremove` calls will work. + """ + self.flags(virt_type='lxc', group='libvirt') + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + mock_domain = mock.MagicMock() + mock_instance = mock.MagicMock() + mock_get_inst_path.return_value = '/tmp/' + mock_image_backend = mock.MagicMock() + conn.image_backend = mock_image_backend + mock_image = mock.MagicMock() + mock_image.path = '/tmp/test.img' + conn.image_backend.image.return_value = mock_image + mock_setup_container.return_value = '/dev/nbd0' + mock_get_info.side_effect = exception.InstanceNotFound( + instance_id='foo') + + mock_domain.createWithFlags.side_effect = ValueError('somethingbad') + + self.assertRaises(ValueError, conn._create_domain, domain=mock_domain, + instance=mock_instance) + + mock_teardown.assert_called_with(container_dir='/tmp/rootfs') + def test_video_driver_flavor_limit_not_set(self): self.flags(virt_type='kvm', group='libvirt') self.flags(enabled=True, diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index c9d875a95e..511674dc09 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -3599,71 +3599,75 @@ def get_info(self, instance): 'cpu_time': dom_info[4], 'id': virt_dom.ID()} - def _create_domain(self, xml=None, domain=None, - instance=None, launch_flags=0, power_on=True): - """Create a domain. - - Either domain or xml must be passed in. If both are passed, then - the domain definition is overwritten from the xml. 
- """ - inst_path = None - if instance: - inst_path = libvirt_utils.get_instance_path(instance) - - if CONF.libvirt.virt_type == 'lxc': - if not inst_path: - inst_path = None - - container_dir = os.path.join(inst_path, 'rootfs') - fileutils.ensure_tree(container_dir) - image = self.image_backend.image(instance, 'disk') - container_root_device = disk.setup_container(image.path, - container_dir=container_dir, - use_cow=CONF.use_cow_images) + def _create_domain_setup_lxc(self, instance): + inst_path = libvirt_utils.get_instance_path(instance) + container_dir = os.path.join(inst_path, 'rootfs') + fileutils.ensure_tree(container_dir) + image = self.image_backend.image(instance, 'disk') + container_root_device = disk.setup_container(image.path, + container_dir=container_dir, + use_cow=CONF.use_cow_images) + try: #Note(GuanQiang): save container root device name here, used for # detaching the linked image device when deleting # the lxc instance. if container_root_device: instance.root_device_name = container_root_device instance.save() + except Exception: + with excutils.save_and_reraise_exception(): + self._create_domain_cleanup_lxc(instance) - if xml: - try: + def _create_domain_cleanup_lxc(self, instance): + inst_path = libvirt_utils.get_instance_path(instance) + container_dir = os.path.join(inst_path, 'rootfs') + + try: + state = self.get_info(instance)['state'] + except exception.InstanceNotFound: + # The domain may not be present if the instance failed to start + state = None + + if state == power_state.RUNNING: + # NOTE(uni): Now the container is running with its own private + # mount namespace and so there is no need to keep the container + # rootfs mounted in the host namespace + disk.clean_lxc_namespace(container_dir=container_dir) + else: + disk.teardown_container(container_dir=container_dir) + + def _create_domain(self, xml=None, domain=None, + instance=None, launch_flags=0, power_on=True): + """Create a domain. + + Either domain or xml must be passed in. 
If both are passed, then + the domain definition is overwritten from the xml. + """ + err = None + if instance and CONF.libvirt.virt_type == 'lxc': + self._create_domain_setup_lxc(instance) + try: + if xml: + err = _LE('Error defining a domain with XML: %s') % xml domain = self._conn.defineXML(xml) - except Exception as e: - LOG.error(_LE("An error occurred while trying to define " - "a domain with xml: %s"), xml) - raise e - if power_on: - try: + if power_on: + err = _LE('Error launching a defined domain with XML: %s') \ + % domain.XMLDesc(0) domain.createWithFlags(launch_flags) - except Exception as e: - with excutils.save_and_reraise_exception(): - LOG.error(_LE("An error occurred while trying to launch a " - "defined domain with xml: %s"), - domain.XMLDesc(0)) - if not utils.is_neutron(): - try: + if not utils.is_neutron(): + err = _LE('Error enabling hairpin mode with XML: %s') \ + % domain.XMLDesc(0) self._enable_hairpin(domain.XMLDesc(0)) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error(_LE("An error occurred while enabling hairpin " - "mode on domain with xml: %s"), - domain.XMLDesc(0)) - - # NOTE(uni): Now the container is running with its own private mount - # namespace and so there is no need to keep the container rootfs - # mounted in the host namespace - if CONF.libvirt.virt_type == 'lxc': - state = self.get_info(instance)['state'] - container_dir = os.path.join(inst_path, 'rootfs') - if state == power_state.RUNNING: - disk.clean_lxc_namespace(container_dir=container_dir) - else: - disk.teardown_container(container_dir=container_dir) + except Exception: + with excutils.save_and_reraise_exception(): + if err: + LOG.error(err) + finally: + if instance and CONF.libvirt.virt_type == 'lxc': + self._create_domain_cleanup_lxc(instance) return domain From a55f41492e5ce9bbc2f2ef3435a7e7e65bf6cb3e Mon Sep 17 00:00:00 2001 From: ftersin Date: Thu, 22 May 2014 20:01:45 +0400 Subject: [PATCH 042/486] Store volume backed snapshot in 
current tenant. Fix owner of a creating volume backed snapshot. Snapshot of an instance booted on a volume based on another tenant's public image is created in the wrong tenant when invoked by admin. Snapshot metadata (including owner) is based on image metadata. But when the snapshot is being created by admin, Glance doesn't change it's owner if it's set. So we forcibly remove owner (tenant) attribute from image metadata. Change-Id: I662dfa4f81e24cb2553ffa2578f4c8530eee9fd3 Closes-Bug: #1322195 --- nova/compute/api.py | 2 +- nova/tests/compute/test_compute_api.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 2610bfcc77..5c88c2527c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -2072,7 +2072,7 @@ def snapshot_volume_backed(self, context, instance, image_meta, name, properties['block_device_mapping'] = mapping properties['bdm_v2'] = True - for attr in ('status', 'location', 'id'): + for attr in ('status', 'location', 'id', 'owner'): image_meta.pop(attr, None) # the new image is simply a bucket of properties (particularly the diff --git a/nova/tests/compute/test_compute_api.py b/nova/tests/compute/test_compute_api.py index a1edeab2e4..e418c02482 100644 --- a/nova/tests/compute/test_compute_api.py +++ b/nova/tests/compute/test_compute_api.py @@ -1584,6 +1584,7 @@ def test_snapshot_volume_backed(self): 'properties': {'mappings': []}, 'status': 'fake-status', 'location': 'far-away', + 'owner': 'fake-tenant', } expect_meta = { From f44b4faacd2a21fa27751a32e7a908022ab089ad Mon Sep 17 00:00:00 2001 From: Don Dugger Date: Wed, 18 Jun 2014 13:13:11 -0600 Subject: [PATCH 043/486] Change compute updates from periodic to on demand Currently, all compute nodes update status info in the DB on a periodic basis (the period is currently 60 seconds). 
Given that the status of the node only changes at specific points (mainly image creation/destruction) this leads to significant DB overhead on a large system. This BP changes the update mechanism to only update the DB when a node state changes by keeping an old copy of the state info and only updating the DB if the new state info is different. Docimpact - check to see if there's a documentation impact to this change. Change-Id: I19c174488b61977358a4d617cc4b590e317e665a Implements: blueprint on-demand-compute-update --- nova/compute/resource_tracker.py | 10 ++++++++++ nova/tests/compute/test_resource_tracker.py | 21 +++++++++++++++++++-- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py index aaf0eb3890..abf4f8e20b 100644 --- a/nova/compute/resource_tracker.py +++ b/nova/compute/resource_tracker.py @@ -76,6 +76,7 @@ def __init__(self, host, driver, nodename): monitor_handler = monitors.ResourceMonitorHandler() self.monitors = monitor_handler.choose_monitors(self) self.notifier = rpc.get_notifier() + self.old_resources = {} @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) def instance_claim(self, context, instance_ref, limits=None): @@ -439,8 +440,17 @@ def _report_final_resource_view(self, resources): if 'pci_stats' in resources: LOG.audit(_("PCI stats: %s"), resources['pci_stats']) + def _resource_change(self, resources): + """Check to see if any resouces have changed.""" + if cmp(resources, self.old_resources) != 0: + self.old_resources = resources + return True + return False + def _update(self, context, values): """Persist the compute node updates to the DB.""" + if not self._resource_change(values): + return if "service" in self.compute_node: del self.compute_node['service'] self.compute_node = self.conductor_api.compute_node_update( diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py index 0486abc514..8cf1bac03f 100644 --- 
a/nova/tests/compute/test_resource_tracker.py +++ b/nova/tests/compute/test_resource_tracker.py @@ -425,6 +425,7 @@ def setUp(self): self.updated = False self.deleted = False + self.update_call_count = 0 self.tracker = self._tracker() self._migrations = {} @@ -450,9 +451,8 @@ def _fake_service_get_by_compute_host(self, ctx, host): def _fake_compute_node_update(self, ctx, compute_node_id, values, prune_stats=False): + self.update_call_count += 1 self.updated = True - values['stats'] = [{"key": "num_instances", "value": "1"}] - self.compute.update(values) return self.compute @@ -1144,3 +1144,20 @@ def test_get_host_metrics(self): self.context, 'compute.metrics.update', payload) self.assertEqual(metrics, expected_metrics) + + +class TrackerPeriodicTestCase(BaseTrackerTestCase): + + def test_periodic_status_update(self): + # verify update called on instantiation + self.assertEqual(1, self.update_call_count) + + # verify update not called if no change to resources + self.tracker.update_available_resource(self.context) + self.assertEqual(1, self.update_call_count) + + # verify update is called when resources change + driver = self.tracker.driver + driver.memory_mb += 1 + self.tracker.update_available_resource(self.context) + self.assertEqual(2, self.update_call_count) From 502fa4875a3975990cbdf84fc0f846f7ede8fa92 Mon Sep 17 00:00:00 2001 From: Swami Reddy Date: Sat, 12 Jul 2014 08:21:25 +0530 Subject: [PATCH 044/486] Add instanceset info to StopInstance response Currently stopinstance response missing the instanceset information with InstanceID, current state and previous state details. It just returns the "True". 
As per the AWS EC2 API reference document, the StopInstance response elements should include the instanceset information as below: req-a7326465-5ce2-4ed6-ab89-394b38cca85f i-00000001 80 stopped 16 running Included the instanceset into stopinstance response elements and updated the test cases for stopinstance response elements in nova/tests/api/ec2/test_cloud.py file. Closes-bug: #1321239 Change-Id: I4d8a6faf2689a7df71920183682fd1e403ce2a42 --- nova/api/ec2/cloud.py | 14 +++++++++++++- nova/tests/api/ec2/test_cloud.py | 32 ++++++++++++++++++++++++++++---- 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index f6afb1f123..2a96ef0b03 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -1048,6 +1048,17 @@ def _format_terminate_instances(self, context, instance_id, instances_set.append(i) return {'instancesSet': instances_set} + def _format_stop_instances(self, context, instance_ids, previous_states): + instances_set = [] + for (ec2_id, previous_state) in zip(instance_ids, previous_states): + i = {} + i['instanceId'] = ec2_id + i['previousState'] = _state_description(previous_state['vm_state'], + previous_state['shutdown_terminate']) + i['currentState'] = _state_description(vm_states.STOPPED, True) + instances_set.append(i) + return {'instancesSet': instances_set} + def _format_instance_bdm(self, context, instance_uuid, root_device_name, result): """Format InstanceBlockDeviceMappingResponseItemType.""" @@ -1435,7 +1446,8 @@ def stop_instances(self, context, instance_id, **kwargs): for instance in instances: extensions.check_compute_policy(context, 'stop', instance) self.compute_api.stop(context, instance) - return True + return self._format_stop_instances(context, instance_id, + instances) def start_instances(self, context, instance_id, **kwargs): """Start each instances in instance_id. 
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 5ad292d9ec..f146da969d 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -2221,14 +2221,26 @@ def test_stop_start_instance(self): self.cloud.start_instances, self.context, [instance_id]) + expected = {'instancesSet': [ + {'instanceId': 'i-00000001', + 'previousState': {'code': 16, + 'name': 'running'}, + 'currentState': {'code': 80, + 'name': 'stopped'}}]} result = self.cloud.stop_instances(self.context, [instance_id]) - self.assertTrue(result) + self.assertEqual(result, expected) result = self.cloud.start_instances(self.context, [instance_id]) self.assertTrue(result) + expected = {'instancesSet': [ + {'instanceId': 'i-00000001', + 'previousState': {'code': 16, + 'name': 'running'}, + 'currentState': {'code': 80, + 'name': 'stopped'}}]} result = self.cloud.stop_instances(self.context, [instance_id]) - self.assertTrue(result) + self.assertEqual(result, expected) expected = {'instancesSet': [ {'instanceId': 'i-00000001', @@ -2283,8 +2295,14 @@ def test_stop_instances(self): 'max_count': 1, } instance_id = self._run_instance(**kwargs) + expected = {'instancesSet': [ + {'instanceId': 'i-00000001', + 'previousState': {'code': 16, + 'name': 'running'}, + 'currentState': {'code': 80, + 'name': 'stopped'}}]} result = self.cloud.stop_instances(self.context, [instance_id]) - self.assertTrue(result) + self.assertEqual(result, expected) expected = {'instancesSet': [ {'instanceId': 'i-00000001', @@ -2384,8 +2402,14 @@ def test_terminate_instances_two_instances(self): inst1 = self._run_instance(**kwargs) inst2 = self._run_instance(**kwargs) + expected = {'instancesSet': [ + {'instanceId': 'i-00000001', + 'previousState': {'code': 16, + 'name': 'running'}, + 'currentState': {'code': 80, + 'name': 'stopped'}}]} result = self.cloud.stop_instances(self.context, [inst1]) - self.assertTrue(result) + self.assertEqual(result, expected) expected = {'instancesSet': 
[ {'instanceId': 'i-00000001', From 6507da5061fca1dbb0cdc7e6152e4b42055d4882 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sun, 15 Dec 2013 08:10:46 -0800 Subject: [PATCH 045/486] vmware: VM diagnostics (v3 API only) There is no formal definition for the VM diagnostics. For the V2 API the diagnostics will be returned as they are today. This will support backward compatibility with the existing API's. Part of the blueprint v3-diagnostics DocImpact Change-Id: I6465ee6fc2edac0107e260297d9647c8a16b09ae --- nova/tests/virt/vmwareapi/test_driver_api.py | 16 ++++++++++++ nova/virt/vmwareapi/driver.py | 17 +++++++++++-- nova/virt/vmwareapi/vmops.py | 26 +++++++++++++++++++- 3 files changed, 56 insertions(+), 3 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index eebcec91db..20e520a3ed 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -1681,6 +1681,22 @@ def test_get_diagnostics(self): 'node': self.instance_node}), matchers.DictMatches(expected)) + def test_get_instance_diagnostics(self): + self._create_vm() + expected = {'uptime': 0, + 'memory_details': {'used': 0, 'maximum': 512}, + 'nic_details': [], + 'driver': 'vmwareapi', + 'state': 'running', + 'version': '1.0', + 'cpu_details': [], + 'disk_details': [], + 'hypervisor_os': 'esxi', + 'config_drive': False} + actual = self.conn.get_instance_diagnostics( + {'name': 1, 'uuid': self.uuid, 'node': self.instance_node}) + self.assertThat(actual.serialize(), matchers.DictMatches(expected)) + def test_get_console_output(self): self.assertRaises(NotImplementedError, self.conn.get_console_output, None, None) diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index 6ff077f4c7..a4e6421f32 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -265,7 +265,13 @@ def get_info(self, instance): def get_diagnostics(self, instance): """Return data about 
VM diagnostics.""" - return self._vmops.get_diagnostics(instance) + data = self._vmops.get_diagnostics(instance) + return data + + def get_instance_diagnostics(self, instance): + """Return data about VM diagnostics.""" + data = self._vmops.get_instance_diagnostics(instance) + return data def get_vnc_console(self, context, instance): """Return link to instance's VNC console.""" @@ -727,7 +733,14 @@ def get_info(self, instance): def get_diagnostics(self, instance): """Return data about VM diagnostics.""" _vmops = self._get_vmops_for_compute_node(instance['node']) - return _vmops.get_diagnostics(instance) + data = _vmops.get_diagnostics(instance) + return data + + def get_instance_diagnostics(self, instance): + """Return data about VM diagnostics.""" + _vmops = self._get_vmops_for_compute_node(instance['node']) + data = _vmops.get_instance_diagnostics(instance) + return data def host_power_action(self, host, action): """Host operations not supported by VC driver. diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index b5dee1f641..c6ff2ada1f 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -41,6 +41,7 @@ from nova.openstack.common import uuidutils from nova import utils from nova.virt import configdrive +from nova.virt import diagnostics from nova.virt import driver from nova.virt.vmwareapi import constants from nova.virt.vmwareapi import ds_util @@ -1275,7 +1276,7 @@ def get_info(self, instance): 'num_cpu': int(query['summary.config.numCpu']), 'cpu_time': 0} - def get_diagnostics(self, instance): + def _get_diagnostics(self, instance): """Return data about VM diagnostics.""" vm_ref = vm_util.get_vm_ref(self._session, instance) lst_properties = ["summary.config", @@ -1292,9 +1293,32 @@ def get_diagnostics(self, instance): for value in query.values(): prop_dict = vim.object_to_dict(value, list_depth=1) data.update(prop_dict) + return data + + def get_diagnostics(self, instance): + """Return data about VM 
diagnostics.""" + data = self._get_diagnostics(instance) # Add a namespace to all of the diagnostsics return dict([('vmware:' + k, v) for k, v in data.items()]) + def get_instance_diagnostics(self, instance): + """Return data about VM diagnostics.""" + data = self._get_diagnostics(instance) + state = data.get('powerState') + if state: + state = power_state.STATE_MAP[VMWARE_POWER_STATES[state]] + uptime = data.get('uptimeSeconds', 0) + config_drive = configdrive.required_by(instance) + diags = diagnostics.Diagnostics(state=state, + driver='vmwareapi', + config_drive=config_drive, + hypervisor_os='esxi', + uptime=uptime) + diags.memory_details.maximum = data.get('memorySizeMB', 0) + diags.memory_details.used = data.get('guestMemoryUsage', 0) + #TODO(garyk): add in cpu, nic and disk stats + return diags + def _get_vnc_console_connection(self, instance): """Return connection info for a vnc console.""" vm_ref = vm_util.get_vm_ref(self._session, instance) From 97b667d651b6ed543c6a11955b05fb5e2c5d0b49 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Mon, 14 Jul 2014 15:38:01 +1200 Subject: [PATCH 046/486] Don't mask out HostState details in WeighedHost WeighedHost's __repr__ was poking under the HostState hood to use .host in its own __repr__ but this hides some important details. Specifically it hides the hypervisor_hostname, which is important for Ironic as one 'node' can have thousands of hostnames. The other details such as available RAM and so on are also useful for ops trying to debug scheduling issues, so rather than add hypervisor_hostname, I am just delegating to the underlying __repr__. 
Change-Id: I79e55c32b3d0768430132275ebe050f38c63bc87 --- nova/scheduler/weights/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/scheduler/weights/__init__.py b/nova/scheduler/weights/__init__.py index e3c7a07e8e..9d10c84765 100644 --- a/nova/scheduler/weights/__init__.py +++ b/nova/scheduler/weights/__init__.py @@ -31,8 +31,8 @@ def to_dict(self): return x def __repr__(self): - return "WeighedHost [host: %s, weight: %s]" % ( - self.obj.host, self.weight) + return "WeighedHost [host: %r, weight: %s]" % ( + self.obj, self.weight) class BaseHostWeigher(weights.BaseWeigher): From bf2c7d9f2cbb5cc9cdf3f2bebb73bce8840e9084 Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Fri, 27 Jun 2014 16:20:33 +0930 Subject: [PATCH 047/486] Restore ability to delete aggregate metadata Commit I2c778f2237ba5bd2aa8335a0eae80f3aad3e9157 tightened the input validation for setting the metadata for an aggregate. However it also accidentally excluded sending null as the metadata value which has previously allowed the deletion of the metadata key rather than an update. 
This changeset relaxes the input validation to allow deletion of aggregate metadata again Change-Id: I54b91c2d421243e7e7521f2352598f73a258b88a Closes-Bug: 1334846 --- nova/api/openstack/compute/contrib/aggregates.py | 3 ++- nova/api/openstack/compute/plugins/v3/aggregates.py | 3 ++- .../openstack/compute/contrib/test_aggregates.py | 13 +++++++++++++ .../openstack/compute/plugins/v3/test_aggregates.py | 13 +++++++++++++ 4 files changed, 30 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/compute/contrib/aggregates.py b/nova/api/openstack/compute/contrib/aggregates.py index 9a136b3b81..9a035845d3 100644 --- a/nova/api/openstack/compute/contrib/aggregates.py +++ b/nova/api/openstack/compute/contrib/aggregates.py @@ -217,7 +217,8 @@ def _set_metadata(self, req, id, body): try: for key, value in metadata.items(): utils.check_string_length(key, "metadata.key", 1, 255) - utils.check_string_length(value, "metadata.value", 0, 255) + if value is not None: + utils.check_string_length(value, "metadata.value", 0, 255) except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.format_message()) try: diff --git a/nova/api/openstack/compute/plugins/v3/aggregates.py b/nova/api/openstack/compute/plugins/v3/aggregates.py index 364d0587bc..8accbedab6 100644 --- a/nova/api/openstack/compute/plugins/v3/aggregates.py +++ b/nova/api/openstack/compute/plugins/v3/aggregates.py @@ -177,7 +177,8 @@ def _set_metadata(self, req, id, body): try: for key, value in metadata.items(): utils.check_string_length(key, "metadata.key", 1, 255) - utils.check_string_length(value, "metadata.value", 0, 255) + if value is not None: + utils.check_string_length(value, "metadata.value", 0, 255) except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.format_message()) try: diff --git a/nova/tests/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/api/openstack/compute/contrib/test_aggregates.py index a9aa6e4595..fe5828bd7d 100644 --- 
a/nova/tests/api/openstack/compute/contrib/test_aggregates.py +++ b/nova/tests/api/openstack/compute/contrib/test_aggregates.py @@ -15,6 +15,7 @@ """Tests for the aggregates admin api.""" +import mock from webob import exc from nova.api.openstack.compute.contrib import aggregates @@ -487,6 +488,18 @@ def stub_update_aggregate(context, aggregate, values): self.assertEqual(AGGREGATE, result["aggregate"]) + def test_set_metadata_delete(self): + body = {"set_metadata": {"metadata": {"foo": None}}} + + with mock.patch.object(self.controller.api, + 'update_aggregate_metadata') as mocked: + mocked.return_value = AGGREGATE + result = self.controller.action(self.req, "1", body=body) + + self.assertEqual(AGGREGATE, result["aggregate"]) + mocked.assert_called_once_with(self.context, "1", + body["set_metadata"]["metadata"]) + def test_set_metadata_no_admin(self): self.assertRaises(exception.PolicyNotAuthorized, self.controller._set_metadata, diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py b/nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py index 323171dc3c..befe4a0533 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py @@ -15,6 +15,7 @@ """Tests for the aggregates admin api.""" +import mock from webob import exc from nova.api.openstack.compute.plugins.v3 import aggregates @@ -466,6 +467,18 @@ def stub_update_aggregate(context, aggregate, values): self.assertEqual(AGGREGATE, result["aggregate"]) + def test_set_metadata_delete(self): + body = {"set_metadata": {"metadata": {"foo": None}}} + + with mock.patch.object(self.controller.api, + 'update_aggregate_metadata') as mocked: + mocked.return_value = AGGREGATE + result = self.controller._set_metadata(self.req, "1", body=body) + + self.assertEqual(AGGREGATE, result["aggregate"]) + mocked.assert_called_once_with(self.context, "1", + body["set_metadata"]["metadata"]) + def 
test_set_metadata_no_admin(self): exc = self.assertRaises(exception.PolicyNotAuthorized, self.controller._set_metadata, From 556ccfce1fc807b4c3e85151e0fa8a8dccd5414a Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 14 Jul 2014 03:06:07 -0700 Subject: [PATCH 048/486] Security groups: add missing translation Commit d562012f34eadfe6b68dd5ebe06a2fa565de3b2e added exceptions that were not translated. Change-Id: I06dbd32a9bc6d99068e06576e4ea7a8594764db5 --- nova/network/security_group/security_group_base.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/network/security_group/security_group_base.py b/nova/network/security_group/security_group_base.py index 6710b2d2af..f033aeeb19 100644 --- a/nova/network/security_group/security_group_base.py +++ b/nova/network/security_group/security_group_base.py @@ -86,11 +86,11 @@ def _new_ingress_rule(ip_protocol, from_port, to_port, to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': - raise exception.InvalidInput(reason="Type and" - " Code must be integers for ICMP protocol type") + raise exception.InvalidInput(reason=_("Type and" + " Code must be integers for ICMP protocol type")) else: - raise exception.InvalidInput(reason="To and From ports " - "must be integers") + raise exception.InvalidInput(reason=_("To and From ports " + "must be integers")) if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) From 6003726ee95e25410092e73441a5afe610ad4a53 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 9 Jun 2014 07:11:25 -0700 Subject: [PATCH 049/486] VMware: validate the network_info is defined Ensure that the network info is defined correctly prior to allocating networks. Commit 6fb9cc8b278ceab05e65e1c5145141203211d246 caused a degradation. 
Change-Id: I74eddc0ff744ee5d80df825a52527c67583bacea --- nova/tests/virt/vmwareapi/test_vif.py | 5 +++++ nova/virt/vmwareapi/vif.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/nova/tests/virt/vmwareapi/test_vif.py b/nova/tests/virt/vmwareapi/test_vif.py index 5825b9ec95..0241b9e82a 100644 --- a/nova/tests/virt/vmwareapi/test_vif.py +++ b/nova/tests/virt/vmwareapi/test_vif.py @@ -329,6 +329,11 @@ def test_get_vif_info_none(self): 'is_neutron', 'fake_model', None) self.assertEqual([], vif_info) + def test_get_vif_info_empty_list(self): + vif_info = vif.get_vif_info('fake_session', 'fake_cluster', + 'is_neutron', 'fake_model', []) + self.assertEqual([], vif_info) + @mock.patch.object(vif, 'get_network_ref', return_value='fake_ref') def test_get_vif_info(self, mock_get_network_ref): network_info = utils.get_test_network_info() diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py index f611ccf20a..30b70c7fd8 100644 --- a/nova/virt/vmwareapi/vif.py +++ b/nova/virt/vmwareapi/vif.py @@ -156,7 +156,7 @@ def get_network_ref(session, cluster, vif, is_neutron): def get_vif_info(session, cluster, is_neutron, vif_model, network_info): vif_infos = [] - if not network_info: + if network_info is None: return vif_infos for vif in network_info: mac_address = vif['address'] From acb47630f35716f4f3aa1f50d05e3b9280a0a1db Mon Sep 17 00:00:00 2001 From: Andrew Laski Date: Mon, 7 Jul 2014 16:44:13 -0400 Subject: [PATCH 050/486] Send compute.instance.create.end after launched_at is set Downstream consumers of notifications want to know when an instance was launched from the compute.instance.create.end notification. This value was previously set in the notification but commit 70196c4854e6cc1cb566dc9fc5b1a3397a4b69ab changed things so that it was no longer set. It will now be set, and is tested to be set. 
Change-Id: If5ac00529d52e3898cda7fd942535ed3ba2039d5 Closes-bug: 1338736 --- nova/compute/manager.py | 8 +++---- nova/tests/compute/test_compute_mgr.py | 29 ++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 82ee619558..59d44daa42 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1975,10 +1975,6 @@ def _build_and_run_instance(self, context, instance, image, injected_files, injected_files, admin_password, network_info=network_info, block_device_info=block_device_info) - self._notify_about_instance_usage(context, instance, - 'create.end', - extra_usage_info={'message': _('Success')}, - network_info=network_info) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError) as e: with excutils.save_and_reraise_exception(): @@ -2037,6 +2033,10 @@ def _build_and_run_instance(self, context, instance, image, injected_files, instance.launched_at = timeutils.utcnow() instance.save(expected_task_state=task_states.SPAWNING) + self._notify_about_instance_usage(context, instance, 'create.end', + extra_usage_info={'message': _('Success')}, + network_info=network_info) + @contextlib.contextmanager def _build_resources(self, context, instance, requested_networks, security_groups, image, block_device_mapping): diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 0d46cb12d6..9b6ddc5a7a 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -2329,6 +2329,35 @@ def test_cleanup_allocated_networks_instance_not_found(self): self.assertEqual('False', self.instance.system_metadata['network_allocated']) + @mock.patch.object(conductor_rpcapi.ConductorAPI, 'instance_update') + def test_launched_at_in_create_end_notification(self, + mock_instance_update): + + def fake_notify(*args, **kwargs): + if args[2] == 'create.end': + # Check that launched_at is set on the instance 
+ self.assertIsNotNone(args[1].launched_at) + + with contextlib.nested( + mock.patch.object(self.compute.driver, 'spawn'), + mock.patch.object(self.compute, + '_build_networks_for_instance', return_value=[]), + mock.patch.object(self.instance, 'save'), + mock.patch.object(self.compute, '_notify_about_instance_usage', + side_effect=fake_notify) + ) as (mock_spawn, mock_networks, mock_save, mock_notify): + self.compute._build_and_run_instance(self.context, self.instance, + self.image, self.injected_files, self.admin_pass, + self.requested_networks, self.security_groups, + self.block_device_mapping, self.node, self.limits, + self.filter_properties) + expected_call = mock.call(self.context, self.instance, + 'create.end', extra_usage_info={'message': u'Success'}, + network_info=[]) + create_end_call = mock_notify.call_args_list[ + mock_notify.call_count - 1] + self.assertEqual(expected_call, create_end_call) + class ComputeManagerMigrationTestCase(test.NoDBTestCase): def setUp(self): From 1ecfe4cdad00155ca2219094dcc6d1a3c22e6f9b Mon Sep 17 00:00:00 2001 From: Sylvain Bauza Date: Mon, 30 Jun 2014 12:06:47 +0200 Subject: [PATCH 051/486] Move retry of prep_resize to conductor instead of scheduler Prep_resize in Compute Manager calls back Scheduler prep_resize if an Exception is raised. There is no sense of calling back the scheduler, it should call Conductor instead. 
Implements blueprint move-prep-resize-to-conductor Change-Id: I2131843c291f5bd01eb4dbd6c8f36a9218120c19 --- nova/compute/manager.py | 19 ++++++--------- nova/tests/compute/test_compute.py | 39 +++++++++++++++++++++--------- 2 files changed, 35 insertions(+), 23 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 82ee619558..25ee5ca94f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1449,7 +1449,7 @@ def _reschedule_or_error(self, context, instance, exc_info, return rescheduled def _reschedule(self, context, request_spec, filter_properties, - instance, scheduler_method, method_args, task_state, + instance, reschedule_method, method_args, task_state, exc_info=None): """Attempt to re-schedule a compute operation.""" @@ -1469,7 +1469,7 @@ def _reschedule(self, context, request_spec, filter_properties, request_spec['instance_uuids'] = [instance_uuid] LOG.debug("Re-scheduling %(method)s: attempt %(num)d", - {'method': scheduler_method.func_name, + {'method': reschedule_method.func_name, 'num': retry['num_attempts']}, instance_uuid=instance_uuid) # reset the task state: @@ -1480,7 +1480,7 @@ def _reschedule(self, context, request_spec, filter_properties, retry['exc'] = traceback.format_exception_only(exc_info[0], exc_info[1]) - scheduler_method(context, *method_args) + reschedule_method(context, *method_args) return True @periodic_task.periodic_task @@ -3484,17 +3484,14 @@ def _reschedule_resize_or_reraise(self, context, image, instance, exc_info, instance_uuid = instance['uuid'] try: - # NOTE(comstud): remove the scheduler RPCAPI method when - # this is adjusted to send to conductor... and then - # deprecate the scheduler manager method. 
- scheduler_method = self.scheduler_rpcapi.prep_resize - instance_p = obj_base.obj_to_primitive(instance) - method_args = (instance_p, instance_type, image, request_spec, - filter_properties, quotas.reservations) + reschedule_method = self.compute_task_api.resize_instance + scheduler_hint = dict(filter_properties=filter_properties) + method_args = (instance, None, scheduler_hint, instance_type, + quotas.reservations) task_state = task_states.RESIZE_PREP rescheduled = self._reschedule(context, request_spec, - filter_properties, instance, scheduler_method, + filter_properties, instance, reschedule_method, method_args, task_state, exc_info) except Exception as error: rescheduled = False diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 377e89430a..13d2e8b39d 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -150,6 +150,13 @@ def prep_resize(self, ctxt, instance, instance_type, image, request_spec, pass +class FakeComputeTaskAPI(object): + + def resize_instance(self, context, instance, extra_instance_updates, + scheduler_hint, flavor, reservations): + pass + + class BaseTestCase(test.TestCase): def setUp(self): @@ -231,7 +238,10 @@ def fake_show(meh, context, id): self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) fake_rpcapi = FakeSchedulerAPI() + fake_taskapi = FakeComputeTaskAPI() self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi) + self.stubs.Set(self.compute, 'compute_task_api', fake_taskapi) + fake_network.set_stub_network_methods(self.stubs) fake_server_actions.stub_out_action_events(self.stubs) @@ -10133,12 +10143,12 @@ def _reschedule(self, request_spec=None, filter_properties=None, instance = fake_instance.fake_db_instance(uuid=instance_uuid) instance = self._objectify(instance) instance_type = {} - image = None reservations = None - scheduler_method = self.compute.scheduler_rpcapi.prep_resize - method_args = (instance, instance_type, image, 
request_spec, - filter_properties, reservations) + scheduler_method = self.compute.compute_task_api.resize_instance + scheduler_hint = dict(filter_properties=filter_properties) + method_args = (instance, None, scheduler_hint, instance_type, + reservations) return self.compute._reschedule(self.context, request_spec, filter_properties, instance, scheduler_method, @@ -10425,13 +10435,14 @@ def test_reschedule_fails_with_exception(self): raises another exception """ instance = self._create_fake_instance_obj() - method_args = (None, instance, self.instance_type, None, None, - None) + scheduler_hint = dict(filter_properties={}) + method_args = (instance, None, scheduler_hint, self.instance_type, + None) self.mox.StubOutWithMock(self.compute, "_reschedule") self.compute._reschedule( self.context, None, None, instance, - self.compute.scheduler_rpcapi.prep_resize, method_args, + self.compute.compute_task_api.resize_instance, method_args, task_states.RESIZE_PREP).AndRaise( InnerTestingException("Inner")) self.mox.ReplayAll() @@ -10450,12 +10461,14 @@ def test_reschedule_false(self): rescheduled. """ instance = self._create_fake_instance_obj() - method_args = (None, instance, self.instance_type, None, None, None) + scheduler_hint = dict(filter_properties={}) + method_args = (instance, None, scheduler_hint, self.instance_type, + None) self.mox.StubOutWithMock(self.compute, "_reschedule") self.compute._reschedule( self.context, None, None, instance, - self.compute.scheduler_rpcapi.prep_resize, method_args, + self.compute.compute_task_api.resize_instance, method_args, task_states.RESIZE_PREP).AndReturn(False) self.mox.ReplayAll() @@ -10471,8 +10484,10 @@ def test_reschedule_false(self): def test_reschedule_true(self): # If rescheduled, the original resize exception should be logged. 
instance = self._create_fake_instance_obj() - instance_p = obj_base.obj_to_primitive(instance) - method_args = (instance_p, self.instance_type, None, {}, {}, None) + scheduler_hint = dict(filter_properties={}) + method_args = (instance, None, scheduler_hint, self.instance_type, + None) + try: raise test.TestingException("Original") except Exception: @@ -10482,7 +10497,7 @@ def test_reschedule_true(self): self.mox.StubOutWithMock(self.compute, "_log_original_error") self.compute._reschedule(self.context, {}, {}, instance, - self.compute.scheduler_rpcapi.prep_resize, method_args, + self.compute.compute_task_api.resize_instance, method_args, task_states.RESIZE_PREP, exc_info).AndReturn(True) self.compute._log_original_error(exc_info, instance.uuid) From 34496b924d7b97712d5133199725558f36214d03 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 13 Jan 2014 14:25:44 +0000 Subject: [PATCH 052/486] XenAPI: VM diagnostics for v3 API For V3 API the diagnostics will have the format defined at: https://wiki.openstack.org/wiki/Nova_VM_Diagnostics This will support backward compatability with the existing APIs Part of the blueprint v3-diagnostics DocImpact Change-Id: I8377f86a97ca554028e4ca8c1c1af7fd5643cc2e --- nova/tests/virt/xenapi/test_xenapi.py | 73 ++++++++++++++++++++------- nova/virt/xenapi/driver.py | 4 ++ nova/virt/xenapi/fake.py | 9 ++++ nova/virt/xenapi/vm_utils.py | 29 ++++++++++- nova/virt/xenapi/vmops.py | 6 +++ 5 files changed, 102 insertions(+), 19 deletions(-) diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py index 448c323d71..b182aeaa20 100644 --- a/nova/tests/virt/xenapi/test_xenapi.py +++ b/nova/tests/virt/xenapi/test_xenapi.py @@ -367,6 +367,22 @@ def test_get_rrd_server(self): self.assertEqual(server_info[0], 'myscheme') self.assertEqual(server_info[1], 'myaddress') + expected_raw_diagnostics = { + 'vbd_xvdb_write': '0.0', + 'memory_target': '4294967296.0000', + 'memory_internal_free': '1415564.0000', + 'memory': 
'4294967296.0000', + 'vbd_xvda_write': '0.0', + 'cpu0': '0.0042', + 'vif_0_tx': '287.4134', + 'vbd_xvda_read': '0.0', + 'vif_0_rx': '1816.0144', + 'vif_2_rx': '0.0', + 'vif_2_tx': '0.0', + 'vbd_xvdb_read': '0.0', + 'last_update': '1328795567', + } + def test_get_diagnostics(self): def fake_get_rrd(host, vm_uuid): path = os.path.dirname(os.path.realpath(__file__)) @@ -374,24 +390,47 @@ def fake_get_rrd(host, vm_uuid): return re.sub(r'\s', '', f.read()) self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd) - fake_diagnostics = { - 'vbd_xvdb_write': '0.0', - 'memory_target': '4294967296.0000', - 'memory_internal_free': '1415564.0000', - 'memory': '4294967296.0000', - 'vbd_xvda_write': '0.0', - 'cpu0': '0.0042', - 'vif_0_tx': '287.4134', - 'vbd_xvda_read': '0.0', - 'vif_0_rx': '1816.0144', - 'vif_2_rx': '0.0', - 'vif_2_tx': '0.0', - 'vbd_xvdb_read': '0.0', - 'last_update': '1328795567', - } + expected = self.expected_raw_diagnostics + instance = self._create_instance() + actual = self.conn.get_diagnostics(instance) + self.assertThat(actual, matchers.DictMatches(expected)) + + def test_get_instance_diagnostics(self): + def fake_get_rrd(host, vm_uuid): + path = os.path.dirname(os.path.realpath(__file__)) + with open(os.path.join(path, 'vm_rrd.xml')) as f: + return re.sub(r'\s', '', f.read()) + self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd) + + expected = { + 'config_drive': False, + 'state': 'running', + 'driver': 'xenapi', + 'version': '1.0', + 'uptime': 0, + 'hypervisor_os': None, + 'cpu_details': [{'time': 0}, {'time': 0}, + {'time': 0}, {'time': 0}], + 'nic_details': [{'mac_address': '00:00:00:00:00:00', + 'rx_drop': 0, + 'rx_errors': 0, + 'rx_octets': 0, + 'rx_packets': 0, + 'tx_drop': 0, + 'tx_errors': 0, + 'tx_octets': 0, + 'tx_packets': 0}], + 'disk_details': [{'errors_count': 0, + 'id': '', + 'read_bytes': 0, + 'read_requests': 0, + 'write_bytes': 0, + 'write_requests': 0}], + 'memory_details': {'maximum': 8192, 'used': 0}} + instance = self._create_instance() 
- expected = self.conn.get_diagnostics(instance) - self.assertThat(fake_diagnostics, matchers.DictMatches(expected)) + actual = self.conn.get_instance_diagnostics(instance) + self.assertEqual(expected, actual.serialize()) def test_get_vnc_console(self): instance = self._create_instance(obj=True) diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py index f89ed84b14..4d67c50783 100644 --- a/nova/virt/xenapi/driver.py +++ b/nova/virt/xenapi/driver.py @@ -344,6 +344,10 @@ def get_diagnostics(self, instance): """Return data about VM diagnostics.""" return self._vmops.get_diagnostics(instance) + def get_instance_diagnostics(self, instance): + """Return data about VM diagnostics.""" + return self._vmops.get_instance_diagnostics(instance) + def get_all_bw_counters(self, instances): """Return bandwidth usage counters for each interface on each running VM. diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index baa55a3b1e..4e1c185831 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -237,6 +237,14 @@ def after_VBD_create(vbd_ref, vbd_rec): vdi_rec['VBDs'].append(vbd_ref) +def after_VIF_create(vif_ref, vif_rec): + """Create backref from VM to VIF when VIF is created. 
+ """ + vm_ref = vif_rec['VM'] + vm_rec = _db_content['VM'][vm_ref] + vm_rec['VIFs'].append(vif_ref) + + def after_VM_create(vm_ref, vm_rec): """Create read-only fields in the VM record.""" vm_rec.setdefault('domid', -1) @@ -246,6 +254,7 @@ def after_VM_create(vm_ref, vm_rec): vm_rec.setdefault('memory_dynamic_max', str(8 * units.Gi)) vm_rec.setdefault('VCPUs_max', str(4)) vm_rec.setdefault('VBDs', []) + vm_rec.setdefault('VIFs', []) vm_rec.setdefault('resident_on', '') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4dfbce2927..2c65ef51f1 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -50,6 +50,7 @@ from nova.openstack.common import xmlutils from nova import utils from nova.virt import configdrive +from nova.virt import diagnostics from nova.virt.disk import api as disk from nova.virt.disk.vfs import localfs as vfsimpl from nova.virt import hardware @@ -1784,12 +1785,36 @@ def compile_info(session, vm_ref): 'cpu_time': 0} -def compile_diagnostics(record): +def compile_instance_diagnostics(instance, vm_rec): + vm_power_state_int = XENAPI_POWER_STATE[vm_rec['power_state']] + vm_power_state = power_state.STATE_MAP[vm_power_state_int] + config_drive = configdrive.required_by(instance) + + diags = diagnostics.Diagnostics(state=vm_power_state, + driver='xenapi', + config_drive=config_drive) + + for cpu_num in range(0, long(vm_rec['VCPUs_max'])): + diags.add_cpu() + + for vif in vm_rec['VIFs']: + diags.add_nic() + + for vbd in vm_rec['VBDs']: + diags.add_disk() + + max_mem_bytes = long(vm_rec['memory_dynamic_max']) + diags.memory_details.maximum = max_mem_bytes / units.Mi + + return diags + + +def compile_diagnostics(vm_rec): """Compile VM diagnostics data.""" try: keys = [] diags = {} - vm_uuid = record["uuid"] + vm_uuid = vm_rec["uuid"] xml = _get_rrd(_get_rrd_server(), vm_uuid) if xml: rrd = xmlutils.safe_minidom_parse_string(xml) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 
c7db7ea992..fe47875467 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -1512,6 +1512,12 @@ def get_diagnostics(self, instance): vm_rec = self._session.call_xenapi("VM.get_record", vm_ref) return vm_utils.compile_diagnostics(vm_rec) + def get_instance_diagnostics(self, instance): + """Return data about VM diagnostics using the common API.""" + vm_ref = self._get_vm_opaque_ref(instance) + vm_rec = self._session.VM.get_record(vm_ref) + return vm_utils.compile_instance_diagnostics(instance, vm_rec) + def _get_vif_device_map(self, vm_rec): vif_map = {} for vif in [self._session.call_xenapi("VIF.get_record", vrec) From d0248d0621e8d54a2d1c91f76ccdcb4bedd46649 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Wed, 9 Jul 2014 17:17:50 -0500 Subject: [PATCH 053/486] libvirt: Support IPv6 with LXC Libvirt's LXC implementation exposes a read-only `/proc/sys/net` to the guests. This causes some of the guest's default network configuration scripts to fail. This patch works-around the issue by using `post-up` hooks to configure IPv6. Closes-Bug: 1340791 Change-Id: I805cad98d855fcb2c90b07e98ad3653d1620bd42 --- nova/tests/network/test_network_info.py | 74 ++++++++++++++++++++++++- nova/virt/interfaces.template | 9 +++ nova/virt/libvirt/driver.py | 3 +- nova/virt/netutils.py | 8 ++- 4 files changed, 88 insertions(+), 6 deletions(-) diff --git a/nova/tests/network/test_network_info.py b/nova/tests/network/test_network_info.py index 6ca75331eb..e974fa190b 100644 --- a/nova/tests/network/test_network_info.py +++ b/nova/tests/network/test_network_info.py @@ -506,7 +506,8 @@ def test_hydrate(self): def _setup_injected_network_scenario(self, should_inject=True, use_ipv4=True, use_ipv6=False, gateway=True, dns=True, - two_interfaces=False): + two_interfaces=False, + libvirt_virt_type=None): """Check that netutils properly decides whether to inject based on whether the supplied subnet is static or dynamic. 
""" @@ -548,8 +549,8 @@ def _setup_injected_network_scenario(self, should_inject=True, vifs.append(vif) nwinfo = model.NetworkInfo(vifs) - return netutils.get_injected_network_template(nwinfo, - use_ipv6=use_ipv6) + return netutils.get_injected_network_template( + nwinfo, use_ipv6=use_ipv6, libvirt_virt_type=libvirt_virt_type) def test_injection_dynamic(self): expected = None @@ -714,3 +715,70 @@ def test_injection_ipv6_two_interfaces(self): template = self._setup_injected_network_scenario(use_ipv6=True, two_interfaces=True) self.assertEqual(expected, template) + + def test_injection_ipv6_with_lxc(self): + expected = """\ +# Injected by Nova on instance boot +# +# This file describes the network interfaces available on your system +# and how to activate them. For more information, see interfaces(5). + +# The loopback network interface +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet static + address 10.10.0.2 + netmask 255.255.255.0 + broadcast 10.10.0.255 + gateway 10.10.0.1 + dns-nameservers 1.2.3.4 2.3.4.5 + post-up ip -6 addr add 1234:567::2/48 dev ${IFACE} + post-up ip -6 route add default via 1234:567::1 dev ${IFACE} + +auto eth1 +iface eth1 inet static + address 10.10.0.2 + netmask 255.255.255.0 + broadcast 10.10.0.255 + gateway 10.10.0.1 + dns-nameservers 1.2.3.4 2.3.4.5 + post-up ip -6 addr add 1234:567::2/48 dev ${IFACE} + post-up ip -6 route add default via 1234:567::1 dev ${IFACE} +""" + template = self._setup_injected_network_scenario( + use_ipv6=True, two_interfaces=True, libvirt_virt_type='lxc') + self.assertEqual(expected, template) + + def test_injection_ipv6_with_lxc_no_gateway(self): + expected = """\ +# Injected by Nova on instance boot +# +# This file describes the network interfaces available on your system +# and how to activate them. For more information, see interfaces(5). 
+ +# The loopback network interface +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet static + address 10.10.0.2 + netmask 255.255.255.0 + broadcast 10.10.0.255 + dns-nameservers 1.2.3.4 2.3.4.5 + post-up ip -6 addr add 1234:567::2/48 dev ${IFACE} + +auto eth1 +iface eth1 inet static + address 10.10.0.2 + netmask 255.255.255.0 + broadcast 10.10.0.255 + dns-nameservers 1.2.3.4 2.3.4.5 + post-up ip -6 addr add 1234:567::2/48 dev ${IFACE} +""" + template = self._setup_injected_network_scenario( + use_ipv6=True, gateway=False, two_interfaces=True, + libvirt_virt_type='lxc') + self.assertEqual(expected, template) diff --git a/nova/virt/interfaces.template b/nova/virt/interfaces.template index ce5ec05a34..0ec7a5564d 100644 --- a/nova/virt/interfaces.template +++ b/nova/virt/interfaces.template @@ -20,6 +20,14 @@ iface {{ ifc.name }} inet static dns-nameservers {{ ifc.dns }} {% endif %} {% if use_ipv6 %} +{% if libvirt_virt_type == 'lxc' %} +{% if ifc.address_v6 %} + post-up ip -6 addr add {{ ifc.address_v6 }}/{{ifc.netmask_v6 }} dev ${IFACE} +{% endif %} +{% if ifc.gateway_v6 %} + post-up ip -6 route add default via {{ ifc.gateway_v6 }} dev ${IFACE} +{% endif %} +{% else %} iface {{ ifc.name }} inet6 static address {{ ifc.address_v6 }} netmask {{ ifc.netmask_v6 }} @@ -27,4 +35,5 @@ iface {{ ifc.name }} inet6 static gateway {{ ifc.gateway_v6 }} {% endif %} {% endif %} +{% endif %} {% endfor %} diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 31712fdfd8..934fda6326 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -2574,7 +2574,8 @@ def _inject_data(self, instance, network_info, admin_pass, files, suffix): admin_pass = None # Handles the network injection. 
- net = netutils.get_injected_network_template(network_info) + net = netutils.get_injected_network_template( + network_info, libvirt_virt_type=CONF.libvirt.virt_type) # Handles the metadata injection metadata = instance.get('metadata') diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py index 225607cf34..381d555445 100644 --- a/nova/virt/netutils.py +++ b/nova/virt/netutils.py @@ -59,7 +59,8 @@ def _get_first_network(network, version): def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6, - template=CONF.injected_network_template): + template=CONF.injected_network_template, + libvirt_virt_type=None): """Returns a rendered network template for the given network_info. :param network_info: @@ -67,6 +68,8 @@ def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6, :param use_ipv6: If False, do not return IPv6 template information even if an IPv6 subnet is present in network_info. :param template: Path to the interfaces template file. + :param libvirt_virt_type: The Libvirt `virt_type`, will be `None` for + other hypervisors.. """ if not (network_info and template): return @@ -145,4 +148,5 @@ def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6, trim_blocks=True) template = env.get_template(tmpl_file) return template.render({'interfaces': nets, - 'use_ipv6': ipv6_is_available}) + 'use_ipv6': ipv6_is_available, + 'libvirt_virt_type': libvirt_virt_type}) From ffd71a6c120aa1700f263c26f030445ed2579052 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 11 Jul 2014 10:39:23 -0500 Subject: [PATCH 054/486] xenapi: Use netuils.get_injected_network_template The Xen driver duplicates quite a bit of code used to inject the `interfaces` template into the guest. This patch makes the Xen driver use the generic `netutils` version. Parameter defaults were moved from the signature into the method since in the original code, you wouldn't be able to override them due to their value being populated at import-time, not run-time. 
Change-Id: I4cd1631b8e6ec2dc3fae1f8eb0362aecead53f1e --- nova/tests/virt/xenapi/test_xenapi.py | 2 +- nova/virt/netutils.py | 9 ++- nova/virt/xenapi/vm_utils.py | 103 +------------------------- 3 files changed, 12 insertions(+), 102 deletions(-) diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py index d801018716..1938e6a028 100644 --- a/nova/tests/virt/xenapi/test_xenapi.py +++ b/nova/tests/virt/xenapi/test_xenapi.py @@ -894,7 +894,7 @@ def _tee_handler(cmd, **kwargs): dns-nameservers 192.168.1.3 192.168.1.4 iface eth0 inet6 static address 2001:db8:0:1::1 - netmask ffff:ffff:ffff:ffff:: + netmask 64 gateway 2001:db8:0:1::1 """ self.assertEqual(expected, actual) diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py index 381d555445..4575e9f94d 100644 --- a/nova/virt/netutils.py +++ b/nova/virt/netutils.py @@ -58,8 +58,7 @@ def _get_first_network(network, version): pass -def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6, - template=CONF.injected_network_template, +def get_injected_network_template(network_info, use_ipv6=None, template=None, libvirt_virt_type=None): """Returns a rendered network template for the given network_info. @@ -71,6 +70,12 @@ def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6, :param libvirt_virt_type: The Libvirt `virt_type`, will be `None` for other hypervisors.. 
""" + if use_ipv6 is None: + use_ipv6 = CONF.use_ipv6 + + if not template: + template = CONF.injected_network_template + if not (network_info and template): return diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index a655b1dc8c..f40b6eb92c 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -53,6 +53,7 @@ from nova.virt.disk import api as disk from nova.virt.disk.vfs import localfs as vfsimpl from nova.virt import hardware +from nova.virt import netutils from nova.virt.xenapi import agent from nova.virt.xenapi.image import utils as image_utils from nova.virt.xenapi import volume_utils @@ -1736,12 +1737,13 @@ def lookup(session, name_label, check_rescue=False): def preconfigure_instance(session, instance, vdi_ref, network_info): """Makes alterations to the image before launching as part of spawn. """ + key = str(instance['key_data']) + net = netutils.get_injected_network_template(network_info) + metadata = instance['metadata'] # As mounting the image VDI is expensive, we only want do it once, # if at all, so determine whether it's required first, and then do # everything - mount_required = False - key, net, metadata = _prepare_injectables(instance, network_info) mount_required = key or net or metadata if not mount_required: return @@ -2455,103 +2457,6 @@ def _mounted_processing(device, key, net, metadata): 'non-linux instances): %s') % err) -def _prepare_injectables(inst, network_info): - """prepares the ssh key and the network configuration file to be - injected into the disk image - """ - #do the import here - Jinja2 will be loaded only if injection is performed - import jinja2 - tmpl_path, tmpl_file = os.path.split(CONF.injected_network_template) - env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path), - trim_blocks=True) - template = env.get_template(tmpl_file) - - metadata = inst['metadata'] - key = str(inst['key_data']) - net = None - if network_info: - ifc_num = -1 - interfaces_info = [] - for 
vif in network_info: - ifc_num += 1 - try: - if not vif['network'].get_meta('injected'): - # network is not specified injected - continue - except KeyError: - # vif network is None - continue - - # NOTE(tr3buchet): using all subnets in case dns is stored in a - # subnet that isn't chosen as first v4 or v6 - # subnet in the case where there is more than one - # dns = list of address of each dns entry from each vif subnet - dns = [ip['address'] for subnet in vif['network']['subnets'] - for ip in subnet['dns']] - dns = ' '.join(dns).strip() - - interface_info = {'name': 'eth%d' % ifc_num, - 'address': '', - 'netmask': '', - 'gateway': '', - 'broadcast': '', - 'dns': dns or '', - 'address_v6': '', - 'netmask_v6': '', - 'gateway_v6': '', - 'use_ipv6': CONF.use_ipv6} - - # NOTE(tr3buchet): the original code used the old network_info - # which only supported a single ipv4 subnet - # (and optionally, a single ipv6 subnet). - # I modified it to use the new network info model, - # which adds support for multiple v4 or v6 - # subnets. 
I chose to ignore any additional - # subnets, just as the original code ignored - # additional IP information - - # populate v4 info if v4 subnet and ip exist - try: - # grab the first v4 subnet (or it raises) - subnet = [s for s in vif['network']['subnets'] - if s['version'] == 4][0] - # get the subnet's first ip (or it raises) - ip = subnet['ips'][0] - - # populate interface_info - subnet_netaddr = subnet.as_netaddr() - interface_info['address'] = ip['address'] - interface_info['netmask'] = subnet_netaddr.netmask - interface_info['gateway'] = subnet['gateway']['address'] - interface_info['broadcast'] = subnet_netaddr.broadcast - except IndexError: - # there isn't a v4 subnet or there are no ips - pass - - # populate v6 info if v6 subnet and ip exist - try: - # grab the first v6 subnet (or it raises) - subnet = [s for s in vif['network']['subnets'] - if s['version'] == 6][0] - # get the subnet's first ip (or it raises) - ip = subnet['ips'][0] - - # populate interface_info - interface_info['address_v6'] = ip['address'] - interface_info['netmask_v6'] = subnet.as_netaddr().netmask - interface_info['gateway_v6'] = subnet['gateway']['address'] - except IndexError: - # there isn't a v6 subnet or there are no ips - pass - - interfaces_info.append(interface_info) - - if interfaces_info: - net = template.render({'interfaces': interfaces_info, - 'use_ipv6': CONF.use_ipv6}) - return key, net, metadata - - def ensure_correct_host(session): """Ensure we're connected to the host we're running on. This is the required configuration for anything that uses vdi_attached_here. From a73e31022c9fef532cc2d66462d13f4ab2358565 Mon Sep 17 00:00:00 2001 From: Lucian Petrut Date: Fri, 13 Jun 2014 18:53:51 +0300 Subject: [PATCH 055/486] Fixes Hyper-V SCSI slot selection Currently, when attaching volumes, the Hyper-V driver selects the slot on the SCSI controller by using the number of drives attached to that controller. 
This leads to exceptions when detaching volumes having lower numbered slots and then attaching a new volume, as Hyper-V will then try to use an occupied slot. This patch fixes the issue by simply getting a list of used slots and getting a slot that it's not among those. It also checks for the slot number to be smaller than the maximum number of slots per SCSI controller. Fixes bug: #1329764 Change-Id: Id0c09c4debc848004a78971670cc3caef2f77ce3 --- nova/tests/virt/hyperv/test_hypervapi.py | 21 +++++++++++++++++++-- nova/virt/hyperv/constants.py | 2 ++ nova/virt/hyperv/vmutils.py | 4 ++-- nova/virt/hyperv/volumeops.py | 11 +++++++++-- 4 files changed, 32 insertions(+), 6 deletions(-) diff --git a/nova/tests/virt/hyperv/test_hypervapi.py b/nova/tests/virt/hyperv/test_hypervapi.py index 9b6cf1e15e..ca4e8c96ad 100644 --- a/nova/tests/virt/hyperv/test_hypervapi.py +++ b/nova/tests/virt/hyperv/test_hypervapi.py @@ -164,7 +164,7 @@ def fake_vmutils__init__(self, host='.'): self._mox.StubOutWithMock(vmutils.VMUtils, 'set_nic_connection') self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_scsi_controller') self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_ide_controller') - self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks_count') + self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks') self._mox.StubOutWithMock(vmutils.VMUtils, 'attach_volume_to_controller') self._mox.StubOutWithMock(vmutils.VMUtils, @@ -1121,6 +1121,8 @@ def _mock_attach_volume(self, instance_name, target_iqn, target_lun, fake_mounted_disk = "fake_mounted_disk" fake_device_number = 0 fake_controller_path = 'fake_scsi_controller_path' + self._mox.StubOutWithMock(self._conn._volumeops, + '_get_free_controller_slot') self._mock_login_storage_target(target_iqn, target_lun, target_portal, @@ -1140,7 +1142,8 @@ def _mock_attach_volume(self, instance_name, target_iqn, target_lun, m.AndReturn(fake_controller_path) fake_free_slot = 1 - m = 
vmutils.VMUtils.get_attached_disks_count(fake_controller_path) + m = self._conn._volumeops._get_free_controller_slot( + fake_controller_path) m.AndReturn(fake_free_slot) m = vmutils.VMUtils.attach_volume_to_controller(instance_name, @@ -1732,3 +1735,17 @@ def test_get_mounted_disk_from_lun_failure(self): self.assertRaises(exception.NotFound, self.volumeops._get_mounted_disk_from_lun, target_iqn, target_lun) + + def test_get_free_controller_slot_exception(self): + fake_drive = mock.MagicMock() + type(fake_drive).AddressOnParent = mock.PropertyMock( + side_effect=xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER)) + fake_scsi_controller_path = 'fake_scsi_controller_path' + + with mock.patch.object(self.volumeops._vmutils, + 'get_attached_disks') as fake_get_attached_disks: + fake_get_attached_disks.return_value = ( + [fake_drive] * constants.SCSI_CONTROLLER_SLOTS_NUMBER) + self.assertRaises(vmutils.HyperVException, + self.volumeops._get_free_controller_slot, + fake_scsi_controller_path) diff --git a/nova/virt/hyperv/constants.py b/nova/virt/hyperv/constants.py index fc12ca9c75..e1e2ec9037 100644 --- a/nova/virt/hyperv/constants.py +++ b/nova/virt/hyperv/constants.py @@ -73,3 +73,5 @@ VHD_TYPE_FIXED = 2 VHD_TYPE_DYNAMIC = 3 + +SCSI_CONTROLLER_SLOTS_NUMBER = 64 diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py index e036ba89ff..a8e1126f30 100644 --- a/nova/virt/hyperv/vmutils.py +++ b/nova/virt/hyperv/vmutils.py @@ -266,7 +266,7 @@ def get_vm_ide_controller(self, vm_name, ctrller_addr): vm = self._lookup_vm_check(vm_name) return self._get_vm_ide_controller(vm, ctrller_addr) - def get_attached_disks_count(self, scsi_controller_path): + def get_attached_disks(self, scsi_controller_path): volumes = self._conn.query("SELECT * FROM %(class_name)s " "WHERE ResourceSubType = " "'%(res_sub_type)s' AND " @@ -277,7 +277,7 @@ def get_attached_disks_count(self, scsi_controller_path): self._PHYS_DISK_RES_SUB_TYPE, 'parent': scsi_controller_path.replace("'", "''")}) 
- return len(volumes) + return volumes def _get_new_setting_data(self, class_name): return self._conn.query("SELECT * FROM %s WHERE InstanceID " diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py index 7b3b0598fb..f2ac109484 100644 --- a/nova/virt/hyperv/volumeops.py +++ b/nova/virt/hyperv/volumeops.py @@ -26,7 +26,9 @@ from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt import driver +from nova.virt.hyperv import constants from nova.virt.hyperv import utilsfactory +from nova.virt.hyperv import vmutils LOG = logging.getLogger(__name__) @@ -149,8 +151,13 @@ def attach_volume(self, connection_info, instance_name, ebs_root=False): self._volutils.logout_storage_target(target_iqn) def _get_free_controller_slot(self, scsi_controller_path): - #Slots starts from 0, so the length of the disks gives us the free slot - return self._vmutils.get_attached_disks_count(scsi_controller_path) + attached_disks = self._vmutils.get_attached_disks(scsi_controller_path) + used_slots = [int(disk.AddressOnParent) for disk in attached_disks] + + for slot in xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER): + if slot not in used_slots: + return slot + raise vmutils.HyperVException("Exceeded the maximum number of slots") def detach_volumes(self, block_device_info, instance_name): mapping = driver.block_device_info_get_mapping(block_device_info) From 2df5896546d807fa9bb6a13d9dd1419c5a9ca706 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Wed, 16 Jul 2014 03:23:11 -0700 Subject: [PATCH 056/486] Update requirements to include decorator>=3.4.0 This third party package is used but there was no specific requirement for it. 
Change-Id: I50fcc7a1c4029b36e02e75eaf27df7ca791aef4e --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 6949d82614..655599460f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,6 +3,7 @@ SQLAlchemy>=0.8.4,!=0.9.5,<=0.9.99 anyjson>=0.3.3 argparse boto>=2.12.0,!=2.13.0 +decorator>=3.4.0 eventlet>=0.13.0 Jinja2 keystonemiddleware From 8b6ac08b0816c96ba7f2a40fb85413f58ae8faf7 Mon Sep 17 00:00:00 2001 From: Sulochan Acharya Date: Thu, 3 Jul 2014 21:03:53 +0000 Subject: [PATCH 057/486] Adds more policy control to cells ext Allow create,delete,update and sync_instances to have fine grained policy control. UpgradeImpact DocImpact Change-Id: I2e32ae2f37d3b599585d25535c79eecf6485b462 Closes-Bug: 1335901 --- etc/nova/policy.json | 8 ++ nova/api/openstack/compute/contrib/cells.py | 12 ++ .../api/openstack/compute/plugins/v3/cells.py | 12 ++ .../openstack/compute/contrib/test_cells.py | 105 ++++++++++++++++- .../compute/plugins/v3/test_cells.py | 107 +++++++++++++++++- nova/tests/fake_policy.py | 8 ++ 6 files changed, 243 insertions(+), 9 deletions(-) diff --git a/etc/nova/policy.json b/etc/nova/policy.json index cc5b8ea4a8..6444f5ab3c 100644 --- a/etc/nova/policy.json +++ b/etc/nova/policy.json @@ -66,7 +66,15 @@ "compute_extension:v3:os-attach-interfaces:discoverable": "", "compute_extension:baremetal_nodes": "rule:admin_api", "compute_extension:cells": "rule:admin_api", + "compute_extension:cells:create": "rule:admin_api", + "compute_extension:cells:delete": "rule:admin_api", + "compute_extension:cells:update": "rule:admin_api", + "compute_extension:cells:sync_instances": "rule:admin_api", "compute_extension:v3:os-cells": "rule:admin_api", + "compute_extension:v3:os-cells:create": "rule:admin_api", + "compute_extension:v3:os-cells:delete": "rule:admin_api", + "compute_extension:v3:os-cells:update": "rule:admin_api", + "compute_extension:v3:os-cells:sync_instances": "rule:admin_api", 
"compute_extension:v3:os-cells:discoverable": "", "compute_extension:certificates": "", "compute_extension:v3:os-certificates:create": "", diff --git a/nova/api/openstack/compute/contrib/cells.py b/nova/api/openstack/compute/contrib/cells.py index 3937c24338..e2595021ba 100644 --- a/nova/api/openstack/compute/contrib/cells.py +++ b/nova/api/openstack/compute/contrib/cells.py @@ -270,7 +270,10 @@ def show(self, req, id): def delete(self, req, id): """Delete a child or parent cell entry. 'id' is a cell name.""" context = req.environ['nova.context'] + authorize(context) + authorize(context, action="delete") + try: num_deleted = self.cells_rpcapi.cell_delete(context, id) except exception.CellsUpdateUnsupported as e: @@ -347,7 +350,10 @@ def _normalize_cell(self, cell, existing=None): def create(self, req, body): """Create a child cell entry.""" context = req.environ['nova.context'] + authorize(context) + authorize(context, action="create") + if 'cell' not in body: msg = _("No cell information in request") LOG.error(msg) @@ -371,7 +377,10 @@ def create(self, req, body): def update(self, req, id, body): """Update a child cell entry. 
'id' is the cell name to update.""" context = req.environ['nova.context'] + authorize(context) + authorize(context, action="update") + if 'cell' not in body: msg = _("No cell information in request") LOG.error(msg) @@ -403,7 +412,10 @@ def update(self, req, id, body): def sync_instances(self, req, body): """Tell all cells to sync instance info.""" context = req.environ['nova.context'] + authorize(context) + authorize(context, action="sync_instances") + project_id = body.pop('project_id', None) deleted = body.pop('deleted', False) updated_since = body.pop('updated_since', None) diff --git a/nova/api/openstack/compute/plugins/v3/cells.py b/nova/api/openstack/compute/plugins/v3/cells.py index 31c3ffb133..d542d85449 100644 --- a/nova/api/openstack/compute/plugins/v3/cells.py +++ b/nova/api/openstack/compute/plugins/v3/cells.py @@ -177,7 +177,10 @@ def show(self, req, id): def delete(self, req, id): """Delete a child or parent cell entry. 'id' is a cell name.""" context = req.environ['nova.context'] + authorize(context) + authorize(context, action="delete") + try: num_deleted = self.cells_rpcapi.cell_delete(context, id) except exception.CellsUpdateUnsupported as e: @@ -254,7 +257,10 @@ def _normalize_cell(self, cell, existing=None): def create(self, req, body): """Create a child cell entry.""" context = req.environ['nova.context'] + authorize(context) + authorize(context, action="create") + if 'cell' not in body: msg = _("No cell information in request") LOG.error(msg) @@ -277,7 +283,10 @@ def create(self, req, body): def update(self, req, id, body): """Update a child cell entry. 
'id' is the cell name to update.""" context = req.environ['nova.context'] + authorize(context) + authorize(context, action="update") + if 'cell' not in body: msg = _("No cell information in request") LOG.error(msg) @@ -311,7 +320,10 @@ def update(self, req, id, body): def sync_instances(self, req, body): """Tell all cells to sync instance info.""" context = req.environ['nova.context'] + authorize(context) + authorize(context, action="sync_instances") + project_id = body.pop('project_id', None) deleted = body.pop('deleted', False) updated_since = body.pop('updated_since', None) diff --git a/nova/tests/api/openstack/compute/contrib/test_cells.py b/nova/tests/api/openstack/compute/contrib/test_cells.py index 414ad33213..3fc6bfb525 100644 --- a/nova/tests/api/openstack/compute/contrib/test_cells.py +++ b/nova/tests/api/openstack/compute/contrib/test_cells.py @@ -133,7 +133,7 @@ def test_get_cell_by_name(self): self.assertEqual(cell['rpc_host'], 'r1.example.org') self.assertNotIn('password', cell) - def test_cell_delete(self): + def _cell_delete(self): call_info = {'delete_called': 0} def fake_cell_delete(inst, context, cell_name): @@ -143,9 +143,20 @@ def fake_cell_delete(inst, context, cell_name): self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete) req = self._get_request("cells/cell999") + req.environ['nova.context'] = self.context self.controller.delete(req, 'cell999') self.assertEqual(call_info['delete_called'], 1) + def test_cell_delete(self): + # Test cell delete with just cell policy + rules = {"default": "is_admin:true", + "compute_extension:cells": "is_admin:true"} + self.policy.set_rules(rules) + self._cell_delete() + + def test_cell_delete_with_delete_policy(self): + self._cell_delete() + def test_delete_bogus_cell_raises(self): def fake_cell_delete(inst, context, cell_name): return 0 @@ -157,7 +168,19 @@ def fake_cell_delete(inst, context, cell_name): self.assertRaises(exc.HTTPNotFound, self.controller.delete, req, 'cell999') - def 
test_cell_create_parent(self): + def test_cell_delete_fails_for_invalid_policy(self): + def fake_cell_delete(inst, context, cell_name): + pass + + self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete) + + req = self._get_request("cells/cell999") + req.environ['nova.context'] = self.context + req.environ["nova.context"].is_admin = False + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.delete, req, 'cell999') + + def _cell_create_parent(self): body = {'cell': {'name': 'meow', 'username': 'fred', 'password': 'fubar', @@ -167,6 +190,7 @@ def test_cell_create_parent(self): 'is_parent': False}} req = self._get_request("cells") + req.environ['nova.context'] = self.context res_dict = self.controller.create(req, body) cell = res_dict['cell'] @@ -177,7 +201,17 @@ def test_cell_create_parent(self): self.assertNotIn('password', cell) self.assertNotIn('is_parent', cell) - def test_cell_create_child(self): + def test_cell_create_parent(self): + # Test create with just cells policy + rules = {"default": "is_admin:true", + "compute_extension:cells": "is_admin:true"} + self.policy.set_rules(rules) + self._cell_create_parent() + + def test_cell_create_parent_with_create_policy(self): + self._cell_create_parent() + + def _cell_create_child(self): body = {'cell': {'name': 'meow', 'username': 'fred', 'password': 'fubar', @@ -185,6 +219,7 @@ def test_cell_create_child(self): 'type': 'child'}} req = self._get_request("cells") + req.environ['nova.context'] = self.context res_dict = self.controller.create(req, body) cell = res_dict['cell'] @@ -195,6 +230,16 @@ def test_cell_create_child(self): self.assertNotIn('password', cell) self.assertNotIn('is_parent', cell) + def test_cell_create_child(self): + # Test create with just cells policy + rules = {"default": "is_admin:true", + "compute_extension:cells": "is_admin:true"} + self.policy.set_rules(rules) + self._cell_create_child() + + def test_cell_create_child_with_create_policy(self): + 
self._cell_create_child() + def test_cell_create_no_name_raises(self): body = {'cell': {'username': 'moocow', 'password': 'secret', @@ -202,6 +247,7 @@ def test_cell_create_no_name_raises(self): 'type': 'parent'}} req = self._get_request("cells") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.create, req, body) @@ -213,6 +259,7 @@ def test_cell_create_name_empty_string_raises(self): 'type': 'parent'}} req = self._get_request("cells") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.create, req, body) @@ -224,6 +271,7 @@ def test_cell_create_name_with_bang_raises(self): 'type': 'parent'}} req = self._get_request("cells") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.create, req, body) @@ -235,6 +283,7 @@ def test_cell_create_name_with_dot_raises(self): 'type': 'parent'}} req = self._get_request("cells") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.create, req, body) @@ -246,14 +295,24 @@ def test_cell_create_name_with_invalid_type_raises(self): 'type': 'invalid'}} req = self._get_request("cells") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.create, req, body) - def test_cell_update(self): + def test_cell_create_fails_for_invalid_policy(self): + body = {'cell': {'name': 'fake'}} + req = self._get_request("cells") + req.environ['nova.context'] = self.context + req.environ['nova.context'].is_admin = False + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.create, req, body) + + def _cell_update(self): body = {'cell': {'username': 'zeb', 'password': 'sneaky'}} req = self._get_request("cells/cell1") + req.environ['nova.context'] = self.context res_dict = self.controller.update(req, 'cell1', body) cell = res_dict['cell'] @@ -262,12 +321,31 @@ def test_cell_update(self): 
self.assertEqual(cell['username'], 'zeb') self.assertNotIn('password', cell) + def test_cell_update(self): + # Test cell update with just cell policy + rules = {"default": "is_admin:true", + "compute_extension:cells": "is_admin:true"} + self.policy.set_rules(rules) + self._cell_update() + + def test_cell_update_with_update_policy(self): + self._cell_update() + + def test_cell_update_fails_for_invalid_policy(self): + body = {'cell': {'name': 'got_changed'}} + req = self._get_request("cells/cell1") + req.environ['nova.context'] = self.context + req.environ['nova.context'].is_admin = False + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.create, req, body) + def test_cell_update_empty_name_raises(self): body = {'cell': {'name': '', 'username': 'zeb', 'password': 'sneaky'}} req = self._get_request("cells/cell1") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.update, req, 'cell1', body) @@ -277,6 +355,7 @@ def test_cell_update_invalid_type_raises(self): 'password': 'sneaky'}} req = self._get_request("cells/cell1") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.update, req, 'cell1', body) @@ -284,6 +363,7 @@ def test_cell_update_without_type_specified(self): body = {'cell': {'username': 'wingwj'}} req = self._get_request("cells/cell1") + req.environ['nova.context'] = self.context res_dict = self.controller.update(req, 'cell1', body) cell = res_dict['cell'] @@ -297,10 +377,12 @@ def test_cell_update_with_type_specified(self): body2 = {'cell': {'username': 'wingwj', 'type': 'parent'}} req1 = self._get_request("cells/cell1") + req1.environ['nova.context'] = self.context res_dict1 = self.controller.update(req1, 'cell1', body1) cell1 = res_dict1['cell'] req2 = self._get_request("cells/cell2") + req2.environ['nova.context'] = self.context res_dict2 = self.controller.update(req2, 'cell2', body2) cell2 = res_dict2['cell'] @@ -406,6 +488,7 @@ def 
sync_instances(self, context, **kwargs): self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances) req = self._get_request("cells/sync_instances") + req.environ['nova.context'] = self.context body = {} self.controller.sync_instances(req, body=body) self.assertIsNone(call_info['project_id']) @@ -455,6 +538,20 @@ def sync_instances(self, context, **kwargs): self.assertRaises(exc.HTTPBadRequest, self.controller.sync_instances, req, body=body) + def test_sync_instances_fails_for_invalid_policy(self): + def sync_instances(self, context, **kwargs): + pass + + self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances) + + req = self._get_request("cells/sync_instances") + req.environ['nova.context'] = self.context + req.environ['nova.context'].is_admin = False + + body = {} + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.sync_instances, req, body) + def test_cells_disabled(self): self.flags(enable=False, group='cells') diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_cells.py b/nova/tests/api/openstack/compute/plugins/v3/test_cells.py index 874b1ffc08..63cf2d96bd 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_cells.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_cells.py @@ -128,7 +128,7 @@ def test_get_cell_by_name(self): self.assertEqual(cell['rpc_host'], 'r1.example.org') self.assertNotIn('password', cell) - def test_cell_delete(self): + def _cell_delete(self): call_info = {'delete_called': 0} def fake_cell_delete(inst, context, cell_name): @@ -138,9 +138,20 @@ def fake_cell_delete(inst, context, cell_name): self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete) req = self._get_request("cells/cell999") + req.environ['nova.context'] = self.context self.controller.delete(req, 'cell999') self.assertEqual(call_info['delete_called'], 1) + def test_cell_delete(self): + # Test delete with just cells policy + rules = {"default": "is_admin:true", + 
"compute_extension:v3:os-cells": "is_admin:true"} + self.policy.set_rules(rules) + self._cell_delete() + + def test_cell_delete_with_delete_policy(self): + self._cell_delete() + def test_delete_bogus_cell_raises(self): def fake_cell_delete(inst, context, cell_name): return 0 @@ -152,7 +163,19 @@ def fake_cell_delete(inst, context, cell_name): self.assertRaises(exc.HTTPNotFound, self.controller.delete, req, 'cell999') - def test_cell_create_parent(self): + def test_cell_delete_fails_for_invalid_policy(self): + def fake_cell_delete(inst, context, cell_name): + pass + + self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete) + + req = self._get_request("cells/cell999") + req.environ['nova.context'] = self.context + req.environ["nova.context"].is_admin = False + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.delete, req, 'cell999') + + def _cell_create_parent(self): body = {'cell': {'name': 'meow', 'username': 'fred', 'password': 'fubar', @@ -162,6 +185,7 @@ def test_cell_create_parent(self): 'is_parent': False}} req = self._get_request("cells") + req.environ['nova.context'] = self.context res_dict = self.controller.create(req, body) cell = res_dict['cell'] self.assertEqual(self.controller.create.wsgi_code, 201) @@ -172,7 +196,17 @@ def test_cell_create_parent(self): self.assertNotIn('password', cell) self.assertNotIn('is_parent', cell) - def test_cell_create_child(self): + def test_cell_create_parent(self): + # Test create with just cells policy + rules = {"default": "is_admin:true", + "compute_extension:v3:os-cells": "is_admin:true"} + self.policy.set_rules(rules) + self._cell_create_parent() + + def test_cell_create_parent_with_create_policy(self): + self._cell_create_parent() + + def _cell_create_child(self): body = {'cell': {'name': 'meow', 'username': 'fred', 'password': 'fubar', @@ -180,6 +214,7 @@ def test_cell_create_child(self): 'type': 'child'}} req = self._get_request("cells") + req.environ['nova.context'] = self.context 
res_dict = self.controller.create(req, body) cell = res_dict['cell'] self.assertEqual(self.controller.create.wsgi_code, 201) @@ -190,6 +225,16 @@ def test_cell_create_child(self): self.assertNotIn('password', cell) self.assertNotIn('is_parent', cell) + def test_cell_create_child(self): + # Test create child with just cells policy + rules = {"default": "is_admin:true", + "compute_extension:v3:os-cells": "is_admin:true"} + self.policy.set_rules(rules) + self._cell_create_child() + + def test_cell_create_child_with_create_policy(self): + self._cell_create_child() + def test_cell_create_no_name_raises(self): body = {'cell': {'username': 'moocow', 'password': 'secret', @@ -197,6 +242,7 @@ def test_cell_create_no_name_raises(self): 'type': 'parent'}} req = self._get_request("cells") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.create, req, body) @@ -208,6 +254,7 @@ def test_cell_create_name_empty_string_raises(self): 'type': 'parent'}} req = self._get_request("cells") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.create, req, body) @@ -219,6 +266,7 @@ def test_cell_create_name_with_bang_raises(self): 'type': 'parent'}} req = self._get_request("cells") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.create, req, body) @@ -230,6 +278,7 @@ def test_cell_create_name_with_dot_raises(self): 'type': 'parent'}} req = self._get_request("cells") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.create, req, body) @@ -241,14 +290,24 @@ def test_cell_create_name_with_invalid_type_raises(self): 'type': 'invalid'}} req = self._get_request("cells") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.create, req, body) - def test_cell_update(self): + def test_cell_create_fails_for_invalid_policy(self): + body = {'cell': {'name': 'fake'}} 
+ req = self._get_request("cells") + req.environ['nova.context'] = self.context + req.environ['nova.context'].is_admin = False + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.create, req, body) + + def _cell_update(self): body = {'cell': {'username': 'zeb', 'password': 'sneaky'}} req = self._get_request("cells/cell1") + req.environ['nova.context'] = self.context res_dict = self.controller.update(req, 'cell1', body) cell = res_dict['cell'] @@ -257,12 +316,31 @@ def test_cell_update(self): self.assertEqual(cell['username'], 'zeb') self.assertNotIn('password', cell) + def test_cell_update(self): + # Test update with just cells policy + rules = {"default": "is_admin:true", + "compute_extension:v3:os-cells": "is_admin:true"} + self.policy.set_rules(rules) + self._cell_update() + + def test_cell_update_with_update_policy(self): + self._cell_update() + + def test_cell_update_fails_for_invalid_policy(self): + body = {'cell': {'name': 'got_changed'}} + req = self._get_request("cells/cell1") + req.environ['nova.context'] = self.context + req.environ['nova.context'].is_admin = False + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.create, req, body) + def test_cell_update_empty_name_raises(self): body = {'cell': {'name': '', 'username': 'zeb', 'password': 'sneaky'}} req = self._get_request("cells/cell1") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.update, req, 'cell1', body) @@ -272,6 +350,7 @@ def test_cell_update_invalid_type_raises(self): 'password': 'sneaky'}} req = self._get_request("cells/cell1") + req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, self.controller.update, req, 'cell1', body) @@ -279,6 +358,7 @@ def test_cell_update_without_type_specified(self): body = {'cell': {'username': 'wingwj'}} req = self._get_request("cells/cell1") + req.environ['nova.context'] = self.context res_dict = self.controller.update(req, 'cell1', body) cell = 
res_dict['cell'] @@ -292,10 +372,12 @@ def test_cell_update_with_type_specified(self): body2 = {'cell': {'username': 'wingwj', 'type': 'parent'}} req1 = self._get_request("cells/cell1") + req1.environ['nova.context'] = self.context res_dict1 = self.controller.update(req1, 'cell1', body1) cell1 = res_dict1['cell'] req2 = self._get_request("cells/cell2") + req2.environ['nova.context'] = self.context res_dict2 = self.controller.update(req2, 'cell2', body2) cell2 = res_dict2['cell'] @@ -343,7 +425,7 @@ def test_show_capacities(self): self.assertEqual(response, res_dict['cell']['capacities']) def test_show_capacity_fails_with_non_admin_context(self): - rules = {"compute_extension:cells": "is_admin:true"} + rules = {"compute_extension:v3:os-cells": "is_admin:true"} self.policy.set_rules(rules) self.mox.ReplayAll() @@ -397,6 +479,7 @@ def sync_instances(self, context, **kwargs): self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances) req = self._get_request("cells/sync_instances") + req.environ['nova.context'] = self.context body = {} self.controller.sync_instances(req, body=body) self.assertIsNone(call_info['project_id']) @@ -446,6 +529,20 @@ def sync_instances(self, context, **kwargs): self.assertRaises(exc.HTTPBadRequest, self.controller.sync_instances, req, body=body) + def test_sync_instances_fails_for_invalid_policy(self): + def sync_instances(self, context, **kwargs): + pass + + self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances) + + req = self._get_request("cells/sync_instances") + req.environ['nova.context'] = self.context + req.environ['nova.context'].is_admin = False + + body = {} + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.sync_instances, req, body) + def test_cells_disabled(self): self.flags(enable=False, group='cells') diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py index 38d9cd0584..181ad7a438 100644 --- a/nova/tests/fake_policy.py +++ b/nova/tests/fake_policy.py @@ -134,7 
+134,15 @@ "compute_extension:v3:os-attach-interfaces": "", "compute_extension:baremetal_nodes": "", "compute_extension:cells": "", + "compute_extension:cells:create": "rule:admin_api", + "compute_extension:cells:delete": "rule:admin_api", + "compute_extension:cells:update": "rule:admin_api", + "compute_extension:cells:sync_instances": "rule:admin_api", "compute_extension:v3:os-cells": "", + "compute_extension:v3:os-cells:create": "rule:admin_api", + "compute_extension:v3:os-cells:delete": "rule:admin_api", + "compute_extension:v3:os-cells:update": "rule:admin_api", + "compute_extension:v3:os-cells:sync_instances": "rule:admin_api", "compute_extension:certificates": "", "compute_extension:v3:os-certificates:create": "", "compute_extension:v3:os-certificates:show": "", From be58dd8432a8d12484f5553d79a02e720e2c0435 Mon Sep 17 00:00:00 2001 From: Andrew Laski Date: Fri, 13 Jun 2014 17:15:00 -0400 Subject: [PATCH 058/486] Xen: Cleanup orphan volume connections on boot failure If the boot process fails after the VDI creation but before a VBD is created then the current cleanup methods will not work as they all rely on lookups via VBD. 
Change-Id: Id0d93ee60f75bf319baf7859b220ca325175128a Closes-bug: #1329941 --- nova/tests/virt/xenapi/test_vmops.py | 3 ++ nova/tests/virt/xenapi/test_volume_utils.py | 30 +++++++++++++ nova/tests/virt/xenapi/test_volumeops.py | 49 +++++++++++++++++++++ nova/virt/xenapi/vmops.py | 3 ++ nova/virt/xenapi/volume_utils.py | 11 +++++ nova/virt/xenapi/volumeops.py | 17 +++++++ 6 files changed, 113 insertions(+) diff --git a/nova/tests/virt/xenapi/test_vmops.py b/nova/tests/virt/xenapi/test_vmops.py index 47b0db457c..7fcea40f70 100644 --- a/nova/tests/virt/xenapi/test_vmops.py +++ b/nova/tests/virt/xenapi/test_vmops.py @@ -213,6 +213,8 @@ def _stub_out_common(self): self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type') self.mox.StubOutWithMock(vm_utils, 'get_vdis_for_instance') self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis') + self.mox.StubOutWithMock(self.vmops._volumeops, + 'safe_cleanup_from_vdis') self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis') self.mox.StubOutWithMock(vm_utils, 'create_kernel_and_ramdisk') @@ -370,6 +372,7 @@ def _test_spawn(self, name_label_param=None, block_device_info_param=None, vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance, kernel_file, ramdisk_file) vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"]) + self.vmops._volumeops.safe_cleanup_from_vdis(["fake_ref_2"]) self.mox.ReplayAll() self.vmops.spawn(context, instance, image_meta, injected_files, diff --git a/nova/tests/virt/xenapi/test_volume_utils.py b/nova/tests/virt/xenapi/test_volume_utils.py index 1779bf2e50..f0049a6b33 100644 --- a/nova/tests/virt/xenapi/test_volume_utils.py +++ b/nova/tests/virt/xenapi/test_volume_utils.py @@ -41,6 +41,36 @@ class UUIDException(Exception): 'sr_uuid'), None) + def test_find_sr_from_vdi(self): + vdi_ref = 'fake-ref' + + def fake_call_xenapi(method, *args): + self.assertEqual(method, 'VDI.get_SR') + self.assertEqual(args[0], vdi_ref) + return args[0] + + session = mock.Mock() + 
session.call_xenapi.side_effect = fake_call_xenapi + self.assertEqual(volume_utils.find_sr_from_vdi(session, vdi_ref), + vdi_ref) + + def test_find_sr_from_vdi_exception(self): + vdi_ref = 'fake-ref' + + class FakeException(Exception): + pass + + def fake_call_xenapi(method, *args): + self.assertEqual(method, 'VDI.get_SR') + self.assertEqual(args[0], vdi_ref) + return args[0] + + session = mock.Mock() + session.XenAPI.Failure = FakeException + session.call_xenapi.side_effect = FakeException + self.assertRaises(exception.StorageError, + volume_utils.find_sr_from_vdi, session, vdi_ref) + class ISCSIParametersTestCase(stubs.XenAPITestBaseNoDB): def test_target_host(self): diff --git a/nova/tests/virt/xenapi/test_volumeops.py b/nova/tests/virt/xenapi/test_volumeops.py index 5fca3878d4..fbb4ad09af 100644 --- a/nova/tests/virt/xenapi/test_volumeops.py +++ b/nova/tests/virt/xenapi/test_volumeops.py @@ -498,3 +498,52 @@ class FakeException(Exception): self.assertRaises(FakeException, self.ops.find_bad_volumes, "vm_ref") mock_scan.assert_called_once_with("sr_ref") + + +class CleanupFromVDIsTestCase(VolumeOpsTestBase): + def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs, + sr_refs): + find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref + in vdi_refs] + find_sr_from_vdi.assert_has_calls(find_sr_calls) + purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref + in sr_refs] + purge_sr.assert_has_calls(purge_sr_calls) + + @mock.patch.object(volume_utils, 'find_sr_from_vdi') + @mock.patch.object(volume_utils, 'purge_sr') + def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi): + vdi_refs = ['vdi_ref1', 'vdi_ref2'] + sr_refs = ['sr_ref1', 'sr_ref2'] + find_sr_from_vdi.side_effect = sr_refs + self.ops.safe_cleanup_from_vdis(vdi_refs) + + self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs, + sr_refs) + + @mock.patch.object(volume_utils, 'find_sr_from_vdi', + side_effect=[exception.StorageError(reason=''), 
'sr_ref2']) + @mock.patch.object(volume_utils, 'purge_sr') + def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr, + find_sr_from_vdi): + vdi_refs = ['vdi_ref1', 'vdi_ref2'] + sr_refs = ['sr_ref2'] + find_sr_from_vdi.side_effect = [exception.StorageError(reason=''), + sr_refs[0]] + self.ops.safe_cleanup_from_vdis(vdi_refs) + + self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs, + sr_refs) + + @mock.patch.object(volume_utils, 'find_sr_from_vdi') + @mock.patch.object(volume_utils, 'purge_sr') + def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr, + find_sr_from_vdi): + vdi_refs = ['vdi_ref1', 'vdi_ref2'] + sr_refs = ['sr_ref1', 'sr_ref2'] + find_sr_from_vdi.side_effect = sr_refs + purge_sr.side_effects = [test.TestingException, None] + self.ops.safe_cleanup_from_vdis(vdi_refs) + + self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs, + sr_refs) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index c7db7ea992..f8b913b6b5 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -341,6 +341,9 @@ def undo_create_disks(): vdi_refs = [vdi['ref'] for vdi in vdis.values() if not vdi.get('osvol')] vm_utils.safe_destroy_vdis(self._session, vdi_refs) + vol_vdi_refs = [vdi['ref'] for vdi in vdis.values() + if vdi.get('osvol')] + self._volumeops.safe_cleanup_from_vdis(vol_vdi_refs) undo_mgr.undo_with(undo_create_disks) return vdis diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index d1a01d58f2..f1537914be 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -301,6 +301,17 @@ def find_sr_from_vbd(session, vbd_ref): return sr_ref +def find_sr_from_vdi(session, vdi_ref): + """Find the SR reference from the VDI reference.""" + try: + sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref) + except session.XenAPI.Failure as exc: + LOG.exception(exc) + raise exception.StorageError( + reason=_('Unable to find SR from 
VDI %s') % vdi_ref) + return sr_ref + + def find_vbd_by_number(session, vm_ref, dev_number): """Get the VBD reference from the device number.""" vbd_refs = session.VM.get_VBDs(vm_ref) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 5eb28165af..f6b4bde804 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -204,3 +204,20 @@ def find_bad_volumes(self, vm_ref): raise return bad_devices + + def safe_cleanup_from_vdis(self, vdi_refs): + # A helper method to detach volumes that are not associated with an + # instance + + for vdi_ref in vdi_refs: + try: + sr_ref = volume_utils.find_sr_from_vdi(self._session, vdi_ref) + except exception.StorageError as exc: + LOG.debug(exc.format_message()) + continue + try: + # Forget (i.e. disconnect) SR only if not in use + volume_utils.purge_sr(self._session, sr_ref) + except Exception: + LOG.debug('Ignoring error while purging sr: %s' % sr_ref, + exc_info=True) From 6ddd9f93f82427ce909c7773f7a806361035a0b2 Mon Sep 17 00:00:00 2001 From: Andrew Laski Date: Fri, 11 Jul 2014 14:56:22 -0400 Subject: [PATCH 059/486] Don't remove delete_on_terminate volumes on a reschedule When cleaning up volumes before a reschedule if delete_on_terminate is True the volume would be deleted. That's not the desired behavior so the volume cleanup has been moved to take place when a build is aborted. 
Change-Id: I142370c0555495b4d51736f4f6b8070a3c112a59 Closes-bug: #1336127 --- nova/compute/manager.py | 18 ++----- nova/tests/compute/test_compute_mgr.py | 68 ++++++++++---------------- 2 files changed, 30 insertions(+), 56 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 05c452b097..b29296512b 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1934,6 +1934,8 @@ def do_build_and_run_instance(context, instance, image, request_spec, LOG.exception(e.format_message(), instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) + self._cleanup_volumes(context, instance.uuid, + block_device_mapping, raise_exc=False) self._set_instance_error_state(context, instance) except Exception: # Should not reach here. @@ -1941,6 +1943,8 @@ def do_build_and_run_instance(context, instance, image, request_spec, LOG.exception(msg, instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) + self._cleanup_volumes(context, instance.uuid, + block_device_mapping, raise_exc=False) self._set_instance_error_state(context, instance) do_build_and_run_instance(context, instance, image, request_spec, @@ -2103,8 +2107,6 @@ def _build_resources(self, context, instance, requested_networks, self._shutdown_instance(context, instance, block_device_mapping, requested_networks, try_deallocate_networks=False) - self._cleanup_build_resources(context, instance, - block_device_mapping) except Exception: ctxt.reraise = False msg = _('Could not clean up failed build,' @@ -2130,18 +2132,6 @@ def _cleanup_allocated_networks(self, context, instance, # exception will be raised by instance.save() pass - def _cleanup_build_resources(self, context, instance, - block_device_mapping): - # Don't clean up networks here in case we reschedule - try: - self._cleanup_volumes(context, instance.uuid, - block_device_mapping) - except Exception: - with excutils.save_and_reraise_exception(): - msg = _('Failed to 
cleanup volumes for failed build,' - ' not rescheduling') - LOG.exception(msg, instance=instance) - @object_compat @messaging.expected_exceptions(exception.BuildAbortException, exception.UnexpectedTaskStateError, diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 91abb7696c..7f353cca02 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -1738,6 +1738,7 @@ def test_build_and_run_instance_called_with_proper_args(self): def test_build_abort_exception(self): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') + self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self.mox.StubOutWithMock(self.compute, '_set_instance_error_state') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') @@ -1751,6 +1752,8 @@ def test_build_abort_exception(self): instance_uuid=self.instance['uuid'])) self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) + self.compute._cleanup_volumes(self.context, self.instance.uuid, + self.block_device_mapping, raise_exc=False) self.compute._set_instance_error_state(self.context, self.instance) self._instance_action_events() self.mox.ReplayAll() @@ -1799,6 +1802,7 @@ def test_rescheduled_exception_without_retry(self): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute, '_set_instance_error_state') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') + self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self._do_build_instance_update() self.compute._build_and_run_instance(self.context, self.instance, self.image, self.injected_files, self.admin_pass, @@ -1890,9 +1894,11 @@ def test_rescheduled_exception_deallocate_network_if_dhcp(self): block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) - def 
_test_build_and_run_exceptions(self, exc, set_error=False): + def _test_build_and_run_exceptions(self, exc, set_error=False, + cleanup_volumes=False): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') + self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') self._do_build_instance_update() @@ -1903,6 +1909,9 @@ def _test_build_and_run_exceptions(self, exc, set_error=False): self.filter_properties).AndRaise(exc) self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) + if cleanup_volumes: + self.compute._cleanup_volumes(self.context, self.instance.uuid, + self.block_device_mapping, raise_exc=False) if set_error: self.mox.StubOutWithMock(self.compute, '_set_instance_error_state') self.compute._set_instance_error_state(self.context, self.instance) @@ -1918,21 +1927,23 @@ def _test_build_and_run_exceptions(self, exc, set_error=False): security_groups=self.security_groups, block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) - self.mox.UnsetStubs() - def test_build_and_run_instance_exceptions(self): - exceptions = [ - exception.InstanceNotFound(instance_id=''), + def test_build_and_run_notfound_exception(self): + self._test_build_and_run_exceptions(exception.InstanceNotFound( + instance_id='')) + + def test_build_and_run_unexpecteddeleting_exception(self): + self._test_build_and_run_exceptions( exception.UnexpectedDeletingTaskStateError(expected='', - actual='')] - error_exceptions = [ - exception.BuildAbortException(instance_uuid='', reason=''), - test.TestingException()] + actual='')) - for exc in exceptions: - self._test_build_and_run_exceptions(exc) - for exc in error_exceptions: - self._test_build_and_run_exceptions(exc, set_error=True) + def test_build_and_run_buildabort_exception(self): + 
self._test_build_and_run_exceptions(exception.BuildAbortException( + instance_uuid='', reason=''), set_error=True, cleanup_volumes=True) + + def test_build_and_run_unhandled_exception(self): + self._test_build_and_run_exceptions(test.TestingException(), + set_error=True, cleanup_volumes=True) def test_instance_not_found(self): exc = exception.InstanceNotFound(instance_id=1) @@ -2228,7 +2239,6 @@ def test_failed_network_alloc_from_delete_raises_unexpected(self): self.requested_networks, self.security_groups)) def test_build_resources_with_network_info_obj_on_spawn_failure(self): - self.mox.StubOutWithMock(self.compute, '_cleanup_build_resources') self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, @@ -2238,8 +2248,6 @@ def test_build_resources_with_network_info_obj_on_spawn_failure(self): self.block_device_mapping, self.requested_networks, try_deallocate_networks=False) self._build_resources_instance_update() - self.compute._cleanup_build_resources(self.context, self.instance, - self.block_device_mapping) self.mox.ReplayAll() test_exception = test.TestingException() @@ -2256,7 +2264,6 @@ def fake_spawn(): self.assertEqual(test_exception, e) def test_build_resources_cleans_up_and_reraises_on_spawn_failure(self): - self.mox.StubOutWithMock(self.compute, '_cleanup_build_resources') self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, @@ -2266,8 +2273,6 @@ def test_build_resources_cleans_up_and_reraises_on_spawn_failure(self): self.block_device_mapping, self.requested_networks, try_deallocate_networks=False) self._build_resources_instance_update() - self.compute._cleanup_build_resources(self.context, self.instance, - self.block_device_mapping) self.mox.ReplayAll() 
test_exception = test.TestingException() @@ -2284,7 +2289,6 @@ def fake_spawn(): self.assertEqual(test_exception, e) def test_build_resources_aborts_on_cleanup_failure(self): - self.mox.StubOutWithMock(self.compute, '_cleanup_build_resources') self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') self.mox.StubOutWithMock(self.compute, '_shutdown_instance') self.compute._build_networks_for_instance(self.context, self.instance, @@ -2292,10 +2296,9 @@ def test_build_resources_aborts_on_cleanup_failure(self): self.network_info) self.compute._shutdown_instance(self.context, self.instance, self.block_device_mapping, self.requested_networks, - try_deallocate_networks=False) + try_deallocate_networks=False).AndRaise( + test.TestingException()) self._build_resources_instance_update() - self.compute._cleanup_build_resources(self.context, self.instance, - self.block_device_mapping).AndRaise(test.TestingException()) self.mox.ReplayAll() def fake_spawn(): @@ -2309,25 +2312,6 @@ def fake_spawn(): except Exception as e: self.assertIsInstance(e, exception.BuildAbortException) - def test_cleanup_cleans_volumes(self): - self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') - self.compute._cleanup_volumes(self.context, self.instance['uuid'], - self.block_device_mapping) - self.mox.ReplayAll() - - self.compute._cleanup_build_resources(self.context, self.instance, - self.block_device_mapping) - - def test_cleanup_reraises_volume_cleanup_failure(self): - self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') - self.compute._cleanup_volumes(self.context, self.instance['uuid'], - self.block_device_mapping).AndRaise(test.TestingException()) - self.mox.ReplayAll() - - self.assertRaises(test.TestingException, - self.compute._cleanup_build_resources, self.context, - self.instance, self.block_device_mapping) - def test_build_networks_if_not_allocated(self): instance = fake_instance.fake_instance_obj(self.context, system_metadata={}, From 
20f55e78828dc350e245aeab0a0b675c393b7409 Mon Sep 17 00:00:00 2001 From: Jay Lau Date: Wed, 16 Jul 2014 19:29:24 -0400 Subject: [PATCH 060/486] Enable live migration unit test use instance object test_post_live_migration_no_shared_storage_working_correctly is still using instance dict, we should change it to using instance object. Related to blueprint compute-manager-objects-juno Change-Id: I7709b32ebeef88eb9759d0275c3076091b6be669 --- nova/tests/compute/test_compute.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index f5dc27345d..387dea0e07 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -5353,32 +5353,30 @@ def fakecleanup(*args, **kwargs): # creating testdata c = context.get_admin_context() - inst_ref = jsonutils.to_primitive(self._create_fake_instance({ + instance = self._create_fake_instance_obj({ 'host': srchost, 'state_description': 'migrating', - 'state': power_state.PAUSED})) - inst_uuid = inst_ref['uuid'] - db.instance_update(c, inst_uuid, - {'task_state': task_states.MIGRATING, - 'power_state': power_state.PAUSED}) + 'state': power_state.PAUSED, + 'task_state': task_states.MIGRATING, + 'power_state': power_state.PAUSED}) # creating mocks self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance') - self.compute.driver.unfilter_instance(inst_ref, []) + self.compute.driver.unfilter_instance(instance, []) self.mox.StubOutWithMock(self.compute.network_api, 'migrate_instance_start') migration = {'source_compute': srchost, 'dest_compute': dest, } - self.compute.network_api.migrate_instance_start(c, inst_ref, + self.compute.network_api.migrate_instance_start(c, instance, migration) self.mox.StubOutWithMock(self.compute.compute_rpcapi, 'post_live_migration_at_destination') self.compute.compute_rpcapi.post_live_migration_at_destination( - c, inst_ref, False, dest) + c, instance, False, dest) 
self.mox.StubOutWithMock(self.compute.network_api, 'setup_networks_on_host') - self.compute.network_api.setup_networks_on_host(c, inst_ref, + self.compute.network_api.setup_networks_on_host(c, instance, self.compute.host, teardown=True) self.mox.StubOutWithMock(self.compute.instance_events, @@ -5389,7 +5387,7 @@ def fakecleanup(*args, **kwargs): # start test self.mox.ReplayAll() migrate_data = {'is_shared_instance_path': False} - self.compute._post_live_migration(c, inst_ref, dest, + self.compute._post_live_migration(c, instance, dest, migrate_data=migrate_data) self.assertIn('cleanup', result) self.assertEqual(result['cleanup'], True) From 9976193cbd22edb3e1129a447770f3138c3865aa Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 2 Dec 2013 05:26:47 -0800 Subject: [PATCH 061/486] VMware: support the hotplug of a neutron port Nova already has support for hotplugging neutorn ports in the libvirt driver. This extends the support to the VMware driver. Implements the blueprint vmware-hot-plug Change-Id: I01b78621016e20c8e61d48bdeeac166f08f799be --- nova/tests/virt/vmwareapi/fake.py | 19 +++ nova/tests/virt/vmwareapi/test_driver_api.py | 116 ++++++++++++++++++- nova/tests/virt/vmwareapi/test_vm_util.py | 103 +++++++++++++++- nova/virt/vmwareapi/driver.py | 18 +++ nova/virt/vmwareapi/vif.py | 33 ++++-- nova/virt/vmwareapi/vm_util.py | 101 ++++++++++++++-- nova/virt/vmwareapi/vmops.py | 68 +++++++++++ 7 files changed, 434 insertions(+), 24 deletions(-) diff --git a/nova/tests/virt/vmwareapi/fake.py b/nova/tests/virt/vmwareapi/fake.py index 7639aef8cd..fc5589f0f4 100644 --- a/nova/tests/virt/vmwareapi/fake.py +++ b/nova/tests/virt/vmwareapi/fake.py @@ -439,6 +439,20 @@ def __init__(self, **kwargs): ('featureRequirement', [key1, key2])] self.set("summary.runtime", runtime) + def _update_extra_config(self, extra): + extra_config = self.get("config.extraConfig") + values = extra_config.OptionValue + for value in values: + if value.key == extra.key: + value.value = 
extra.value + return + kv = DataObject() + kv.key = extra.key + kv.value = extra.value + extra_config.OptionValue.append(kv) + self.set("config.extraConfig", extra_config) + extra_config = self.get("config.extraConfig") + def reconfig(self, factory, val): """Called to reconfigure the VM. Actually customizes the property setting of the Virtual Machine object. @@ -462,6 +476,11 @@ def reconfig(self, factory, val): if not hasattr(val, 'deviceChange'): return + if hasattr(val, 'extraConfig'): + # there are 2 cases - new entry or update an existing one + for extra in val.extraConfig: + self._update_extra_config(extra) + if len(val.deviceChange) < 2: return diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index a8b06d285c..f00ac902f8 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -38,6 +38,7 @@ from nova import context from nova import exception from nova.image import glance +from nova.network import model as network_model from nova.openstack.common import jsonutils from nova.openstack.common import timeutils from nova.openstack.common import units @@ -59,6 +60,7 @@ from nova.virt.vmwareapi import error_util from nova.virt.vmwareapi import imagecache from nova.virt.vmwareapi import read_write_util +from nova.virt.vmwareapi import vif from nova.virt.vmwareapi import vim from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vm_util @@ -565,7 +567,8 @@ def _check_vm_record(self, num_instances=1, powered_on=True): found_vm_uuid = False found_iface_id = False - for c in vm.get("config.extraConfig").OptionValue: + extras = vm.get("config.extraConfig") + for c in extras.OptionValue: if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']): found_vm_uuid = True if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"): @@ -2359,6 +2362,117 @@ def test_unplug_vifs(self): self.conn.unplug_vifs, instance=self.instance, 
network_info=None) + def _create_vif(self): + gw_4 = network_model.IP(address='101.168.1.1', type='gateway') + dns_4 = network_model.IP(address='8.8.8.8', type=None) + subnet_4 = network_model.Subnet(cidr='101.168.1.0/24', + dns=[dns_4], + gateway=gw_4, + routes=None, + dhcp_server='191.168.1.1') + + gw_6 = network_model.IP(address='101:1db9::1', type='gateway') + subnet_6 = network_model.Subnet(cidr='101:1db9::/64', + dns=None, + gateway=gw_6, + ips=None, + routes=None) + + network_neutron = network_model.Network(id='network-id-xxx-yyy-zzz', + bridge=None, + label=None, + subnets=[subnet_4, + subnet_6], + bridge_interface='eth0', + vlan=99) + + vif_bridge_neutron = network_model.VIF(id='new-vif-xxx-yyy-zzz', + address='ca:fe:de:ad:be:ef', + network=network_neutron, + type=None, + devname='tap-xxx-yyy-zzz', + ovs_interfaceid='aaa-bbb-ccc') + return vif_bridge_neutron + + def _validate_interfaces(self, id, index, num_iface_ids): + vm = self._get_vm_record() + found_iface_id = False + extras = vm.get("config.extraConfig") + key = "nvp.iface-id.%s" % index + num_found = 0 + for c in extras.OptionValue: + if c.key.startswith("nvp.iface-id."): + num_found += 1 + if c.key == key and c.value == id: + found_iface_id = True + self.assertTrue(found_iface_id) + self.assertEqual(num_found, num_iface_ids) + + def _attach_interface(self, vif): + self.conn.attach_interface(self.instance, self.image, vif) + self._validate_interfaces(vif['id'], 1, 2) + + def test_attach_interface(self): + self._create_vm() + vif = self._create_vif() + self._attach_interface(vif) + + def test_attach_interface_with_exception(self): + self._create_vm() + vif = self._create_vif() + + with mock.patch.object(self.conn._session, '_wait_for_task', + side_effect=Exception): + self.assertRaises(exception.InterfaceAttachFailed, + self.conn.attach_interface, + self.instance, self.image, vif) + + @mock.patch.object(vif, 'get_network_device', + return_value='fake_device') + def _detach_interface(self, vif, 
mock_get_device): + self._create_vm() + self._attach_interface(vif) + self.conn.detach_interface(self.instance, vif) + self._validate_interfaces('free', 1, 2) + + def test_detach_interface(self): + vif = self._create_vif() + self._detach_interface(vif) + + def test_detach_interface_and_attach(self): + vif = self._create_vif() + self._detach_interface(vif) + self.conn.attach_interface(self.instance, self.image, vif) + self._validate_interfaces(vif['id'], 1, 2) + + def test_detach_interface_no_device(self): + self._create_vm() + vif = self._create_vif() + self._attach_interface(vif) + self.assertRaises(exception.NotFound, self.conn.detach_interface, + self.instance, vif) + + def test_detach_interface_no_vif_match(self): + self._create_vm() + vif = self._create_vif() + self._attach_interface(vif) + vif['id'] = 'bad-id' + self.assertRaises(exception.NotFound, self.conn.detach_interface, + self.instance, vif) + + @mock.patch.object(vif, 'get_network_device', + return_value='fake_device') + def test_detach_interface_with_exception(self, mock_get_device): + self._create_vm() + vif = self._create_vif() + self._attach_interface(vif) + + with mock.patch.object(self.conn._session, '_wait_for_task', + side_effect=Exception): + self.assertRaises(exception.InterfaceDetachFailed, + self.conn.detach_interface, + self.instance, vif) + def test_migrate_disk_and_power_off(self): def fake_update_instance_progress(context, instance, step, total_steps): diff --git a/nova/tests/virt/vmwareapi/test_vm_util.py b/nova/tests/virt/vmwareapi/test_vm_util.py index 15c4be3503..f94ab168fd 100644 --- a/nova/tests/virt/vmwareapi/test_vm_util.py +++ b/nova/tests/virt/vmwareapi/test_vm_util.py @@ -699,18 +699,18 @@ def fake_wait_for_task(self, *args): def test_convert_vif_model(self): expected = "VirtualE1000" - result = vm_util._convert_vif_model(network_model.VIF_MODEL_E1000) + result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000) self.assertEqual(expected, result) expected = 
"VirtualE1000e" - result = vm_util._convert_vif_model(network_model.VIF_MODEL_E1000E) + result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E) self.assertEqual(expected, result) types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32", "VirtualVmxnet"] for type in types: self.assertEqual(type, - vm_util._convert_vif_model(type)) + vm_util.convert_vif_model(type)) self.assertRaises(exception.Invalid, - vm_util._convert_vif_model, + vm_util.convert_vif_model, "InvalidVifModel") def test_power_on_instance_with_vm_ref(self): @@ -855,3 +855,98 @@ def test_reconfigure_vm(self): 'ReconfigVM_Task', 'fake-ref', spec='fake-spec') _wait_for_task.assert_called_once_with( 'fake_reconfigure_task') + + def test_get_network_attach_config_spec_opaque(self): + vif_info = {'network_name': 'br-int', + 'mac_address': '00:00:00:ca:fe:01', + 'network_ref': {'type': 'OpaqueNetwork', + 'network-id': 'fake-network-id', + 'network-type': 'opaque'}, + 'iface_id': 7, + 'vif_model': 'VirtualE1000'} + result = vm_util.get_network_attach_config_spec( + fake.FakeFactory(), vif_info, 1) + card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo' + expected = """{ + 'extraConfig': [{'value': 7, + 'key': 'nvp.iface-id.1', + 'obj_name':'ns0:OptionValue'}], + 'deviceChange': [ + {'device': { + 'macAddress':'00:00:00:ca:fe:01', + 'addressType': 'manual', + 'connectable': { + 'allowGuestControl':True, + 'startConnected': True, + 'connected': True, + 'obj_name':'ns0:VirtualDeviceConnectInfo'}, + 'backing': { + 'opaqueNetworkType': 'opaque', + 'opaqueNetworkId': 'fake-network-id', + 'obj_name': '%(card)s'}, + 'key': -47, + 'obj_name': 'ns0:VirtualE1000', + 'wakeOnLanEnabled': True}, + 'operation': 'add', + 'obj_name': 'ns0:VirtualDeviceConfigSpec'}], + 'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {'card': card} + expected = re.sub(r'\s+', '', expected) + result = re.sub(r'\s+', '', repr(result)) + self.assertEqual(expected, result) + + def test_get_network_attach_config_spec_dvs(self): + 
vif_info = {'network_name': 'br100', + 'mac_address': '00:00:00:ca:fe:01', + 'network_ref': {'type': 'DistributedVirtualPortgroup', + 'dvsw': 'fake-network-id', + 'dvpg': 'fake-group'}, + 'iface_id': 7, + 'vif_model': 'VirtualE1000'} + result = vm_util.get_network_attach_config_spec( + fake.FakeFactory(), vif_info, 1) + port = 'ns0:DistributedVirtualSwitchPortConnection' + backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo' + expected = """{ + 'extraConfig': [{'value': 7, + 'key': 'nvp.iface-id.1', + 'obj_name': 'ns0:OptionValue'}], + 'deviceChange': [ + {'device': {'macAddress': '00:00:00:ca:fe:01', + 'addressType': 'manual', + 'connectable': { + 'allowGuestControl': True, + 'startConnected': True, + 'connected': True, + 'obj_name': 'ns0:VirtualDeviceConnectInfo'}, + 'backing': { + 'port': { + 'portgroupKey': 'fake-group', + 'switchUuid': 'fake-network-id', + 'obj_name': '%(obj_name_port)s'}, + 'obj_name': '%(obj_name_backing)s'}, + 'key': -47, + 'obj_name': 'ns0:VirtualE1000', + 'wakeOnLanEnabled': True}, + 'operation': 'add', + 'obj_name': 'ns0:VirtualDeviceConfigSpec'}], + 'obj_name':'ns0:VirtualMachineConfigSpec'}""" % { + 'obj_name_backing': backing, + 'obj_name_port': port} + expected = re.sub(r'\s+', '', expected) + result = re.sub(r'\s+', '', repr(result)) + self.assertEqual(expected, result) + + def test_get_network_detach_config_spec(self): + result = vm_util.get_network_detach_config_spec( + fake.FakeFactory(), 'fake-device', 2) + expected = """{ + 'extraConfig': [{'value': 'free', + 'key': 'nvp.iface-id.2', + 'obj_name': 'ns0:OptionValue'}], + 'deviceChange': [{'device': 'fake-device', + 'operation': 'remove', + 'obj_name': 'ns0:VirtualDeviceConfigSpec'}], + 'obj_name':'ns0:VirtualMachineConfigSpec'}""" + expected = re.sub(r'\s+', '', expected) + result = re.sub(r'\s+', '', repr(result)) + self.assertEqual(expected, result) diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index 313ef5f87c..a5bcca250e 100644 
--- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -371,6 +371,14 @@ def instance_exists(self, instance): """Efficient override of base instance_exists method.""" return self._vmops.instance_exists(instance) + def attach_interface(self, instance, image_meta, vif): + """Attach an interface to the instance.""" + self._vmops.attach_interface(instance, image_meta, vif) + + def detach_interface(self, instance, vif): + """Detach an interface from the instance.""" + self._vmops.detach_interface(instance, vif) + class VMwareVCDriver(VMwareESXDriver): """The VC host connection object.""" @@ -790,6 +798,16 @@ def instance_exists(self, instance): _vmops = self._get_vmops_for_compute_node(instance['node']) return _vmops.instance_exists(instance) + def attach_interface(self, instance, image_meta, vif): + """Attach an interface to the instance.""" + _vmops = self._get_vmops_for_compute_node(instance.node) + _vmops.attach_interface(instance, image_meta, vif) + + def detach_interface(self, instance, vif): + """Detach an interface from the instance.""" + _vmops = self._get_vmops_for_compute_node(instance.node) + _vmops.detach_interface(instance, vif) + class VMwareAPISession(object): """Sets up a session with the VC/ESX host and handles all diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py index f611ccf20a..1f59c35267 100644 --- a/nova/virt/vmwareapi/vif.py +++ b/nova/virt/vmwareapi/vif.py @@ -154,18 +154,33 @@ def get_network_ref(session, cluster, vif, is_neutron): return network_ref +def get_vif_dict(session, cluster, vif_model, is_neutron, vif): + mac = vif['address'] + name = vif['network']['bridge'] or CONF.vmware.integration_bridge + ref = get_network_ref(session, cluster, vif, is_neutron) + return {'network_name': name, + 'mac_address': mac, + 'network_ref': ref, + 'iface_id': vif['id'], + 'vif_model': vif_model} + + def get_vif_info(session, cluster, is_neutron, vif_model, network_info): vif_infos = [] if not network_info: return 
vif_infos for vif in network_info: - mac_address = vif['address'] - net_name = vif['network']['bridge'] or CONF.vmware.integration_bridge - network_ref = get_network_ref(session, cluster, vif, is_neutron) - vif_infos.append({'network_name': net_name, - 'mac_address': mac_address, - 'network_ref': network_ref, - 'iface_id': vif['id'], - 'vif_model': vif_model - }) + vif_infos.append(get_vif_dict(session, cluster, vif_model, + is_neutron, vif)) return vif_infos + + +def get_network_device(hardware_devices, mac_address): + """Return the network device with MAC 'mac_address'.""" + if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": + hardware_devices = hardware_devices.VirtualDevice + for device in hardware_devices: + if device.__class__.__name__ in vm_util.ALL_SUPPORTED_NETWORK_DEVICES: + if hasattr(device, 'macAddress'): + if device.macAddress == mac_address: + return device diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index cc497673cf..5db8d08189 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -93,6 +93,13 @@ def wrapper(session, name): VNC_CONFIG_KEY = 'config.extraConfig["RemoteDisplay.vnc.port"]' +def _iface_id_option_value(client_factory, iface_id, port_index): + opt = client_factory.create('ns0:OptionValue') + opt.key = "nvp.iface-id.%d" % port_index + opt.value = iface_id + return opt + + def get_vm_create_spec(client_factory, instance, name, data_store_name, vif_infos, os_type=constants.DEFAULT_OS_TYPE): """Builds the VM Create spec.""" @@ -125,7 +132,7 @@ def get_vm_create_spec(client_factory, instance, name, data_store_name, vif_spec_list = [] for vif_info in vif_infos: - vif_spec = create_network_spec(client_factory, vif_info) + vif_spec = _create_vif_spec(client_factory, vif_info) vif_spec_list.append(vif_spec) device_config_spec = vif_spec_list @@ -139,14 +146,13 @@ def get_vm_create_spec(client_factory, instance, name, data_store_name, opt.value = instance['uuid'] 
extra_config.append(opt) - i = 0 + port_index = 0 for vif_info in vif_infos: if vif_info['iface_id']: - opt = client_factory.create('ns0:OptionValue') - opt.key = "nvp.iface-id.%d" % i - opt.value = vif_info['iface_id'] - extra_config.append(opt) - i += 1 + extra_config.append(_iface_id_option_value(client_factory, + vif_info['iface_id'], + port_index)) + port_index += 1 config_spec.extraConfig = extra_config @@ -187,7 +193,7 @@ def create_controller_spec(client_factory, key, return virtual_device_config -def _convert_vif_model(name): +def convert_vif_model(name): """Converts standard VIF_MODEL types to the internal VMware ones.""" if name == network_model.VIF_MODEL_E1000: return 'VirtualE1000' @@ -199,7 +205,7 @@ def _convert_vif_model(name): return name -def create_network_spec(client_factory, vif_info): +def _create_vif_spec(client_factory, vif_info): """Builds a config spec for the addition of a new network adapter to the VM. """ @@ -207,7 +213,7 @@ def create_network_spec(client_factory, vif_info): network_spec.operation = "add" # Keep compatible with other Hyper vif model parameter. 
- vif_info['vif_model'] = _convert_vif_model(vif_info['vif_model']) + vif_info['vif_model'] = convert_vif_model(vif_info['vif_model']) vif = 'ns0:' + vif_info['vif_model'] net_device = client_factory.create(vif) @@ -260,6 +266,38 @@ def create_network_spec(client_factory, vif_info): return network_spec +def get_network_attach_config_spec(client_factory, vif_info, index): + """Builds the vif attach config spec.""" + config_spec = client_factory.create('ns0:VirtualMachineConfigSpec') + vif_spec = _create_vif_spec(client_factory, vif_info) + config_spec.deviceChange = [vif_spec] + if vif_info['iface_id'] is not None: + config_spec.extraConfig = [_iface_id_option_value(client_factory, + vif_info['iface_id'], + index)] + return config_spec + + +def get_network_detach_config_spec(client_factory, device, port_index): + """Builds the vif detach config spec.""" + config_spec = client_factory.create('ns0:VirtualMachineConfigSpec') + virtual_device_config = client_factory.create( + 'ns0:VirtualDeviceConfigSpec') + virtual_device_config.operation = "remove" + virtual_device_config.device = device + config_spec.deviceChange = [virtual_device_config] + # If a key is already present then it cannot be deleted, only updated. + # This enables us to reuse this key if there is an additional + # attachment. The keys need to be preserved. This is due to the fact + # that there is logic on the ESX that does the network wiring + # according to these values. If they are changed then this will + # break networking to and from the interface. 
+ config_spec.extraConfig = [_iface_id_option_value(client_factory, + 'free', + port_index)] + return config_spec + + def get_vmdk_attach_config_spec(client_factory, disk_type=constants.DEFAULT_DISK_TYPE, file_path=None, @@ -1531,3 +1569,46 @@ def get_values_from_object_properties(session, props, properties): "continue_to_get_objects", token) return dictionary + + +def _get_vm_port_indices(session, vm_ref): + extra_config = session._call_method(vim_util, + 'get_dynamic_property', + vm_ref, 'VirtualMachine', + 'config.extraConfig') + ports = [] + if extra_config is not None: + options = extra_config.OptionValue + for option in options: + if (option.key.startswith('nvp.iface-id.') and + option.value != 'free'): + ports.append(int(option.key.split('.')[2])) + return ports + + +def get_attach_port_index(session, vm_ref): + """Get the first free port index.""" + ports = _get_vm_port_indices(session, vm_ref) + # No ports are configured on the VM + if not ports: + return 0 + ports.sort() + configured_ports_len = len(ports) + # Find the first free port index + for port_index in range(configured_ports_len): + if port_index != ports[port_index]: + return port_index + return configured_ports_len + + +def get_vm_detach_port_index(session, vm_ref, iface_id): + extra_config = session._call_method(vim_util, + 'get_dynamic_property', + vm_ref, 'VirtualMachine', + 'config.extraConfig') + if extra_config is not None: + options = extra_config.OptionValue + for option in options: + if (option.key.startswith('nvp.iface-id.') and + option.value == iface_id): + return int(option.key.split('.')[2]) diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 50057315dc..3d5db25459 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -1511,6 +1511,74 @@ def instance_exists(self, instance): except exception.InstanceNotFound: return False + def attach_interface(self, instance, image_meta, vif): + """Attach an interface to the instance.""" + 
vif_model = image_meta.get("hw_vif_model", + constants.DEFAULT_VIF_MODEL) + vif_model = vm_util.convert_vif_model(vif_model) + vif_info = vmwarevif.get_vif_dict(self._session, self._cluster, + vif_model, utils.is_neutron(), vif) + vm_ref = vm_util.get_vm_ref(self._session, instance) + # Ensure that there is not a race with the port index management + with lockutils.lock(instance.uuid, + lock_file_prefix='nova-vmware-hot-plug'): + port_index = vm_util.get_attach_port_index(self._session, vm_ref) + client_factory = self._session._get_vim().client.factory + attach_config_spec = vm_util.get_network_attach_config_spec( + client_factory, vif_info, port_index) + LOG.debug("Reconfiguring VM to attach interface", + instance=instance) + try: + vm_util.reconfigure_vm(self._session, vm_ref, + attach_config_spec) + except Exception as e: + LOG.error(_LE('Attaching network adapter failed. Exception: ' + ' %s'), + e, instance=instance) + raise exception.InterfaceAttachFailed( + instance=instance['uuid']) + LOG.debug("Reconfigured VM to attach interface", instance=instance) + + def detach_interface(self, instance, vif): + """Detach an interface from the instance.""" + vm_ref = vm_util.get_vm_ref(self._session, instance) + # Ensure that there is not a race with the port index management + with lockutils.lock(instance.uuid, + lock_file_prefix='nova-vmware-hot-plug'): + port_index = vm_util.get_vm_detach_port_index(self._session, + vm_ref, + vif['id']) + if port_index is None: + msg = _("No device with interface-id %s exists on " + "VM") % vif['id'] + raise exception.NotFound(msg) + + hardware_devices = self._session._call_method(vim_util, + "get_dynamic_property", vm_ref, + "VirtualMachine", "config.hardware.device") + device = vmwarevif.get_network_device(hardware_devices, + vif['address']) + if device is None: + msg = _("No device with MAC address %s exists on the " + "VM") % vif['address'] + raise exception.NotFound(msg) + + client_factory = 
self._session._get_vim().client.factory + detach_config_spec = vm_util.get_network_detach_config_spec( + client_factory, device, port_index) + LOG.debug("Reconfiguring VM to detach interface", + instance=instance) + try: + vm_util.reconfigure_vm(self._session, vm_ref, + detach_config_spec) + except Exception as e: + LOG.error(_LE('Detaching network adapter failed. Exception: ' + '%s'), + e, instance=instance) + raise exception.InterfaceDetachFailed( + instance=instance['uuid']) + LOG.debug("Reconfigured VM to detach interface", instance=instance) + class VMwareVCVMOps(VMwareVMOps): """Management class for VM-related tasks. From fc8a70a02a18ecc26d9cbc24deee1efe23603ecc Mon Sep 17 00:00:00 2001 From: Vui Lam Date: Thu, 5 Jun 2014 16:16:55 -0700 Subject: [PATCH 062/486] VMware: consolidate datastore code This is largely a straight-forward code movement. Given that we have utility classes/code in ds_util.py, it makes sense to move the remaining datastore-related code in vm_util.py to ds_util.py as well. partial blueprint vmware-spawn-refactor Change-Id: I2ede44306acf91a13f2ff6f0d8f210642a1c1e69 --- nova/tests/virt/vmwareapi/test_ds_util.py | 97 ++++++++++++ ...py => test_ds_util_datastore_selection.py} | 17 +- nova/tests/virt/vmwareapi/test_vm_util.py | 96 ----------- nova/tests/virt/vmwareapi/test_vmops.py | 2 +- nova/virt/vmwareapi/ds_util.py | 149 ++++++++++++++++++ nova/virt/vmwareapi/host.py | 3 +- nova/virt/vmwareapi/vm_util.py | 142 +---------------- nova/virt/vmwareapi/vmops.py | 6 +- 8 files changed, 265 insertions(+), 247 deletions(-) rename nova/tests/virt/vmwareapi/{test_vm_util_datastore_selection.py => test_ds_util_datastore_selection.py} (92%) diff --git a/nova/tests/virt/vmwareapi/test_ds_util.py b/nova/tests/virt/vmwareapi/test_ds_util.py index 06fd242022..2b957e1c5f 100644 --- a/nova/tests/virt/vmwareapi/test_ds_util.py +++ b/nova/tests/virt/vmwareapi/test_ds_util.py @@ -13,9 +13,12 @@ # under the License. 
import contextlib +import re import mock +from nova import exception +from nova.openstack.common.gettextutils import _ from nova.openstack.common import units from nova import test from nova.tests.virt.vmwareapi import fake @@ -157,6 +160,100 @@ def fake_wait_for_task(task_ref): 'fake-browser', 'fake-path', 'fake-file') self.assertFalse(file_exists) + def test_get_datastore(self): + fake_objects = fake.FakeRetrieveResult() + fake_objects.add_object(fake.Datastore()) + result = ds_util.get_datastore( + fake.FakeObjectRetrievalSession(fake_objects)) + + self.assertEqual("fake-ds", result.name) + self.assertEqual(units.Ti, result.capacity) + self.assertEqual(500 * units.Gi, result.freespace) + + def test_get_datastore_with_regex(self): + # Test with a regex that matches with a datastore + datastore_valid_regex = re.compile("^openstack.*\d$") + fake_objects = fake.FakeRetrieveResult() + fake_objects.add_object(fake.Datastore("openstack-ds0")) + fake_objects.add_object(fake.Datastore("fake-ds0")) + fake_objects.add_object(fake.Datastore("fake-ds1")) + result = ds_util.get_datastore( + fake.FakeObjectRetrievalSession(fake_objects), None, None, + datastore_valid_regex) + self.assertEqual("openstack-ds0", result.name) + + def test_get_datastore_with_token(self): + regex = re.compile("^ds.*\d$") + fake0 = fake.FakeRetrieveResult() + fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi)) + fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi)) + setattr(fake0, 'token', 'token-0') + fake1 = fake.FakeRetrieveResult() + fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi)) + fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi)) + result = ds_util.get_datastore( + fake.FakeObjectRetrievalSession(fake0, fake1), None, None, regex) + self.assertEqual("ds2", result.name) + + def test_get_datastore_with_list(self): + # Test with a regex containing whitelist of datastores + datastore_valid_regex = 
re.compile("(openstack-ds0|openstack-ds2)") + fake_objects = fake.FakeRetrieveResult() + fake_objects.add_object(fake.Datastore("openstack-ds0")) + fake_objects.add_object(fake.Datastore("openstack-ds1")) + fake_objects.add_object(fake.Datastore("openstack-ds2")) + result = ds_util.get_datastore( + fake.FakeObjectRetrievalSession(fake_objects), None, None, + datastore_valid_regex) + self.assertNotEqual("openstack-ds1", result.name) + + def test_get_datastore_with_regex_error(self): + # Test with a regex that has no match + # Checks if code raises DatastoreNotFound with a specific message + datastore_invalid_regex = re.compile("unknown-ds") + exp_message = (_("Datastore regex %s did not match any datastores") + % datastore_invalid_regex.pattern) + fake_objects = fake.FakeRetrieveResult() + fake_objects.add_object(fake.Datastore("fake-ds0")) + fake_objects.add_object(fake.Datastore("fake-ds1")) + # assertRaisesRegExp would have been a good choice instead of + # try/catch block, but it's available only from Py 2.7. 
+ try: + ds_util.get_datastore( + fake.FakeObjectRetrievalSession(fake_objects), None, None, + datastore_invalid_regex) + except exception.DatastoreNotFound as e: + self.assertEqual(exp_message, e.args[0]) + else: + self.fail("DatastoreNotFound Exception was not raised with " + "message: %s" % exp_message) + + def test_get_datastore_without_datastore(self): + + self.assertRaises(exception.DatastoreNotFound, + ds_util.get_datastore, + fake.FakeObjectRetrievalSession(None), host="fake-host") + + self.assertRaises(exception.DatastoreNotFound, + ds_util.get_datastore, + fake.FakeObjectRetrievalSession(None), cluster="fake-cluster") + + def test_get_datastore_no_host_in_cluster(self): + self.assertRaises(exception.DatastoreNotFound, + ds_util.get_datastore, + fake.FakeObjectRetrievalSession(""), 'fake_cluster') + + def test_get_datastore_inaccessible_ds(self): + data_store = fake.Datastore() + data_store.set("summary.accessible", False) + + fake_objects = fake.FakeRetrieveResult() + fake_objects.add_object(data_store) + + self.assertRaises(exception.DatastoreNotFound, + ds_util.get_datastore, + fake.FakeObjectRetrievalSession(fake_objects)) + class DatastoreTestCase(test.NoDBTestCase): def test_ds(self): diff --git a/nova/tests/virt/vmwareapi/test_vm_util_datastore_selection.py b/nova/tests/virt/vmwareapi/test_ds_util_datastore_selection.py similarity index 92% rename from nova/tests/virt/vmwareapi/test_vm_util_datastore_selection.py rename to nova/tests/virt/vmwareapi/test_ds_util_datastore_selection.py index b988b57486..c34e76f4c2 100644 --- a/nova/tests/virt/vmwareapi/test_vm_util_datastore_selection.py +++ b/nova/tests/virt/vmwareapi/test_ds_util_datastore_selection.py @@ -17,7 +17,6 @@ from nova.openstack.common import units from nova import test from nova.virt.vmwareapi import ds_util -from nova.virt.vmwareapi import vm_util ResultSet = collections.namedtuple('ResultSet', ['objects']) ResultSetToken = collections.namedtuple('ResultSet', ['objects', 'token']) @@ 
-26,10 +25,10 @@ MoRef = collections.namedtuple('ManagedObjectReference', ['value']) -class VMwareVMUtilDatastoreSelectionTestCase(test.NoDBTestCase): +class VMwareDSUtilDatastoreSelectionTestCase(test.NoDBTestCase): def setUp(self): - super(VMwareVMUtilDatastoreSelectionTestCase, self).setUp() + super(VMwareDSUtilDatastoreSelectionTestCase, self).setUp() self.data = [ ['VMFS', 'os-some-name', True, 987654321, 12346789], ['NFS', 'another-name', True, 9876543210, 123467890], @@ -63,7 +62,7 @@ def test_filter_datastores_simple(self): datastores = self.build_result_set(self.data) best_match = ds_util.Datastore(ref='fake_ref', name='ds', capacity=0, freespace=0) - rec = vm_util._select_datastore(datastores, best_match) + rec = ds_util._select_datastore(datastores, best_match) self.assertIsNotNone(rec.ref, "could not find datastore!") self.assertEqual('ds-001', rec.ref.value, @@ -77,7 +76,7 @@ def test_filter_datastores_empty(self): best_match = ds_util.Datastore(ref='fake_ref', name='ds', capacity=0, freespace=0) - rec = vm_util._select_datastore(datastores, best_match) + rec = ds_util._select_datastore(datastores, best_match) self.assertEqual(rec, best_match) @@ -87,7 +86,7 @@ def test_filter_datastores_no_match(self): best_match = ds_util.Datastore(ref='fake_ref', name='ds', capacity=0, freespace=0) - rec = vm_util._select_datastore(datastores, + rec = ds_util._select_datastore(datastores, best_match, datastore_regex) @@ -108,7 +107,7 @@ def test_filter_datastores_specific_match(self): best_match = ds_util.Datastore(ref='fake_ref', name='ds', capacity=0, freespace=0) - rec = vm_util._select_datastore(datastores, + rec = ds_util._select_datastore(datastores, best_match, datastore_regex) @@ -135,7 +134,7 @@ def test_filter_datastores_missing_props(self): best_match = ds_util.Datastore(ref='fake_ref', name='ds', capacity=0, freespace=0) - rec = vm_util._select_datastore(datastores, best_match) + rec = ds_util._select_datastore(datastores, best_match) 
self.assertEqual(rec, best_match, "no matches were expected") def test_filter_datastores_best_match(self): @@ -153,7 +152,7 @@ def test_filter_datastores_best_match(self): # the current best match is better than all candidates best_match = ds_util.Datastore(ref='ds-100', name='best-ds-good', capacity=20 * units.Gi, freespace=19 * units.Gi) - rec = vm_util._select_datastore(datastores, + rec = ds_util._select_datastore(datastores, best_match, datastore_regex) self.assertEqual(rec, best_match, "did not match datastore properly") diff --git a/nova/tests/virt/vmwareapi/test_vm_util.py b/nova/tests/virt/vmwareapi/test_vm_util.py index d6d7285fae..0e7f30f1e2 100644 --- a/nova/tests/virt/vmwareapi/test_vm_util.py +++ b/nova/tests/virt/vmwareapi/test_vm_util.py @@ -22,8 +22,6 @@ from nova import exception from nova.network import model as network_model -from nova.openstack.common.gettextutils import _ -from nova.openstack.common import units from nova.openstack.common import uuidutils from nova import test from nova.tests.virt.vmwareapi import fake @@ -47,28 +45,6 @@ def tearDown(self): super(VMwareVMUtilTestCase, self).tearDown() fake.reset() - def test_get_datastore(self): - fake_objects = fake.FakeRetrieveResult() - fake_objects.add_object(fake.Datastore()) - result = vm_util.get_datastore( - fake.FakeObjectRetrievalSession(fake_objects)) - - self.assertEqual("fake-ds", result.name) - self.assertEqual(units.Ti, result.capacity) - self.assertEqual(500 * units.Gi, result.freespace) - - def test_get_datastore_with_regex(self): - # Test with a regex that matches with a datastore - datastore_valid_regex = re.compile("^openstack.*\d$") - fake_objects = fake.FakeRetrieveResult() - fake_objects.add_object(fake.Datastore("openstack-ds0")) - fake_objects.add_object(fake.Datastore("fake-ds0")) - fake_objects.add_object(fake.Datastore("fake-ds1")) - result = vm_util.get_datastore( - fake.FakeObjectRetrievalSession(fake_objects), - None, None, datastore_valid_regex) - 
self.assertEqual("openstack-ds0", result.name) - def _test_get_stats_from_cluster(self, connection_state="connected", maintenance_mode=False): ManagedObjectRefs = [fake.ManagedObjectReference("host1", @@ -148,62 +124,6 @@ def test_get_stats_from_cluster_hosts_disconnected_and_active(self): def test_get_stats_from_cluster_hosts_connected_and_maintenance(self): self._test_get_stats_from_cluster(maintenance_mode=True) - def test_get_datastore_with_token(self): - regex = re.compile("^ds.*\d$") - fake0 = fake.FakeRetrieveResult() - fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi)) - fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi)) - setattr(fake0, 'token', 'token-0') - fake1 = fake.FakeRetrieveResult() - fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi)) - fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi)) - result = vm_util.get_datastore( - fake.FakeObjectRetrievalSession(fake0, fake1), None, None, regex) - self.assertEqual("ds2", result.name) - - def test_get_datastore_with_list(self): - # Test with a regex containing whitelist of datastores - datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)") - fake_objects = fake.FakeRetrieveResult() - fake_objects.add_object(fake.Datastore("openstack-ds0")) - fake_objects.add_object(fake.Datastore("openstack-ds1")) - fake_objects.add_object(fake.Datastore("openstack-ds2")) - result = vm_util.get_datastore( - fake.FakeObjectRetrievalSession(fake_objects), - None, None, datastore_valid_regex) - self.assertNotEqual("openstack-ds1", result.name) - - def test_get_datastore_with_regex_error(self): - # Test with a regex that has no match - # Checks if code raises DatastoreNotFound with a specific message - datastore_invalid_regex = re.compile("unknown-ds") - exp_message = (_("Datastore regex %s did not match any datastores") - % datastore_invalid_regex.pattern) - fake_objects = fake.FakeRetrieveResult() - 
fake_objects.add_object(fake.Datastore("fake-ds0")) - fake_objects.add_object(fake.Datastore("fake-ds1")) - # assertRaisesRegExp would have been a good choice instead of - # try/catch block, but it's available only from Py 2.7. - try: - vm_util.get_datastore( - fake.FakeObjectRetrievalSession(fake_objects), None, None, - datastore_invalid_regex) - except exception.DatastoreNotFound as e: - self.assertEqual(exp_message, e.args[0]) - else: - self.fail("DatastoreNotFound Exception was not raised with " - "message: %s" % exp_message) - - def test_get_datastore_without_datastore(self): - - self.assertRaises(exception.DatastoreNotFound, - vm_util.get_datastore, - fake.FakeObjectRetrievalSession(None), host="fake-host") - - self.assertRaises(exception.DatastoreNotFound, - vm_util.get_datastore, - fake.FakeObjectRetrievalSession(None), cluster="fake-cluster") - def test_get_host_ref_from_id(self): fake_host_name = "ha-host" fake_host_sys = fake.HostSystem(fake_host_name) @@ -226,11 +146,6 @@ def test_get_host_ref_no_hosts_in_cluster(self): vm_util.get_host_ref, fake.FakeObjectRetrievalSession(""), 'fake_cluster') - def test_get_datastore_no_host_in_cluster(self): - self.assertRaises(exception.DatastoreNotFound, - vm_util.get_datastore, - fake.FakeObjectRetrievalSession(""), 'fake_cluster') - @mock.patch.object(vm_util, '_get_vm_ref_from_vm_uuid', return_value=None) def test_get_host_name_for_vm(self, _get_ref_from_uuid): @@ -305,17 +220,6 @@ def test_property_from_property_set(self): self.assertIsNotNone(prop4) self.assertEqual('bar1', prop4.val) - def test_get_datastore_inaccessible_ds(self): - data_store = fake.Datastore() - data_store.set("summary.accessible", False) - - fake_objects = fake.FakeRetrieveResult() - fake_objects.add_object(data_store) - - self.assertRaises(exception.DatastoreNotFound, - vm_util.get_datastore, - fake.FakeObjectRetrievalSession(fake_objects)) - def test_get_resize_spec(self): fake_instance = {'id': 7, 'name': 'fake!', 'uuid': 
'bda5fb9e-b347-40e8-8256-42397848cb00', diff --git a/nova/tests/virt/vmwareapi/test_vmops.py b/nova/tests/virt/vmwareapi/test_vmops.py index 6d1b221f20..a093bff658 100644 --- a/nova/tests/virt/vmwareapi/test_vmops.py +++ b/nova/tests/virt/vmwareapi/test_vmops.py @@ -569,7 +569,7 @@ def _verify_spawn_method_calls(self, mock_call_method): recorded_methods = [c[1][1] for c in mock_call_method.mock_calls] self.assertEqual(expected_methods, recorded_methods) - @mock.patch('nova.virt.vmwareapi.vm_util.get_datastore') + @mock.patch('nova.virt.vmwareapi.ds_util.get_datastore') @mock.patch( 'nova.virt.vmwareapi.vmops.VMwareVCVMOps.get_datacenter_ref_and_name') @mock.patch('nova.virt.vmwareapi.vm_util.get_mo_id_from_instance', diff --git a/nova/virt/vmwareapi/ds_util.py b/nova/virt/vmwareapi/ds_util.py index ad3fa1eda7..cc76b9a700 100644 --- a/nova/virt/vmwareapi/ds_util.py +++ b/nova/virt/vmwareapi/ds_util.py @@ -17,9 +17,12 @@ """ import posixpath +from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.vmwareapi import error_util +from nova.virt.vmwareapi import vim_util +from nova.virt.vmwareapi import vm_util LOG = logging.getLogger(__name__) @@ -158,6 +161,152 @@ def build_datastore_path(datastore_name, path): return str(DatastorePath(datastore_name, path)) +# NOTE(mdbooth): this convenience function is temporarily duplicated in +# vm_util. The correct fix is to handle paginated results as they are returned +# from the relevant vim_util function. However, vim_util is currently +# effectively deprecated as we migrate to oslo.vmware. This duplication will be +# removed when we fix it properly in oslo.vmware. +def _get_token(results): + """Get the token from the property results.""" + return getattr(results, 'token', None) + + +def _select_datastore(data_stores, best_match, datastore_regex=None): + """Find the most preferable datastore in a given RetrieveResult object. 
+ + :param data_stores: a RetrieveResult object from vSphere API call + :param best_match: the current best match for datastore + :param datastore_regex: an optional regular expression to match names + :return: datastore_ref, datastore_name, capacity, freespace + """ + + # data_stores is actually a RetrieveResult object from vSphere API call + for obj_content in data_stores.objects: + # the propset attribute "need not be set" by returning API + if not hasattr(obj_content, 'propSet'): + continue + + propdict = vm_util.propset_dict(obj_content.propSet) + # Local storage identifier vSphere doesn't support CIFS or + # vfat for datastores, therefore filtered + ds_type = propdict['summary.type'] + ds_name = propdict['summary.name'] + if ((ds_type == 'VMFS' or ds_type == 'NFS') and + propdict.get('summary.accessible')): + if datastore_regex is None or datastore_regex.match(ds_name): + new_ds = Datastore( + ref=obj_content.obj, + name=ds_name, + capacity=propdict['summary.capacity'], + freespace=propdict['summary.freeSpace']) + # favor datastores with more free space + if (best_match is None or + new_ds.freespace > best_match.freespace): + best_match = new_ds + + return best_match + + +def get_datastore(session, cluster=None, host=None, datastore_regex=None): + """Get the datastore list and choose the most preferable one.""" + if cluster is None and host is None: + data_stores = session._call_method(vim_util, "get_objects", + "Datastore", ["summary.type", "summary.name", + "summary.capacity", "summary.freeSpace", + "summary.accessible"]) + else: + if cluster is not None: + datastore_ret = session._call_method( + vim_util, + "get_dynamic_property", cluster, + "ClusterComputeResource", "datastore") + else: + datastore_ret = session._call_method( + vim_util, + "get_dynamic_property", host, + "HostSystem", "datastore") + + if not datastore_ret: + raise exception.DatastoreNotFound() + data_store_mors = datastore_ret.ManagedObjectReference + data_stores = 
session._call_method(vim_util, + "get_properties_for_a_collection_of_objects", + "Datastore", data_store_mors, + ["summary.type", "summary.name", + "summary.capacity", "summary.freeSpace", + "summary.accessible"]) + best_match = None + while data_stores: + best_match = _select_datastore(data_stores, best_match, + datastore_regex) + token = _get_token(data_stores) + if not token: + break + data_stores = session._call_method(vim_util, + "continue_to_get_objects", + token) + if best_match: + return best_match + if datastore_regex: + raise exception.DatastoreNotFound( + _("Datastore regex %s did not match any datastores") + % datastore_regex.pattern) + else: + raise exception.DatastoreNotFound() + + +def _get_allowed_datastores(data_stores, datastore_regex, allowed_types): + allowed = [] + for obj_content in data_stores.objects: + # the propset attribute "need not be set" by returning API + if not hasattr(obj_content, 'propSet'): + continue + + propdict = vm_util.propset_dict(obj_content.propSet) + # Local storage identifier vSphere doesn't support CIFS or + # vfat for datastores, therefore filtered + ds_type = propdict['summary.type'] + ds_name = propdict['summary.name'] + if (propdict['summary.accessible'] and ds_type in allowed_types): + if datastore_regex is None or datastore_regex.match(ds_name): + allowed.append({'ref': obj_content.obj, 'name': ds_name}) + + return allowed + + +def get_available_datastores(session, cluster=None, datastore_regex=None): + """Get the datastore list and choose the first local storage.""" + if cluster: + mobj = cluster + resource_type = "ClusterComputeResource" + else: + mobj = vm_util.get_host_ref(session) + resource_type = "HostSystem" + ds = session._call_method(vim_util, "get_dynamic_property", mobj, + resource_type, "datastore") + if not ds: + return [] + data_store_mors = ds.ManagedObjectReference + # NOTE(garyk): use utility method to retrieve remote objects + data_stores = session._call_method(vim_util, + 
"get_properties_for_a_collection_of_objects", + "Datastore", data_store_mors, + ["summary.type", "summary.name", "summary.accessible"]) + + allowed = [] + while data_stores: + allowed.extend(_get_allowed_datastores(data_stores, datastore_regex, + ['VMFS', 'NFS'])) + token = _get_token(data_stores) + if not token: + break + + data_stores = session._call_method(vim_util, + "continue_to_get_objects", + token) + return allowed + + def file_delete(session, datastore_path, dc_ref): LOG.debug("Deleting the datastore file %s", datastore_path) vim = session._get_vim() diff --git a/nova/virt/vmwareapi/host.py b/nova/virt/vmwareapi/host.py index b1be80803c..ba9a01de75 100644 --- a/nova/virt/vmwareapi/host.py +++ b/nova/virt/vmwareapi/host.py @@ -21,6 +21,7 @@ from nova.openstack.common import log as logging from nova.openstack.common import units from nova import utils +from nova.virt.vmwareapi import ds_util from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vm_util @@ -80,7 +81,7 @@ def set_host_enabled(self, _host, enabled): def _get_ds_capacity_and_freespace(session, cluster=None): try: - ds = vm_util.get_datastore(session, cluster) + ds = ds_util.get_datastore(session, cluster) return ds.capacity, ds.freespace except exception.DatastoreNotFound: return 0, 0 diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index cc497673cf..a57beb534b 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -30,7 +30,6 @@ from nova.openstack.common import units from nova import utils from nova.virt.vmwareapi import constants -from nova.virt.vmwareapi import ds_util from nova.virt.vmwareapi import error_util from nova.virt.vmwareapi import vim_util @@ -689,6 +688,11 @@ def _get_allocated_vnc_ports(session): return vnc_ports +# NOTE(mdbooth): this convenience function is temporarily duplicated in +# ds_util. The correct fix is to handle paginated results as they are returned +# from the relevant vim_util function. 
However, vim_util is currently +# effectively deprecated as we migrate to oslo.vmware. This duplication will be +# removed when we fix it properly in oslo.vmware. def _get_token(results): """Get the token from the property results.""" return getattr(results, 'token', None) @@ -1011,142 +1015,6 @@ def propset_dict(propset): return dict([(prop.name, prop.val) for prop in propset]) -def _select_datastore(data_stores, best_match, datastore_regex=None): - """Find the most preferable datastore in a given RetrieveResult object. - - :param data_stores: a RetrieveResult object from vSphere API call - :param best_match: the current best match for datastore - :param datastore_regex: an optional regular expression to match names - :return: datastore_ref, datastore_name, capacity, freespace - """ - - # data_stores is actually a RetrieveResult object from vSphere API call - for obj_content in data_stores.objects: - # the propset attribute "need not be set" by returning API - if not hasattr(obj_content, 'propSet'): - continue - - propdict = propset_dict(obj_content.propSet) - # Local storage identifier vSphere doesn't support CIFS or - # vfat for datastores, therefore filtered - ds_type = propdict['summary.type'] - ds_name = propdict['summary.name'] - if ((ds_type == 'VMFS' or ds_type == 'NFS') and - propdict.get('summary.accessible')): - if datastore_regex is None or datastore_regex.match(ds_name): - new_ds = ds_util.Datastore( - ref=obj_content.obj, - name=ds_name, - capacity=propdict['summary.capacity'], - freespace=propdict['summary.freeSpace']) - # favor datastores with more free space - if (best_match is None or - new_ds.freespace > best_match.freespace): - best_match = new_ds - - return best_match - - -def get_datastore(session, cluster=None, host=None, datastore_regex=None): - """Get the datastore list and choose the most preferable one.""" - if cluster is None and host is None: - data_stores = session._call_method(vim_util, "get_objects", - "Datastore", ["summary.type", 
"summary.name", - "summary.capacity", "summary.freeSpace", - "summary.accessible"]) - else: - if cluster is not None: - datastore_ret = session._call_method( - vim_util, - "get_dynamic_property", cluster, - "ClusterComputeResource", "datastore") - else: - datastore_ret = session._call_method( - vim_util, - "get_dynamic_property", host, - "HostSystem", "datastore") - - if not datastore_ret: - raise exception.DatastoreNotFound() - data_store_mors = datastore_ret.ManagedObjectReference - data_stores = session._call_method(vim_util, - "get_properties_for_a_collection_of_objects", - "Datastore", data_store_mors, - ["summary.type", "summary.name", - "summary.capacity", "summary.freeSpace", - "summary.accessible"]) - best_match = None - while data_stores: - best_match = _select_datastore(data_stores, best_match, - datastore_regex) - token = _get_token(data_stores) - if not token: - break - data_stores = session._call_method(vim_util, - "continue_to_get_objects", - token) - if best_match: - return best_match - if datastore_regex: - raise exception.DatastoreNotFound( - _("Datastore regex %s did not match any datastores") - % datastore_regex.pattern) - else: - raise exception.DatastoreNotFound() - - -def _get_allowed_datastores(data_stores, datastore_regex, allowed_types): - allowed = [] - for obj_content in data_stores.objects: - # the propset attribute "need not be set" by returning API - if not hasattr(obj_content, 'propSet'): - continue - - propdict = propset_dict(obj_content.propSet) - # Local storage identifier vSphere doesn't support CIFS or - # vfat for datastores, therefore filtered - ds_type = propdict['summary.type'] - ds_name = propdict['summary.name'] - if (propdict['summary.accessible'] and ds_type in allowed_types): - if datastore_regex is None or datastore_regex.match(ds_name): - allowed.append({'ref': obj_content.obj, 'name': ds_name}) - - return allowed - - -def get_available_datastores(session, cluster=None, datastore_regex=None): - """Get the datastore 
list and choose the first local storage.""" - if cluster: - mobj = cluster - resource_type = "ClusterComputeResource" - else: - mobj = get_host_ref(session) - resource_type = "HostSystem" - ds = session._call_method(vim_util, "get_dynamic_property", mobj, - resource_type, "datastore") - if not ds: - return [] - data_store_mors = ds.ManagedObjectReference - # NOTE(garyk): use utility method to retrieve remote objects - data_stores = session._call_method(vim_util, - "get_properties_for_a_collection_of_objects", - "Datastore", data_store_mors, - ["summary.type", "summary.name", "summary.accessible"]) - - allowed = [] - while data_stores: - allowed.extend(_get_allowed_datastores(data_stores, datastore_regex, - ['VMFS', 'NFS'])) - token = _get_token(data_stores) - if not token: - break - - data_stores = session._call_method(vim_util, - "continue_to_get_objects", - token) - return allowed - - def get_vmdk_backed_disk_uuid(hardware_devices, volume_uuid): if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": hardware_devices = hardware_devices.VirtualDevice diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 50057315dc..9d7093c7a4 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -195,7 +195,7 @@ def spawn(self, context, instance, image_meta, injected_files, (file_type, is_iso) = self._get_disk_format(image_meta) client_factory = self._session._get_vim().client.factory - datastore = vm_util.get_datastore( + datastore = ds_util.get_datastore( self._session, self._cluster, datastore_regex=self._datastore_regex) dc_info = self.get_datacenter_ref_and_name(datastore.ref) @@ -1156,7 +1156,7 @@ def migrate_disk_and_power_off(self, context, instance, dest, step=2, total_steps=RESIZE_TOTAL_STEPS) - ds_ref = vm_util.get_datastore( + ds_ref = ds_util.get_datastore( self._session, self._cluster, host_ref, datastore_regex=self._datastore_regex).ref dc_info = self.get_datacenter_ref_and_name(ds_ref) @@ -1470,7 +1470,7 
@@ def manage_image_cache(self, context, instances): LOG.debug("Image aging disabled. Aging will not be done.") return - datastores = vm_util.get_available_datastores(self._session, + datastores = ds_util.get_available_datastores(self._session, self._cluster, self._datastore_regex) datastores_info = [] From dc677842832bdc710f7ff9ceb532dd1137e18206 Mon Sep 17 00:00:00 2001 From: Vui Lam Date: Thu, 5 Jun 2014 21:11:58 -0700 Subject: [PATCH 063/486] VMware: DatastorePath join() and __eq__() There are quite a number of use cases where we construct additional datastore paths out of one representing some ancestor directory, so implemented a join() method that allows for just that. It is useful to define equality operator when comparing two DatstorePath objects, so added that as well. partial blueprint vmware-spawn-refactor Change-Id: Iec44fdd0beba232b57bcd0a07c39e628208ca9b2 --- nova/tests/virt/vmwareapi/test_ds_util.py | 22 ++++++++++++++++++++++ nova/virt/vmwareapi/ds_util.py | 13 +++++++++++++ 2 files changed, 35 insertions(+) diff --git a/nova/tests/virt/vmwareapi/test_ds_util.py b/nova/tests/virt/vmwareapi/test_ds_util.py index 2b957e1c5f..883229206f 100644 --- a/nova/tests/virt/vmwareapi/test_ds_util.py +++ b/nova/tests/virt/vmwareapi/test_ds_util.py @@ -384,6 +384,28 @@ def test_ds_path_non_equivalence(self): p = ds_util.DatastorePath(t[0], *t[1]) self.assertNotEqual(str(canonical_p), str(p)) + def test_equal(self): + a = ds_util.DatastorePath('ds_name', 'a') + b = ds_util.DatastorePath('ds_name', 'a') + self.assertEqual(a, b) + + def test_join(self): + p = ds_util.DatastorePath('ds_name', 'a') + ds_path = p.join('b') + self.assertEqual('[ds_name] a/b', str(ds_path)) + + p = ds_util.DatastorePath('ds_name', 'a') + ds_path = p.join() + self.assertEqual('[ds_name] a', str(ds_path)) + + bad_args = [ + [None], + ['', None], + ['a', None], + ['a', None, 'b']] + for arg in bad_args: + self.assertRaises(ValueError, p.join, *arg) + def test_ds_path_parse(self): p = 
ds_util.DatastorePath.parse('[dsname]') self.assertEqual('dsname', p.datastore) diff --git a/nova/virt/vmwareapi/ds_util.py b/nova/virt/vmwareapi/ds_util.py index cc76b9a700..06dace3ff2 100644 --- a/nova/virt/vmwareapi/ds_util.py +++ b/nova/virt/vmwareapi/ds_util.py @@ -140,6 +140,19 @@ def dirname(self): def rel_path(self): return self._rel_path + def join(self, *paths): + if paths: + if None in paths: + raise ValueError(_("path component cannot be None")) + return DatastorePath(self.datastore, + posixpath.join(self._rel_path, *paths)) + return self + + def __eq__(self, other): + return (isinstance(other, DatastorePath) and + self._datastore_name == other._datastore_name and + self._rel_path == other._rel_path) + @classmethod def parse(cls, datastore_path): """Constructs a DatastorePath object given a datastore path string.""" From c0244717973d70c1d9795756b729aa6a098513a1 Mon Sep 17 00:00:00 2001 From: Vui Lam Date: Thu, 5 Jun 2014 21:25:31 -0700 Subject: [PATCH 064/486] VMware: use datastore classes get_allowed_datastores/_sub_folder Convert the above-mentioned functions to use the datastore utility classes. 
partial blueprint vmware-spawn-refactor Change-Id: I20c37367c8dca1c8dc20a308ce2cea5229097701 --- nova/tests/virt/vmwareapi/test_imagecache.py | 57 +++++++++++--------- nova/virt/vmwareapi/ds_util.py | 4 +- nova/virt/vmwareapi/imagecache.py | 26 +++++---- nova/virt/vmwareapi/vmops.py | 7 +-- 4 files changed, 49 insertions(+), 45 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_imagecache.py b/nova/tests/virt/vmwareapi/test_imagecache.py index 4917296d7f..1a2a256232 100644 --- a/nova/tests/virt/vmwareapi/test_imagecache.py +++ b/nova/tests/virt/vmwareapi/test_imagecache.py @@ -46,7 +46,7 @@ def tearDown(self): def test_timestamp_cleanup(self): def fake_get_timestamp(ds_browser, ds_path): self.assertEqual('fake-ds-browser', ds_browser) - self.assertEqual('fake-ds-path', ds_path) + self.assertEqual('[fake-ds] fake-path', str(ds_path)) if not self.exists: return ts = '%s%s' % (imagecache.TIMESTAMP_PREFIX, @@ -61,19 +61,21 @@ def fake_get_timestamp(ds_browser, ds_path): ) as (_get_timestamp, _file_delete): self.exists = False self._imagecache.timestamp_cleanup( - 'fake-dc-ref', 'fake-ds-browser', 'fake-ds-path') + 'fake-dc-ref', 'fake-ds-browser', + ds_util.DatastorePath('fake-ds', 'fake-path')) self.assertEqual(0, _file_delete.call_count) self.exists = True self._imagecache.timestamp_cleanup( - 'fake-dc-ref', 'fake-ds-browser', 'fake-ds-path') + 'fake-dc-ref', 'fake-ds-browser', + ds_util.DatastorePath('fake-ds', 'fake-path')) _file_delete.assert_called_once_with(self._session, - 'fake-ds-path/ts-2012-11-22-12-00-00', + '[fake-ds] fake-path/ts-2012-11-22-12-00-00', 'fake-dc-ref') def test_get_timestamp(self): def fake_get_sub_folders(session, ds_browser, ds_path): self.assertEqual('fake-ds-browser', ds_browser) - self.assertEqual('fake-ds-path', ds_path) + self.assertEqual('[fake-ds] fake-path', str(ds_path)) if self.exists: files = set() files.add(self._file_name) @@ -84,12 +86,14 @@ def fake_get_sub_folders(session, ds_browser, ds_path): fake_get_sub_folders) 
): self.exists = True - ts = self._imagecache._get_timestamp('fake-ds-browser', - 'fake-ds-path') + ts = self._imagecache._get_timestamp( + 'fake-ds-browser', + ds_util.DatastorePath('fake-ds', 'fake-path')) self.assertEqual(self._file_name, ts) self.exists = False - ts = self._imagecache._get_timestamp('fake-ds-browser', - 'fake-ds-path') + ts = self._imagecache._get_timestamp( + 'fake-ds-browser', + ds_util.DatastorePath('fake-ds', 'fake-path')) self.assertIsNone(ts) def test_get_timestamp_filename(self): @@ -131,9 +135,8 @@ def fake_get_sub_folders(session, ds_browser, ds_path): fake_get_sub_folders) ) as (_get_dynamic, _get_sub_folders): fake_ds_ref = fake.ManagedObjectReference('fake-ds-ref') - datastore = {'name': 'ds', 'ref': fake_ds_ref} - ds_path = ds_util.build_datastore_path(datastore['name'], - 'base_folder') + datastore = ds_util.Datastore(name='ds', ref=fake_ds_ref) + ds_path = datastore.build_path('base_folder') images = self._imagecache._list_datastore_images( ds_path, datastore) originals = set() @@ -146,29 +149,30 @@ def test_age_cached_images(self): def fake_get_ds_browser(ds_ref): return 'fake-ds-browser' - def fake_get_timestamp(ds_browser, path): + def fake_get_timestamp(ds_browser, ds_path): self._get_timestamp_called += 1 - if path == 'fake-ds-path/fake-image-1': + path = str(ds_path) + if path == '[fake-ds] fake-path/fake-image-1': # No time stamp exists return - if path == 'fake-ds-path/fake-image-2': + if path == '[fake-ds] fake-path/fake-image-2': # Timestamp that will be valid => no deletion return 'ts-2012-11-22-10-00-00' - if path == 'fake-ds-path/fake-image-3': + if path == '[fake-ds] fake-path/fake-image-3': # Timestamp that will be invalid => deletion return 'ts-2012-11-20-12-00-00' self.fail() def fake_mkdir(session, ts_path, dc_ref): self.assertEqual( - 'fake-ds-path/fake-image-1/ts-2012-11-22-12-00-00', + '[fake-ds] fake-path/fake-image-1/ts-2012-11-22-12-00-00', ts_path) - def fake_file_delete(session, path, dc_ref): - 
self.assertEqual('fake-ds-path/fake-image-3', path) + def fake_file_delete(session, ds_path, dc_ref): + self.assertEqual('[fake-ds] fake-path/fake-image-3', str(ds_path)) - def fake_timestamp_cleanup(dc_ref, ds_browser, path): - self.assertEqual('fake-ds-path/fake-image-4', path) + def fake_timestamp_cleanup(dc_ref, ds_browser, ds_path): + self.assertEqual('[fake-ds] fake-path/fake-image-4', str(ds_path)) with contextlib.nested( mock.patch.object(self._imagecache, '_get_ds_browser', @@ -184,15 +188,16 @@ def fake_timestamp_cleanup(dc_ref, ds_browser, path): ) as (_get_ds_browser, _get_timestamp, _mkdir, _file_delete, _timestamp_cleanup): timeutils.set_time_override(override_time=self._time) - datastore = {'name': 'ds', 'ref': 'fake-ds-ref'} + datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref') dc_info = vmops.DcInfo(ref='dc_ref', name='name', vmFolder='vmFolder') self._get_timestamp_called = 0 self._imagecache.originals = set(['fake-image-1', 'fake-image-2', 'fake-image-3', 'fake-image-4']) self._imagecache.used_images = set(['fake-image-4']) - self._imagecache._age_cached_images('fake-context', - datastore, dc_info, 'fake-ds-path') + self._imagecache._age_cached_images( + 'fake-context', datastore, dc_info, + ds_util.DatastorePath('fake-ds', 'fake-path')) self.assertEqual(3, self._get_timestamp_called) def test_update(self): @@ -202,7 +207,7 @@ def fake_list_datastore_images(ds_path, datastore): def fake_age_cached_images(context, datastore, dc_info, ds_path): - self.assertEqual('[ds] fake-base-folder', ds_path) + self.assertEqual('[ds] fake-base-folder', str(ds_path)) self.assertEqual(self.images, self._imagecache.used_images) self.assertEqual(self.images, @@ -228,7 +233,7 @@ def fake_age_cached_images(context, datastore, 'vm_state': '', 'task_state': ''}] self.images = set(['1', '2']) - datastore = {'name': 'ds', 'ref': 'fake-ds-ref'} + datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref') dc_info = vmops.DcInfo(ref='dc_ref', name='name', 
vmFolder='vmFolder') datastores_info = [(datastore, dc_info)] diff --git a/nova/virt/vmwareapi/ds_util.py b/nova/virt/vmwareapi/ds_util.py index 06dace3ff2..f06584dbbc 100644 --- a/nova/virt/vmwareapi/ds_util.py +++ b/nova/virt/vmwareapi/ds_util.py @@ -282,7 +282,7 @@ def _get_allowed_datastores(data_stores, datastore_regex, allowed_types): ds_name = propdict['summary.name'] if (propdict['summary.accessible'] and ds_type in allowed_types): if datastore_regex is None or datastore_regex.match(ds_name): - allowed.append({'ref': obj_content.obj, 'name': ds_name}) + allowed.append(Datastore(ref=obj_content.obj, name=ds_name)) return allowed @@ -418,7 +418,7 @@ def get_sub_folders(session, ds_browser, ds_path): session._get_vim(), "SearchDatastore_Task", ds_browser, - datastorePath=ds_path) + datastorePath=str(ds_path)) try: task_info = session._wait_for_task(search_task) except error_util.FileNotFoundException: diff --git a/nova/virt/vmwareapi/imagecache.py b/nova/virt/vmwareapi/imagecache.py index 617c78833b..e39808aaa3 100644 --- a/nova/virt/vmwareapi/imagecache.py +++ b/nova/virt/vmwareapi/imagecache.py @@ -78,15 +78,15 @@ def _folder_delete(self, path, dc_ref): def timestamp_folder_get(self, ds_path, image_id): """Returns the timestamp folder.""" - return '%s/%s' % (ds_path, image_id) + return ds_path.join(image_id) def timestamp_cleanup(self, dc_ref, ds_browser, ds_path): ts = self._get_timestamp(ds_browser, ds_path) if ts: - ts_path = '%s/%s' % (ds_path, ts) + ts_path = ds_path.join(ts) LOG.debug("Timestamp path %s exists. 
Deleting!", ts_path) # Image is used - no longer need timestamp folder - self._folder_delete(ts_path, dc_ref) + self._folder_delete(str(ts_path), dc_ref) def _get_timestamp(self, ds_browser, ds_path): files = ds_util.get_sub_folders(self._session, ds_browser, ds_path) @@ -119,7 +119,7 @@ def _list_datastore_images(self, ds_path, datastore): - unexplained_images - originals """ - ds_browser = self._get_ds_browser(datastore['ref']) + ds_browser = self._get_ds_browser(datastore.ref) originals = ds_util.get_sub_folders(self._session, ds_browser, ds_path) return {'unexplained_images': [], @@ -130,36 +130,35 @@ def _age_cached_images(self, context, datastore, dc_info, """Ages cached images.""" age_seconds = CONF.remove_unused_original_minimum_age_seconds unused_images = self.originals - self.used_images - ds_browser = self._get_ds_browser(datastore['ref']) + ds_browser = self._get_ds_browser(datastore.ref) for image in unused_images: path = self.timestamp_folder_get(ds_path, image) # Lock to ensure that the spawn will not try and access a image # that is currently being deleted on the datastore. - with lockutils.lock(path, lock_file_prefix='nova-vmware-ts', + with lockutils.lock(str(path), lock_file_prefix='nova-vmware-ts', external=True): ts = self._get_timestamp(ds_browser, path) if not ts: - ts_path = '%s/%s' % (path, - self._get_timestamp_filename()) + ts_path = path.join(self._get_timestamp_filename()) try: - ds_util.mkdir(self._session, ts_path, dc_info.ref) + ds_util.mkdir(self._session, str(ts_path), dc_info.ref) except error_util.FileAlreadyExistsException: LOG.debug("Timestamp already exists.") LOG.info(_("Image %s is no longer used by this node. " "Pending deletion!"), image) else: - dt = self._get_datetime_from_filename(ts) + dt = self._get_datetime_from_filename(str(ts)) if timeutils.is_older_than(dt, age_seconds): LOG.info(_("Image %s is no longer used. 
" "Deleting!"), path) # Image has aged - delete the image ID folder - self._folder_delete(path, dc_info.ref) + self._folder_delete(str(path), dc_info.ref) # If the image is used and the timestamp file exists then we delete # the timestamp. for image in self.used_images: path = self.timestamp_folder_get(ds_path, image) - with lockutils.lock(path, lock_file_prefix='nova-vmware-ts', + with lockutils.lock(str(path), lock_file_prefix='nova-vmware-ts', external=True): self.timestamp_cleanup(dc_info.ref, ds_browser, path) @@ -176,8 +175,7 @@ def update(self, context, instances, datastores_info): self.used_images = set(running['used_images'].keys()) # perform the aging and image verification per datastore for (datastore, dc_info) in datastores_info: - ds_path = ds_util.build_datastore_path(datastore['name'], - self._base_folder) + ds_path = datastore.build_path(self._base_folder) images = self._list_datastore_images(ds_path, datastore) self.originals = images['originals'] self._age_cached_images(context, datastore, dc_info, ds_path) diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 9d7093c7a4..73882dc068 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -308,12 +308,13 @@ def _get_image_properties(root_size): # will ensure that the aging will not delete a cache image if it # is going to be used now. if CONF.remove_unused_base_images: - ds_path = str(datastore.build_path(self._base_folder)) + ds_path = datastore.build_path(self._base_folder) path = self._imagecache.timestamp_folder_get(ds_path, upload_name) # Lock to ensure that the spawn will not try and access a image # that is currently being deleted on the datastore. 
- with lockutils.lock(path, lock_file_prefix='nova-vmware-ts', + with lockutils.lock(str(path), + lock_file_prefix='nova-vmware-ts', external=True): self._imagecache.timestamp_cleanup(dc_info.ref, ds_browser, path) @@ -1475,7 +1476,7 @@ def manage_image_cache(self, context, instances): self._datastore_regex) datastores_info = [] for ds in datastores: - ds_info = self.get_datacenter_ref_and_name(ds['ref']) + ds_info = self.get_datacenter_ref_and_name(ds.ref) datastores_info.append((ds, ds_info)) self._imagecache.update(context, instances, datastores_info) From 4be42a7f62f5b8f97e2cbdb0ad8f67c81601d682 Mon Sep 17 00:00:00 2001 From: Vui Lam Date: Fri, 6 Jun 2014 03:10:49 -0700 Subject: [PATCH 065/486] VMware: use datastore classes in file_move/delete/exists, mkdir Straightforward conversion of a few functions in ds_util to accepts DatastorePath objects instead of literal strings. partial blueprint vmware-spawn-refactor Change-Id: I9dff390ce020dcc39a05184a7ee69902e3412c9f --- nova/tests/virt/vmwareapi/test_driver_api.py | 13 ++++--- nova/tests/virt/vmwareapi/test_ds_util.py | 23 ++++++++---- nova/tests/virt/vmwareapi/test_imagecache.py | 7 ++-- nova/tests/virt/vmwareapi/test_vmops.py | 2 +- nova/virt/vmwareapi/ds_util.py | 14 +++---- nova/virt/vmwareapi/imagecache.py | 14 +++---- nova/virt/vmwareapi/vmops.py | 39 ++++++++++---------- 7 files changed, 62 insertions(+), 50 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index a8b06d285c..fe85720c82 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -2299,13 +2299,16 @@ def test_snapshot_using_file_manager(self): uuidutils.generate_uuid().AndReturn(uuid_str) self.mox.StubOutWithMock(ds_util, 'file_delete') + disk_ds_path = ds_util.DatastorePath( + self.ds, "vmware_temp", "%s.vmdk" % uuid_str) + disk_ds_flat_path = ds_util.DatastorePath( + self.ds, "vmware_temp", "%s-flat.vmdk" % uuid_str) # 
Check calls for delete vmdk and -flat.vmdk pair - ds_util.file_delete(mox.IgnoreArg(), - "[%s] vmware_temp/%s-flat.vmdk" % (self.ds, uuid_str), - mox.IgnoreArg()).AndReturn(None) - ds_util.file_delete(mox.IgnoreArg(), - "[%s] vmware_temp/%s.vmdk" % (self.ds, uuid_str), + ds_util.file_delete( + mox.IgnoreArg(), disk_ds_flat_path, mox.IgnoreArg()).AndReturn(None) + ds_util.file_delete( + mox.IgnoreArg(), disk_ds_path, mox.IgnoreArg()).AndReturn(None) self.mox.ReplayAll() self._test_snapshot() diff --git a/nova/tests/virt/vmwareapi/test_ds_util.py b/nova/tests/virt/vmwareapi/test_ds_util.py index 883229206f..17b2144ab2 100644 --- a/nova/tests/virt/vmwareapi/test_ds_util.py +++ b/nova/tests/virt/vmwareapi/test_ds_util.py @@ -47,7 +47,7 @@ def test_file_delete(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('DeleteDatastoreFile_Task', method) name = kwargs.get('name') - self.assertEqual('fake-datastore-path', name) + self.assertEqual('[ds] fake/path', name) datacenter = kwargs.get('datacenter') self.assertEqual('fake-dc-ref', datacenter) return 'fake_delete_task' @@ -57,8 +57,9 @@ def fake_call_method(module, method, *args, **kwargs): mock.patch.object(self.session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): + ds_path = ds_util.DatastorePath('ds', 'fake/path') ds_util.file_delete(self.session, - 'fake-datastore-path', 'fake-dc-ref') + ds_path, 'fake-dc-ref') _wait_for_task.assert_has_calls([ mock.call('fake_delete_task')]) @@ -80,8 +81,10 @@ def fake_call_method(module, method, *args, **kwargs): mock.patch.object(self.session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): + src_ds_path = ds_util.DatastorePath('ds', 'tmp/src') + dst_ds_path = ds_util.DatastorePath('ds', 'base/dst') ds_util.file_move(self.session, - 'fake-dc-ref', '[ds] tmp/src', '[ds] base/dst') + 'fake-dc-ref', src_ds_path, dst_ds_path) _wait_for_task.assert_has_calls([ mock.call('fake_move_task')]) @@ -89,7 +92,7 @@ def 
test_mkdir(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('MakeDirectory', method) name = kwargs.get('name') - self.assertEqual('fake-path', name) + self.assertEqual('[ds] fake/path', name) datacenter = kwargs.get('datacenter') self.assertEqual('fake-dc-ref', datacenter) createParentDirectories = kwargs.get('createParentDirectories') @@ -97,7 +100,8 @@ def fake_call_method(module, method, *args, **kwargs): with mock.patch.object(self.session, '_call_method', fake_call_method): - ds_util.mkdir(self.session, 'fake-path', 'fake-dc-ref') + ds_path = ds_util.DatastorePath('ds', 'fake/path') + ds_util.mkdir(self.session, ds_path, 'fake-dc-ref') def test_file_exists(self): def fake_call_method(module, method, *args, **kwargs): @@ -105,7 +109,7 @@ def fake_call_method(module, method, *args, **kwargs): ds_browser = args[0] self.assertEqual('fake-browser', ds_browser) datastorePath = kwargs.get('datastorePath') - self.assertEqual('fake-path', datastorePath) + self.assertEqual('[ds] fake/path', datastorePath) return 'fake_exists_task' # Should never get here @@ -118,6 +122,7 @@ def fake_wait_for_task(task_ref): result = fake.DataObject() result.file = [result_file] + result.path = '[ds] fake/path' task_info = fake.DataObject() task_info.result = result @@ -132,8 +137,9 @@ def fake_wait_for_task(task_ref): fake_call_method), mock.patch.object(self.session, '_wait_for_task', fake_wait_for_task)): + ds_path = ds_util.DatastorePath('ds', 'fake/path') file_exists = ds_util.file_exists(self.session, - 'fake-browser', 'fake-path', 'fake-file') + 'fake-browser', ds_path, 'fake-file') self.assertTrue(file_exists) def test_file_exists_fails(self): @@ -156,8 +162,9 @@ def fake_wait_for_task(task_ref): fake_call_method), mock.patch.object(self.session, '_wait_for_task', fake_wait_for_task)): + ds_path = ds_util.DatastorePath('ds', 'fake/path') file_exists = ds_util.file_exists(self.session, - 'fake-browser', 'fake-path', 'fake-file') + 'fake-browser', 
ds_path, 'fake-file') self.assertFalse(file_exists) def test_get_datastore(self): diff --git a/nova/tests/virt/vmwareapi/test_imagecache.py b/nova/tests/virt/vmwareapi/test_imagecache.py index 1a2a256232..3c3108d7f0 100644 --- a/nova/tests/virt/vmwareapi/test_imagecache.py +++ b/nova/tests/virt/vmwareapi/test_imagecache.py @@ -68,9 +68,10 @@ def fake_get_timestamp(ds_browser, ds_path): self._imagecache.timestamp_cleanup( 'fake-dc-ref', 'fake-ds-browser', ds_util.DatastorePath('fake-ds', 'fake-path')) + expected_ds_path = ds_util.DatastorePath( + 'fake-ds', 'fake-path', self._file_name) _file_delete.assert_called_once_with(self._session, - '[fake-ds] fake-path/ts-2012-11-22-12-00-00', - 'fake-dc-ref') + expected_ds_path, 'fake-dc-ref') def test_get_timestamp(self): def fake_get_sub_folders(session, ds_browser, ds_path): @@ -166,7 +167,7 @@ def fake_get_timestamp(ds_browser, ds_path): def fake_mkdir(session, ts_path, dc_ref): self.assertEqual( '[fake-ds] fake-path/fake-image-1/ts-2012-11-22-12-00-00', - ts_path) + str(ts_path)) def fake_file_delete(session, ds_path, dc_ref): self.assertEqual('[fake-ds] fake-path/fake-image-3', str(ds_path)) diff --git a/nova/tests/virt/vmwareapi/test_vmops.py b/nova/tests/virt/vmwareapi/test_vmops.py index a093bff658..a47fb2bb01 100644 --- a/nova/tests/virt/vmwareapi/test_vmops.py +++ b/nova/tests/virt/vmwareapi/test_vmops.py @@ -184,7 +184,7 @@ def _setup_create_folder_mocks(self): ref=dc_ref, name='fake-name', vmFolder='fake-folder') - path = ds_util.build_datastore_path(ds_name, base_name) + path = ds_util.DatastorePath(ds_name, base_name) ds_util.mkdir = mock.Mock() return ds_name, ds_ref, ops, path, dc_ref diff --git a/nova/virt/vmwareapi/ds_util.py b/nova/virt/vmwareapi/ds_util.py index f06584dbbc..01176b2096 100644 --- a/nova/virt/vmwareapi/ds_util.py +++ b/nova/virt/vmwareapi/ds_util.py @@ -320,14 +320,14 @@ def get_available_datastores(session, cluster=None, datastore_regex=None): return allowed -def file_delete(session, 
datastore_path, dc_ref): - LOG.debug("Deleting the datastore file %s", datastore_path) +def file_delete(session, ds_path, dc_ref): + LOG.debug("Deleting the datastore file %s", ds_path) vim = session._get_vim() file_delete_task = session._call_method( session._get_vim(), "DeleteDatastoreFile_Task", vim.get_service_content().fileManager, - name=datastore_path, + name=str(ds_path), datacenter=dc_ref) session._wait_for_task(file_delete_task) LOG.debug("Deleted the datastore file") @@ -362,9 +362,9 @@ def file_move(session, dc_ref, src_file, dst_file): session._get_vim(), "MoveDatastoreFile_Task", vim.get_service_content().fileManager, - sourceName=src_file, + sourceName=str(src_file), sourceDatacenter=dc_ref, - destinationName=dst_file, + destinationName=str(dst_file), destinationDatacenter=dc_ref) session._wait_for_task(move_task) LOG.debug("File moved") @@ -384,7 +384,7 @@ def file_exists(session, ds_browser, ds_path, file_name): search_task = session._call_method(session._get_vim(), "SearchDatastore_Task", ds_browser, - datastorePath=ds_path, + datastorePath=str(ds_path), searchSpec=search_spec) try: task_info = session._wait_for_task(search_task) @@ -404,7 +404,7 @@ def mkdir(session, ds_path, dc_ref): LOG.debug("Creating directory with path %s", ds_path) session._call_method(session._get_vim(), "MakeDirectory", session._get_vim().get_service_content().fileManager, - name=ds_path, datacenter=dc_ref, + name=str(ds_path), datacenter=dc_ref, createParentDirectories=True) LOG.debug("Created directory with path %s", ds_path) diff --git a/nova/virt/vmwareapi/imagecache.py b/nova/virt/vmwareapi/imagecache.py index e39808aaa3..7713fc8d4f 100644 --- a/nova/virt/vmwareapi/imagecache.py +++ b/nova/virt/vmwareapi/imagecache.py @@ -63,18 +63,18 @@ def __init__(self, session, base_folder): self._base_folder = base_folder self._ds_browser = {} - def _folder_delete(self, path, dc_ref): + def _folder_delete(self, ds_path, dc_ref): try: - ds_util.file_delete(self._session, path, 
dc_ref) + ds_util.file_delete(self._session, ds_path, dc_ref) except (error_util.CannotDeleteFileException, error_util.FileFaultException, error_util.FileLockedException) as e: # There may be more than one process or thread that tries # to delete the file. LOG.warning(_("Unable to delete %(file)s. Exception: %(ex)s"), - {'file': path, 'ex': e}) + {'file': ds_path, 'ex': e}) except error_util.FileNotFoundException: - LOG.debug("File not found: %s", path) + LOG.debug("File not found: %s", ds_path) def timestamp_folder_get(self, ds_path, image_id): """Returns the timestamp folder.""" @@ -86,7 +86,7 @@ def timestamp_cleanup(self, dc_ref, ds_browser, ds_path): ts_path = ds_path.join(ts) LOG.debug("Timestamp path %s exists. Deleting!", ts_path) # Image is used - no longer need timestamp folder - self._folder_delete(str(ts_path), dc_ref) + self._folder_delete(ts_path, dc_ref) def _get_timestamp(self, ds_browser, ds_path): files = ds_util.get_sub_folders(self._session, ds_browser, ds_path) @@ -141,7 +141,7 @@ def _age_cached_images(self, context, datastore, dc_info, if not ts: ts_path = path.join(self._get_timestamp_filename()) try: - ds_util.mkdir(self._session, str(ts_path), dc_info.ref) + ds_util.mkdir(self._session, ts_path, dc_info.ref) except error_util.FileAlreadyExistsException: LOG.debug("Timestamp already exists.") LOG.info(_("Image %s is no longer used by this node. " @@ -152,7 +152,7 @@ def _age_cached_images(self, context, datastore, dc_info, LOG.info(_("Image %s is no longer used. " "Deleting!"), path) # Image has aged - delete the image ID folder - self._folder_delete(str(path), dc_info.ref) + self._folder_delete(path, dc_info.ref) # If the image is used and the timestamp file exists then we delete # the timestamp. 
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 73882dc068..254be664b8 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -133,7 +133,8 @@ def _extend_virtual_disk(self, instance, requested_size, name, dc_ref): # Clean up files created during the extend operation files = [name.replace(".vmdk", "-flat.vmdk"), name] for file in files: - self._delete_datastore_file(instance, file, dc_ref) + ds_path = ds_util.DatastorePath.parse(file) + self._delete_datastore_file(instance, ds_path, dc_ref) LOG.debug("Extended root virtual disk") @@ -353,7 +354,7 @@ def _get_image_properties(root_size): # directory. ds_util.mkdir( self._session, - str(datastore.build_path(upload_folder)), + datastore.build_path(upload_folder), dc_info.ref) LOG.debug("Create virtual disk on %s", datastore.name, instance=instance) @@ -366,7 +367,7 @@ def _get_image_properties(root_size): LOG.debug("Virtual disk created on %s.", datastore.name, instance=instance) self._delete_datastore_file(instance, - str(flat_ds_loc), + flat_ds_loc, dc_info.ref) upload_rel_path = flat_ds_loc.rel_path else: @@ -391,11 +392,11 @@ def _get_image_properties(root_size): str(upload_path_loc), copy_spec) self._delete_datastore_file(instance, - str(sparse_ds_loc), + sparse_ds_loc, dc_info.ref) base_folder = '%s/%s' % (self._base_folder, upload_name) - dest_folder = str(datastore.build_path(base_folder)) - src_folder = str(datastore.build_path(upload_folder)) + dest_folder = datastore.build_path(base_folder) + src_folder = datastore.build_path(upload_folder) try: ds_util.file_move(self._session, dc_info.ref, src_folder, dest_folder) @@ -408,8 +409,8 @@ def _get_image_properties(root_size): # Delete the temp upload folder self._delete_datastore_file(instance, - str(datastore.build_path( - tmp_upload_folder)), + datastore.build_path( + tmp_upload_folder), dc_info.ref) else: # linked clone base disk exists @@ -762,10 +763,10 @@ def _check_if_tmp_folder_exists(): # will be 
copied to. A random name is chosen so that we don't have # name clashes. random_name = uuidutils.generate_uuid() - dest_vmdk_file_path = ds_util.build_datastore_path(datastore_name, - "%s/%s.vmdk" % (self._tmp_folder, random_name)) - dest_vmdk_data_file_path = ds_util.build_datastore_path(datastore_name, - "%s/%s-flat.vmdk" % (self._tmp_folder, random_name)) + dest_vmdk_file_path = ds_util.DatastorePath( + datastore_name, self._tmp_folder, "%s.vmdk" % random_name) + dest_vmdk_data_file_path = ds_util.DatastorePath( + datastore_name, self._tmp_folder, "%s-flat.vmdk" % random_name) dc_info = self.get_datacenter_ref_and_name(ds_ref) def _copy_vmdk_content(): @@ -782,7 +783,7 @@ def _copy_vmdk_content(): service_content.virtualDiskManager, sourceName=vmdk_file_path_before_snapshot, sourceDatacenter=dc_info.ref, - destName=dest_vmdk_file_path, + destName=str(dest_vmdk_file_path), destDatacenter=dc_info.ref, destSpec=copy_spec, force=False) @@ -918,7 +919,7 @@ def _destroy_instance(self, instance, network_info, destroy_disks=True, # the datastore. if destroy_disks and vm_ds_path: try: - dir_ds_compliant_path = str(vm_ds_path.parent) + dir_ds_compliant_path = vm_ds_path.parent LOG.debug("Deleting contents of the VM from " "datastore %(datastore_name)s", {'datastore_name': vm_ds_path.datastore}, @@ -1431,7 +1432,7 @@ def _create_folder_if_missing(self, ds_name, ds_ref, folder): exists. If this throws and exception 'FileAlreadyExistsException' then the folder already exists on the datastore. """ - path = ds_util.build_datastore_path(ds_name, folder) + path = ds_util.DatastorePath(ds_name, folder) dc_info = self.get_datacenter_ref_and_name(ds_ref) try: ds_util.mkdir(self._session, path, dc_info.ref) @@ -1454,9 +1455,9 @@ def _check_if_folder_file_exists(self, ds_browser, ds_ref, ds_name, # Ensure that the cache folder exists self.check_cache_folder(ds_name, ds_ref) # Check if the file exists or not. 
- folder_path = ds_util.build_datastore_path(ds_name, folder_name) + folder_ds_path = ds_util.DatastorePath(ds_name, folder_name) file_exists = ds_util.file_exists(self._session, ds_browser, - folder_path, file_name) + folder_ds_path, file_name) return file_exists def inject_network_info(self, instance, network_info): @@ -1476,8 +1477,8 @@ def manage_image_cache(self, context, instances): self._datastore_regex) datastores_info = [] for ds in datastores: - ds_info = self.get_datacenter_ref_and_name(ds.ref) - datastores_info.append((ds, ds_info)) + dc_info = self.get_datacenter_ref_and_name(ds.ref) + datastores_info.append((ds, dc_info)) self._imagecache.update(context, instances, datastores_info) def _get_valid_vms_from_retrieve_result(self, retrieve_result): From 45b8e3a947304132dee8def30e66eb9c500b76ee Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Mon, 30 Jun 2014 16:03:51 +0100 Subject: [PATCH 066/486] VMware: Trivial indentation cleanups in vmops This change has been split out of https://review.openstack.org/#/c/87002/, which was written by Shawn Hartsock. 
TrivialFix Co-authored-by: Shawn Hartsock Change-Id: I6c3ee19b019668a7509ee9835a53271f98805f8f --- nova/virt/vmwareapi/vmops.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 254be664b8..3f05470718 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -420,7 +420,8 @@ def _get_image_properties(root_size): if is_iso: if root_gb_in_kb: dest_vmdk_path = self._get_vmdk_path(datastore.name, - instance['uuid'], instance_name) + instance['uuid'], + instance_name) # Create the blank virtual disk for the VM LOG.debug("Create blank virtual disk on %s", datastore.name, instance=instance) @@ -596,7 +597,7 @@ def _create_config_drive(self, instance, injected_files, admin_password, e, instance=instance) def _attach_cdrom_to_vm(self, vm_ref, instance, - datastore, file_path): + datastore, file_path): """Attach cdrom to VM by reconfiguration.""" instance_name = instance['name'] client_factory = self._session._get_vim().client.factory From 23f0067452aa93f468032e0ef19a21f213d50c00 Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Tue, 1 Jul 2014 09:59:14 +0100 Subject: [PATCH 067/486] VMware: Convert vmops to use instance as an object This is mostly mechanical, but it does expose what appears to be a bug in the interaction between object.Instance's config_drive field and configdrive.required_by(). object.Instance defines config_drive to be a String, but configdrive.required_by() expects it to be a Boolean. In assigning a boolean value to config_drive, it is cast to a String, which evaluates as True when cast back to a Boolean. We therefore update the tests which use config_drive to set either a String value, or None, which is what other code does. Note that Nova does not pass an Instance object to all driver methods, so not all uses can be updated. 
Change-Id: Ifc9ce5d96da30abfa09b608a9c8615ede1485a50 --- nova/tests/virt/vmwareapi/test_configdrive.py | 52 ++++++++++--------- nova/virt/vmwareapi/vmops.py | 35 +++++++------ 2 files changed, 45 insertions(+), 42 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_configdrive.py b/nova/tests/virt/vmwareapi/test_configdrive.py index e957dbdb02..42b1b49e14 100644 --- a/nova/tests/virt/vmwareapi/test_configdrive.py +++ b/nova/tests/virt/vmwareapi/test_configdrive.py @@ -23,6 +23,7 @@ from nova import context from nova.image import glance from nova import test +from nova.tests import fake_instance import nova.tests.image.fake from nova.tests import utils from nova.tests.virt.vmwareapi import fake as vmwareapi_fake @@ -56,28 +57,29 @@ def setUp(self): self.node_name = '%s(%s)' % (self.conn.dict_mors.keys()[0], cluster_name) image_ref = nova.tests.image.fake.get_valid_image_id() - self.test_instance = {'vm_state': 'building', - 'project_id': 'fake', - 'user_id': 'fake', - 'name': '1', - 'kernel_id': '1', - 'ramdisk_id': '1', - 'mac_addresses': [ - {'address': 'de:ad:be:ef:be:ef'} - ], - 'memory_mb': 8192, - 'flavor': 'm1.large', - 'vcpus': 4, - 'root_gb': 80, - 'image_ref': image_ref, - 'host': 'fake_host', - 'task_state': - 'scheduling', - 'reservation_id': 'r-3t8muvr0', - 'id': 1, - 'uuid': 'fake-uuid', - 'node': self.node_name, - 'metadata': []} + instance_values = { + 'vm_state': 'building', + 'project_id': 'fake', + 'user_id': 'fake', + 'name': '1', + 'kernel_id': '1', + 'ramdisk_id': '1', + 'mac_addresses': [{'address': 'de:ad:be:ef:be:ef'}], + 'memory_mb': 8192, + 'flavor': 'm1.large', + 'vcpus': 4, + 'root_gb': 80, + 'image_ref': image_ref, + 'host': 'fake_host', + 'task_state': 'scheduling', + 'reservation_id': 'r-3t8muvr0', + 'id': 1, + 'uuid': 'fake-uuid', + 'node': self.node_name, + 'metadata': [] + } + self.test_instance = fake_instance.fake_instance_obj(self.context, + **instance_values) (image_service, image_id) = 
glance.get_remote_image_service(context, image_ref) @@ -160,7 +162,7 @@ def fake_write_handle(host, dc_name, ds_name, cookies, def test_create_vm_with_config_drive_verify_method_invocation(self): self.instance = copy.deepcopy(self.test_instance) - self.instance['config_drive'] = True + self.instance['config_drive'] = 'True' self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive') self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm') self.conn._vmops._create_config_drive(self.instance, @@ -184,7 +186,7 @@ def test_create_vm_with_config_drive_verify_method_invocation(self): def test_create_vm_without_config_drive(self): self.instance = copy.deepcopy(self.test_instance) - self.instance['config_drive'] = False + self.instance['config_drive'] = None self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive') self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm') self.mox.ReplayAll() @@ -195,5 +197,5 @@ def test_create_vm_without_config_drive(self): def test_create_vm_with_config_drive(self): self.instance = copy.deepcopy(self.test_instance) - self.instance['config_drive'] = True + self.instance['config_drive'] = 'True' self._spawn_vm() diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 3f05470718..61e712ba6d 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -209,7 +209,7 @@ def _get_image_properties(root_size): """Get the Size of the flat vmdk file that is there on the storage repository. 
""" - image_ref = instance.get('image_ref') + image_ref = instance.image_ref if image_ref: _image_info = vmware_images.get_vmdk_size_and_properties( context, image_ref, instance) @@ -238,7 +238,7 @@ def _get_image_properties(root_size): return (vmdk_file_size_in_kb, os_type, adapter_type, disk_type, vif_model, image_linked_clone) - root_gb = instance['root_gb'] + root_gb = instance.root_gb root_gb_in_kb = root_gb * units.Mi (vmdk_file_size_in_kb, os_type, adapter_type, disk_type, vif_model, @@ -246,7 +246,7 @@ def _get_image_properties(root_size): if root_gb_in_kb and vmdk_file_size_in_kb > root_gb_in_kb: reason = _("Image disk size greater than requested disk size") - raise exception.InstanceUnacceptable(instance_id=instance['uuid'], + raise exception.InstanceUnacceptable(instance_id=instance.uuid, reason=reason) node_mo_id = vm_util.get_mo_id_from_instance(instance) @@ -260,7 +260,7 @@ def _get_image_properties(root_size): # Get the instance name. In some cases this may differ from the 'uuid', # for example when the spawn of a rescue instance takes place. 
if not instance_name: - instance_name = instance['uuid'] + instance_name = instance.uuid # Create the VM config_spec = vm_util.get_vm_create_spec( @@ -292,7 +292,7 @@ def _get_image_properties(root_size): image_linked_clone, CONF.vmware.use_linked_clone ) - upload_name = instance['image_ref'] + upload_name = instance.image_ref upload_folder = '%s/%s' % (self._base_folder, upload_name) # The vmdk meta-data file @@ -420,7 +420,7 @@ def _get_image_properties(root_size): if is_iso: if root_gb_in_kb: dest_vmdk_path = self._get_vmdk_path(datastore.name, - instance['uuid'], + instance.uuid, instance_name) # Create the blank virtual disk for the VM LOG.debug("Create blank virtual disk on %s", @@ -458,7 +458,8 @@ def _get_image_properties(root_size): root_vmdk_path, dc_info.ref) else: upload_folder = '%s/%s' % (self._base_folder, upload_name) - root_vmdk_name = "%s.%s.vmdk" % (upload_name, root_gb) + root_vmdk_name = "%s.%s.vmdk" % (upload_name, + instance.root_gb) root_vmdk_path = str(datastore.build_path( upload_folder, root_vmdk_name)) @@ -482,7 +483,7 @@ def _get_image_properties(root_size): upload_folder, root_vmdk_name): LOG.debug("Copying root disk of size %sGb", - root_gb) + instance.root_gb) copy_spec = self.get_copy_virtual_disk_spec( client_factory, adapter_type, disk_type) @@ -541,7 +542,7 @@ def _get_image_properties(root_size): admin_password, datastore.name, dc_info.name, - instance['uuid'], + instance.uuid, cookies) uploaded_iso_path = ds_util.build_datastore_path( datastore.name, @@ -599,7 +600,7 @@ def _create_config_drive(self, instance, injected_files, admin_password, def _attach_cdrom_to_vm(self, vm_ref, instance, datastore, file_path): """Attach cdrom to VM by reconfiguration.""" - instance_name = instance['name'] + instance_name = instance.name client_factory = self._session._get_vim().client.factory devices = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, @@ -680,7 +681,7 @@ def _create_vm_snapshot(self, instance, vm_ref): 
snapshot_task = self._session._call_method( self._session._get_vim(), "CreateSnapshot_Task", vm_ref, - name="%s-snapshot" % instance['uuid'], + name="%s-snapshot" % instance.uuid, description="Taking Snapshot of the VM", memory=False, quiesce=True) @@ -727,7 +728,7 @@ def _get_vm_and_vmdk_attribs(): "VirtualMachine", "config.hardware.device") (vmdk_file_path_before_snapshot, adapter_type, disk_type) = vm_util.get_vmdk_path_and_adapter_type( - hw_devices, uuid=instance['uuid']) + hw_devices, uuid=instance.uuid) if not vmdk_file_path_before_snapshot: LOG.debug("No root disk defined. Unable to snapshot.") raise error_util.NoRootDiskDefined() @@ -876,7 +877,7 @@ def _destroy_instance(self, instance, network_info, destroy_disks=True, # Destroy a VM instance # Get the instance name. In some cases this may differ from the 'uuid', # for example when the spawn of a rescue instance takes place. - if not instance_name: + if instance_name is None: instance_name = instance['uuid'] try: vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name) @@ -1109,7 +1110,7 @@ def power_on(self, instance): vm_util.power_on_instance(self._session, instance) def _get_orig_vm_name_label(self, instance): - return instance['uuid'] + '-orig' + return instance.uuid + '-orig' def _update_instance_progress(self, context, instance, step, total_steps): """Update instance progress percent to reflect current step number @@ -1173,11 +1174,11 @@ def migrate_disk_and_power_off(self, context, instance, dest, def confirm_migration(self, migration, instance, network_info): """Confirms a resize, destroying the source VM.""" # Destroy the original VM. The vm_ref needs to be searched using the - # instance['uuid'] + self._migrate_suffix as the identifier. We will + # instance.uuid + self._migrate_suffix as the identifier. 
We will # not get the vm when searched using the instanceUuid but rather will # be found using the uuid buried in the extraConfig vm_ref = vm_util.search_vm_ref_by_identifier(self._session, - instance['uuid'] + self._migrate_suffix) + instance.uuid + self._migrate_suffix) if vm_ref is None: LOG.debug("instance not present", instance=instance) return @@ -1607,6 +1608,6 @@ def get_vnc_console(self, instance): # NOTE: VM can move hosts in some situations. Debug for admins. LOG.debug("VM %(uuid)s is currently on host %(host_name)s", - {'uuid': instance['name'], 'host_name': host_name}, + {'uuid': instance.name, 'host_name': host_name}, instance=instance) return vnc_console From bd1f3300945a8b4fe0d09bbe8c2d40eb40b6d111 Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Mon, 14 Jul 2014 17:12:25 +0100 Subject: [PATCH 068/486] VMware: Remove unnecessary deepcopy()s in test_configdrive test_configdrive was doing a deepcopy of its test_instance object in some tests. This was unnecessary, as the object is created fresh in setUp(). Also, change a couple of dict lookups of test_instance to object properties. Change-Id: I3a5039fde9f8fba5285d90b28743b7af8cd1a168 --- nova/tests/virt/vmwareapi/test_configdrive.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_configdrive.py b/nova/tests/virt/vmwareapi/test_configdrive.py index 42b1b49e14..a52280afbe 100644 --- a/nova/tests/virt/vmwareapi/test_configdrive.py +++ b/nova/tests/virt/vmwareapi/test_configdrive.py @@ -14,7 +14,6 @@ # under the License. 
import contextlib -import copy import fixtures import mock @@ -125,7 +124,7 @@ def _spawn_vm(self, injected_files=None, admin_password=None, injected_files = injected_files or [] read_file_handle = mock.MagicMock() write_file_handle = mock.MagicMock() - self.image_ref = self.instance['image_ref'] + self.image_ref = self.test_instance.image_ref def fake_read_handle(read_iter): return read_file_handle @@ -151,7 +150,7 @@ def fake_write_handle(host, dc_name, ds_name, cookies, side_effect=fake_read_handle), mock.patch.object(vmware_images, 'start_transfer') ) as (fake_http_write, fake_glance_read, fake_start_transfer): - self.conn.spawn(self.context, self.instance, self.image, + self.conn.spawn(self.context, self.test_instance, self.image, injected_files=injected_files, admin_password=admin_password, network_info=self.network_info, @@ -161,11 +160,10 @@ def fake_write_handle(host, dc_name, ds_name, cookies, write_file_handle=write_file_handle) def test_create_vm_with_config_drive_verify_method_invocation(self): - self.instance = copy.deepcopy(self.test_instance) - self.instance['config_drive'] = 'True' + self.test_instance.config_drive = 'True' self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive') self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm') - self.conn._vmops._create_config_drive(self.instance, + self.conn._vmops._create_config_drive(self.test_instance, mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), @@ -185,8 +183,7 @@ def test_create_vm_with_config_drive_verify_method_invocation(self): self._spawn_vm() def test_create_vm_without_config_drive(self): - self.instance = copy.deepcopy(self.test_instance) - self.instance['config_drive'] = None + self.test_instance.config_drive = None self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive') self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm') self.mox.ReplayAll() @@ -196,6 +193,5 @@ def test_create_vm_without_config_drive(self): self._spawn_vm() def 
test_create_vm_with_config_drive(self): - self.instance = copy.deepcopy(self.test_instance) - self.instance['config_drive'] = 'True' + self.test_instance.config_drive = 'True' self._spawn_vm() From 74d06db19fe0037435c12e52c3c88f980e619420 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 15 Jul 2014 14:58:32 +0200 Subject: [PATCH 069/486] Partial oslo-incubator sync Generated with: python update.py --base nova --dest-dir ~/Develop/tmp/nova/ --modules timeutils,sslutils,importutils,memorycache,strutils,context,fileutils,versionutils,systemd context: 9b73877 Add a RequestContext.from_dict method 85d1ce6 Python 3: enable tests/unit/middleware/test_request_id.py fileutils: 2b966f9 Fix deletion of cached file for policy enforcer gettextutils: 3d90045 Backport code for i18n to check lazy at runtime jsonutils: ef37e03 Added missing jsonutils.dump() function log: 5cac11d Merge "Add default log level for websocket" 433fa0b Make logging_context_format_string optional in log.set_defaults ac92c06 Add default log level for websocket 0aa2bd4 Merge "Ability to customize default_log_levels for each project" 5fd77eb Ability to customize default_log_levels for each project i4d9328c Python 3: enable tests/unit/test_log.py 722f418 Merge "update new requests logger to default WARN" cb5a804 Move `mask_password` to strutils memorycache: 90ae24b Remove redundant default=None for config options 297d772 Raise exception when importing memcache error strutils: cb5a804 Move `mask_password` to strutils systemd: 17c4e21 Fix docstring indentation in systemd 667d1ba Fixed spelling error - occured to occurred versionutils: a2ad3a2 Allow deprecated decorator to specify no plan for removal 05ae498 Add JUNO as a target to versionutils module Change-Id: I9e8e8e9b15075d99cf394170c6f5300e7bd0d4cc --- nova/openstack/common/context.py | 17 ++++- nova/openstack/common/fileutils.py | 15 +++- nova/openstack/common/gettextutils.py | 63 ++++++---------- nova/openstack/common/jsonutils.py | 4 ++ 
nova/openstack/common/log.py | 100 ++++++++------------------ nova/openstack/common/memorycache.py | 8 +-- nova/openstack/common/sslutils.py | 3 - nova/openstack/common/strutils.py | 56 +++++++++++++++ nova/openstack/common/systemd.py | 6 +- nova/openstack/common/versionutils.py | 37 +++++++++- 10 files changed, 182 insertions(+), 127 deletions(-) diff --git a/nova/openstack/common/context.py b/nova/openstack/common/context.py index 09019ee384..b612db7140 100644 --- a/nova/openstack/common/context.py +++ b/nova/openstack/common/context.py @@ -25,7 +25,7 @@ def generate_request_id(): - return 'req-%s' % str(uuid.uuid4()) + return b'req-' + str(uuid.uuid4()).encode('ascii') class RequestContext(object): @@ -77,6 +77,21 @@ def to_dict(self): 'instance_uuid': self.instance_uuid, 'user_identity': user_idt} + @classmethod + def from_dict(cls, ctx): + return cls( + auth_token=ctx.get("auth_token"), + user=ctx.get("user"), + tenant=ctx.get("tenant"), + domain=ctx.get("domain"), + user_domain=ctx.get("user_domain"), + project_domain=ctx.get("project_domain"), + is_admin=ctx.get("is_admin", False), + read_only=ctx.get("read_only", False), + show_deleted=ctx.get("show_deleted", False), + request_id=ctx.get("request_id"), + instance_uuid=ctx.get("instance_uuid")) + def get_admin_context(show_deleted=False): context = RequestContext(None, diff --git a/nova/openstack/common/fileutils.py b/nova/openstack/common/fileutils.py index 16050a138f..12ae198303 100644 --- a/nova/openstack/common/fileutils.py +++ b/nova/openstack/common/fileutils.py @@ -50,8 +50,8 @@ def read_cached_file(filename, force_reload=False): """ global _FILE_CACHE - if force_reload and filename in _FILE_CACHE: - del _FILE_CACHE[filename] + if force_reload: + delete_cached_file(filename) reloaded = False mtime = os.path.getmtime(filename) @@ -66,6 +66,17 @@ def read_cached_file(filename, force_reload=False): return (reloaded, cache_info['data']) +def delete_cached_file(filename): + """Delete cached file if 
present. + + :param filename: filename to delete + """ + global _FILE_CACHE + + if filename in _FILE_CACHE: + del _FILE_CACHE[filename] + + def delete_if_exists(path, remove=os.unlink): """Delete a file, but ignore file not found error. diff --git a/nova/openstack/common/gettextutils.py b/nova/openstack/common/gettextutils.py index dcb53582a1..4dd2ac6594 100644 --- a/nova/openstack/common/gettextutils.py +++ b/nova/openstack/common/gettextutils.py @@ -23,7 +23,6 @@ """ import copy -import functools import gettext import locale from logging import handlers @@ -42,7 +41,7 @@ class TranslatorFactory(object): """Create translator functions """ - def __init__(self, domain, lazy=False, localedir=None): + def __init__(self, domain, localedir=None): """Establish a set of translation functions for the domain. :param domain: Name of translation domain, @@ -55,7 +54,6 @@ def __init__(self, domain, lazy=False, localedir=None): :type localedir: str """ self.domain = domain - self.lazy = lazy if localedir is None: localedir = os.environ.get(domain.upper() + '_LOCALEDIR') self.localedir = localedir @@ -75,16 +73,19 @@ def _make_translation_func(self, domain=None): """ if domain is None: domain = self.domain - if self.lazy: - return functools.partial(Message, domain=domain) - t = gettext.translation( - domain, - localedir=self.localedir, - fallback=True, - ) - if six.PY3: - return t.gettext - return t.ugettext + t = gettext.translation(domain, + localedir=self.localedir, + fallback=True) + # Use the appropriate method of the translation object based + # on the python version. + m = t.gettext if six.PY3 else t.ugettext + + def f(msg): + """oslo.i18n.gettextutils translation function.""" + if USE_LAZY: + return Message(msg, domain=domain) + return m(msg) + return f @property def primary(self): @@ -147,19 +148,11 @@ def enable_lazy(): your project is importing _ directly instead of using the gettextutils.install() way of importing the _ function. 
""" - # FIXME(dhellmann): This function will be removed in oslo.i18n, - # because the TranslatorFactory makes it superfluous. - global _, _LI, _LW, _LE, _LC, USE_LAZY - tf = TranslatorFactory('nova', lazy=True) - _ = tf.primary - _LI = tf.log_info - _LW = tf.log_warning - _LE = tf.log_error - _LC = tf.log_critical + global USE_LAZY USE_LAZY = True -def install(domain, lazy=False): +def install(domain): """Install a _() function using the given translation domain. Given a translation domain, install a _() function using gettext's @@ -170,26 +163,14 @@ def install(domain, lazy=False): a translation-domain-specific environment variable (e.g. NOVA_LOCALEDIR). + Note that to enable lazy translation, enable_lazy must be + called. + :param domain: the translation domain - :param lazy: indicates whether or not to install the lazy _() function. - The lazy _() introduces a way to do deferred translation - of messages by installing a _ that builds Message objects, - instead of strings, which can then be lazily translated into - any available locale. 
""" - if lazy: - from six import moves - tf = TranslatorFactory(domain, lazy=True) - moves.builtins.__dict__['_'] = tf.primary - else: - localedir = '%s_LOCALEDIR' % domain.upper() - if six.PY3: - gettext.install(domain, - localedir=os.environ.get(localedir)) - else: - gettext.install(domain, - localedir=os.environ.get(localedir), - unicode=True) + from six import moves + tf = TranslatorFactory(domain) + moves.builtins.__dict__['_'] = tf.primary class Message(six.text_type): diff --git a/nova/openstack/common/jsonutils.py b/nova/openstack/common/jsonutils.py index 650c983281..a201b8c817 100644 --- a/nova/openstack/common/jsonutils.py +++ b/nova/openstack/common/jsonutils.py @@ -168,6 +168,10 @@ def dumps(value, default=to_primitive, **kwargs): return json.dumps(value, default=default, **kwargs) +def dump(obj, fp, *args, **kwargs): + return json.dump(obj, fp, *args, **kwargs) + + def loads(s, encoding='utf-8', **kwargs): return json.loads(strutils.safe_decode(s, encoding), **kwargs) diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py index 354af01e25..bc9cbfa411 100644 --- a/nova/openstack/common/log.py +++ b/nova/openstack/common/log.py @@ -33,7 +33,6 @@ import logging.config import logging.handlers import os -import re import sys import traceback @@ -45,30 +44,13 @@ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import local +# NOTE(flaper87): Pls, remove when graduating this module +# from the incubator. +from nova.openstack.common.strutils import mask_password # noqa _DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" -_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] - -# NOTE(ldbragst): Let's build a list of regex objects using the list of -# _SANITIZE_KEYS we already have. This way, we only have to add the new key -# to the list of _SANITIZE_KEYS and we can generate regular expressions -# for XML and JSON automatically. 
-_SANITIZE_PATTERNS = [] -_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', - r'(<%(key)s>).*?()', - r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', - r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])', - r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])' - '.*?([\'"])', - r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)'] - -for key in _SANITIZE_KEYS: - for pattern in _FORMAT_PATTERNS: - reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) - _SANITIZE_PATTERNS.append(reg_ex) - common_cli_opts = [ cfg.BoolOpt('debug', @@ -138,6 +120,12 @@ help='Log output to standard error.') ] +DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', + 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', + 'oslo.messaging=INFO', 'iso8601=WARN', + 'requests.packages.urllib3.connectionpool=WARN', + 'urllib3.connectionpool=WARN', 'websocket=WARN'] + log_opts = [ cfg.StrOpt('logging_context_format_string', default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' @@ -156,17 +144,7 @@ '%(instance)s', help='Prefix each line of exception output with this format.'), cfg.ListOpt('default_log_levels', - default=[ - 'amqp=WARN', - 'amqplib=WARN', - 'boto=WARN', - 'qpid=WARN', - 'sqlalchemy=WARN', - 'suds=INFO', - 'oslo.messaging=INFO', - 'iso8601=WARN', - 'requests.packages.urllib3.connectionpool=WARN' - ], + default=DEFAULT_LOG_LEVELS, help='List of logger=LEVEL pairs.'), cfg.BoolOpt('publish_errors', default=False, @@ -244,40 +222,6 @@ def _get_log_file_path(binary=None): return None -def mask_password(message, secret="***"): - """Replace password with 'secret' in message. - - :param message: The string which includes security information. - :param secret: value with which to replace passwords. - :returns: The unicode value of message with the password fields masked. 
- - For example: - - >>> mask_password("'adminPass' : 'aaaaa'") - "'adminPass' : '***'" - >>> mask_password("'admin_pass' : 'aaaaa'") - "'admin_pass' : '***'" - >>> mask_password('"password" : "aaaaa"') - '"password" : "***"' - >>> mask_password("'original_password' : 'aaaaa'") - "'original_password' : '***'" - >>> mask_password("u'original_password' : u'aaaaa'") - "u'original_password' : u'***'" - """ - message = six.text_type(message) - - # NOTE(ldbragst): Check to see if anything in message contains any key - # specified in _SANITIZE_KEYS, if not then just return the message since - # we don't have to mask any passwords. - if not any(key in message for key in _SANITIZE_KEYS): - return message - - secret = r'\g<1>' + secret + r'\g<2>' - for pattern in _SANITIZE_PATTERNS: - message = re.sub(pattern, secret, message) - return message - - class BaseLoggerAdapter(logging.LoggerAdapter): def audit(self, msg, *args, **kwargs): @@ -295,6 +239,11 @@ def __init__(self, name='unknown', version='unknown'): def logger(self): if not self._logger: self._logger = getLogger(self.name, self.version) + if six.PY3: + # In Python 3, the code fails because the 'manager' attribute + # cannot be found when using a LoggerAdapter as the + # underlying logger. Work around this issue. 
+ self._logger.manager = self._logger.logger.manager return self._logger @@ -448,7 +397,7 @@ def _load_log_config(log_config_append): try: logging.config.fileConfig(log_config_append, disable_existing_loggers=False) - except moves.configparser.Error as exc: + except (moves.configparser.Error, KeyError) as exc: raise LogConfigError(log_config_append, six.text_type(exc)) @@ -461,9 +410,20 @@ def setup(product_name, version='unknown'): sys.excepthook = _create_logging_excepthook(product_name) -def set_defaults(logging_context_format_string): - cfg.set_defaults( - log_opts, logging_context_format_string=logging_context_format_string) +def set_defaults(logging_context_format_string=None, + default_log_levels=None): + # Just in case the caller is not setting the + # default_log_level. This is insurance because + # we introduced the default_log_level parameter + # later in a backwards in-compatible change + if default_log_levels is not None: + cfg.set_defaults( + log_opts, + default_log_levels=default_log_levels) + if logging_context_format_string is not None: + cfg.set_defaults( + log_opts, + logging_context_format_string=logging_context_format_string) def _find_facility_from_conf(): diff --git a/nova/openstack/common/memorycache.py b/nova/openstack/common/memorycache.py index 313a8c14fb..5e16363eaa 100644 --- a/nova/openstack/common/memorycache.py +++ b/nova/openstack/common/memorycache.py @@ -22,7 +22,6 @@ memcache_opts = [ cfg.ListOpt('memcached_servers', - default=None, help='Memcached servers or None for in process cache.'), ] @@ -36,11 +35,8 @@ def get_client(memcached_servers=None): if not memcached_servers: memcached_servers = CONF.memcached_servers if memcached_servers: - try: - import memcache - client_cls = memcache.Client - except ImportError: - pass + import memcache + client_cls = memcache.Client return client_cls(memcached_servers, debug=0) diff --git a/nova/openstack/common/sslutils.py b/nova/openstack/common/sslutils.py index a18e7fd051..00e6173d07 
100644 --- a/nova/openstack/common/sslutils.py +++ b/nova/openstack/common/sslutils.py @@ -22,15 +22,12 @@ ssl_opts = [ cfg.StrOpt('ca_file', - default=None, help="CA certificate file to use to verify " "connecting clients."), cfg.StrOpt('cert_file', - default=None, help="Certificate file to use when starting " "the server securely."), cfg.StrOpt('key_file', - default=None, help="Private key file to use when starting " "the server securely."), ] diff --git a/nova/openstack/common/strutils.py b/nova/openstack/common/strutils.py index 3d98260b1d..b75eb85354 100644 --- a/nova/openstack/common/strutils.py +++ b/nova/openstack/common/strutils.py @@ -50,6 +50,28 @@ SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") +# NOTE(flaper87): The following 3 globals are used by `mask_password` +_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] + +# NOTE(ldbragst): Let's build a list of regex objects using the list of +# _SANITIZE_KEYS we already have. This way, we only have to add the new key +# to the list of _SANITIZE_KEYS and we can generate regular expressions +# for XML and JSON automatically. +_SANITIZE_PATTERNS = [] +_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', + r'(<%(key)s>).*?()', + r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', + r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])', + r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])' + '.*?([\'"])', + r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)'] + +for key in _SANITIZE_KEYS: + for pattern in _FORMAT_PATTERNS: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS.append(reg_ex) + + def int_from_bool_as_string(subject): """Interpret a string as a boolean and return either 1 or 0. 
@@ -237,3 +259,37 @@ def to_slug(value, incoming=None, errors="strict"): "ascii", "ignore").decode("ascii") value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() return SLUGIFY_HYPHENATE_RE.sub("-", value) + + +def mask_password(message, secret="***"): + """Replace password with 'secret' in message. + + :param message: The string which includes security information. + :param secret: value with which to replace passwords. + :returns: The unicode value of message with the password fields masked. + + For example: + + >>> mask_password("'adminPass' : 'aaaaa'") + "'adminPass' : '***'" + >>> mask_password("'admin_pass' : 'aaaaa'") + "'admin_pass' : '***'" + >>> mask_password('"password" : "aaaaa"') + '"password" : "***"' + >>> mask_password("'original_password' : 'aaaaa'") + "'original_password' : '***'" + >>> mask_password("u'original_password' : u'aaaaa'") + "u'original_password' : u'***'" + """ + message = six.text_type(message) + + # NOTE(ldbragst): Check to see if anything in message contains any key + # specified in _SANITIZE_KEYS, if not then just return the message since + # we don't have to mask any passwords. + if not any(key in message for key in _SANITIZE_KEYS): + return message + + secret = r'\g<1>' + secret + r'\g<2>' + for pattern in _SANITIZE_PATTERNS: + message = re.sub(pattern, secret, message) + return message diff --git a/nova/openstack/common/systemd.py b/nova/openstack/common/systemd.py index 4fa0c62790..5628d54f00 100644 --- a/nova/openstack/common/systemd.py +++ b/nova/openstack/common/systemd.py @@ -50,14 +50,16 @@ def _sd_notify(unset_env, msg): def notify(): """Send notification to Systemd that service is ready. + For details see - http://www.freedesktop.org/software/systemd/man/sd_notify.html + http://www.freedesktop.org/software/systemd/man/sd_notify.html """ _sd_notify(False, 'READY=1') def notify_once(): """Send notification once to Systemd that service is ready. 
+ Systemd sets NOTIFY_SOCKET environment variable with the name of the socket listening for notifications from services. This method removes the NOTIFY_SOCKET environment variable to ensure @@ -75,7 +77,7 @@ def onready(notify_socket, timeout): :type timeout: float :returns: 0 service ready 1 service not ready - 2 timeout occured + 2 timeout occurred """ sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) sock.settimeout(timeout) diff --git a/nova/openstack/common/versionutils.py b/nova/openstack/common/versionutils.py index 86e196140d..1facce7726 100644 --- a/nova/openstack/common/versionutils.py +++ b/nova/openstack/common/versionutils.py @@ -18,6 +18,7 @@ """ import functools + import pkg_resources from nova.openstack.common.gettextutils import _ @@ -52,18 +53,34 @@ class deprecated(object): >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1) ... def c(): pass + 4. Specifying the deprecated functionality will not be removed: + >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0) + ... def d(): pass + + 5. Specifying a replacement, deprecated functionality will not be removed: + >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0) + ... def e(): pass + """ + # NOTE(morganfainberg): Bexar is used for unit test purposes, it is + # expected we maintain a gap between Bexar and Folsom in this list. + BEXAR = 'B' FOLSOM = 'F' GRIZZLY = 'G' HAVANA = 'H' ICEHOUSE = 'I' + JUNO = 'J' _RELEASES = { + # NOTE(morganfainberg): Bexar is used for unit test purposes, it is + # expected we maintain a gap between Bexar and Folsom in this list. + 'B': 'Bexar', 'F': 'Folsom', 'G': 'Grizzly', 'H': 'Havana', 'I': 'Icehouse', + 'J': 'Juno', } _deprecated_msg_with_alternative = _( @@ -74,6 +91,12 @@ class deprecated(object): '%(what)s is deprecated as of %(as_of)s and may be ' 'removed in %(remove_in)s. 
It will not be superseded.') + _deprecated_msg_with_alternative_no_removal = _( + '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.') + + _deprecated_msg_with_no_alternative_no_removal = _( + '%(what)s is deprecated as of %(as_of)s. It will not be superseded.') + def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None): """Initialize decorator @@ -119,9 +142,19 @@ def _build_message(self): if self.in_favor_of: details['in_favor_of'] = self.in_favor_of - msg = self._deprecated_msg_with_alternative + if self.remove_in > 0: + msg = self._deprecated_msg_with_alternative + else: + # There are no plans to remove this function, but it is + # now deprecated. + msg = self._deprecated_msg_with_alternative_no_removal else: - msg = self._deprecated_msg_no_alternative + if self.remove_in > 0: + msg = self._deprecated_msg_no_alternative + else: + # There are no plans to remove this function, but it is + # now deprecated. + msg = self._deprecated_msg_with_no_alternative_no_removal return msg, details From 0ba6484d4c36b8f0c9cfb2a0bc20644bedda44ea Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Wed, 2 Jul 2014 14:47:35 +0100 Subject: [PATCH 070/486] libvirt: remove unused list_instance_ids method All code has been converted to use list_instance_domains so the list_instance_ids method can finally be removed. Blueprint: libvirt-domain-listing-speedup Change-Id: I9ce33cda5608796e0eec4ec0804ee179a4704d9d --- nova/virt/libvirt/driver.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 914ea95f7b..a3abb58859 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -854,12 +854,6 @@ def _list_instance_domains(self, only_running=True, only_guests=True): return doms - # TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed. 
- def _list_instance_ids(self): - if self._conn.numOfDomains() == 0: - return [] - return self._conn.listDomainsID() - def list_instances(self): names = [] for dom in self._list_instance_domains(only_running=False): From d455a4d62e269a642f7c1fd4365096d171811d38 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Tue, 24 Jun 2014 17:29:22 +0100 Subject: [PATCH 071/486] libvirt: remove flawed get_num_instances method impl The get_num_instances method impl simply asks libvirt how many running domains there are. This is flawed for a number of reasons. - It doesn't filter out 'Domain-0' on Xen - It doesn't count inactive instances As a result the value is returns is inconsistent with the number of instances shown by 'list_instances'. This will cause the _sync_power_states method in the compute manager to constantly try to re-sync the list of instances. Remove the flawed impl and just allow the base class default impl to be used. This just does a len() call over the return of 'list_instances'. Previously this was quite a heavy API call, but with new enough libvirt it can be completed in a single API call which should be fine. 
Blueprint: libvirt-domain-listing-speedup Change-Id: I96242717afa5c928f1cfb5f307b71739f2147e69 --- nova/virt/libvirt/driver.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index a3abb58859..f9f3796583 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -770,10 +770,6 @@ def _connect(uri, read_only): payload) raise exception.HypervisorUnavailable(host=CONF.host) - def get_num_instances(self): - """Efficient override of base get_num_instances method.""" - return self._conn.numOfDomains() - def instance_exists(self, instance): """Efficient override of base instance_exists method.""" try: From 70e0d3f13bd2ffaa19ddb6a50074723a7dbb0226 Mon Sep 17 00:00:00 2001 From: Christopher Lefelhocz Date: Thu, 17 Jul 2014 14:17:47 -0500 Subject: [PATCH 072/486] Fix Cells ImagePropertiesFilter can raise exceptions In a misconfigured scenario the ImagePropertiesFilter can raise an exception which causes the image to be unbuildable in the region. This happens when the capabilities is set for prominent_hypervisor_version to an empty list. 
Change-Id: I17bd9d87fa69ad34b19591018543a34dfd050978 Closes-Bug: 1343462 --- nova/cells/filters/image_properties.py | 4 ++-- nova/tests/cells/test_cells_filters.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/cells/filters/image_properties.py b/nova/cells/filters/image_properties.py index 1871036ab3..66be72414e 100644 --- a/nova/cells/filters/image_properties.py +++ b/nova/cells/filters/image_properties.py @@ -58,8 +58,8 @@ def filter_all(self, cells, filter_properties): l = list(version) version = str(l[0]) - if version is None or self._matches_version(version, - hypervisor_version_requires): + if not version or self._matches_version(version, + hypervisor_version_requires): filtered_cells.append(cell) return filtered_cells diff --git a/nova/tests/cells/test_cells_filters.py b/nova/tests/cells/test_cells_filters.py index c4f0240611..851a871287 100644 --- a/nova/tests/cells/test_cells_filters.py +++ b/nova/tests/cells/test_cells_filters.py @@ -82,6 +82,7 @@ def test_missing_hypervisor_version_requires(self): def test_missing_hypervisor_version_in_cells(self): image = {'properties': {'hypervisor_version_requires': '>6.2.1'}} self.filter_props['request_spec'] = {'image': image} + self.cell1.capabilities = {"prominent_hypervisor_version": set([])} self.assertEqual(self.cells, self._filter_cells(self.cells, self.filter_props)) From 4f8ccd7b95c27180a1cfe689e3c6f46bde5f803b Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Mon, 30 Jun 2014 16:29:32 -0700 Subject: [PATCH 073/486] Do not process events for instances without host In some cases Neutron might send events such as 'VIF unplugged' for instances which are either being deleted or shelved. When that happens there will be a failure in dispatching the event to the appropriate compute node - as there is no host for the instance. 
As multiple neutron events can be stashed in a single call it is important to avoid that this kind of errors will prevent processing of other events in the same call. This patch does not process events for instances without a host, marking them as failed. When the above condition occurs, the create event request will return a 207 response code. For specific events, a 422 unprocessable entity code will be set. This patch also preserve the characteristic that events are returned in the response in the same order they were found in the request. Change-Id: I18062b81e50c722ec96b4296ac39384493683ae3 Closes-Bug: #1333654 --- .../compute/contrib/server_external_events.py | 42 ++++++++++++------ nova/compute/api.py | 3 ++ .../contrib/test_server_external_events.py | 43 +++++++++++++------ 3 files changed, 63 insertions(+), 25 deletions(-) diff --git a/nova/api/openstack/compute/contrib/server_external_events.py b/nova/api/openstack/compute/contrib/server_external_events.py index 97dd23b532..8716ae08df 100644 --- a/nova/api/openstack/compute/contrib/server_external_events.py +++ b/nova/api/openstack/compute/contrib/server_external_events.py @@ -70,7 +70,7 @@ def create(self, req, body): context = req.environ['nova.context'] authorize(context, action='create') - events = [] + response_events = [] accepted = [] instances = {} result = 200 @@ -101,8 +101,8 @@ def create(self, req, body): raise webob.exc.HTTPBadRequest( _('Invalid event status `%s\'') % event.status) - events.append(_event) - if event.instance_uuid not in instances: + instance = instances.get(event.instance_uuid) + if not instance: try: instance = objects.Instance.get_by_uuid( context, event.instance_uuid) @@ -115,24 +115,40 @@ def create(self, req, body): _event['code'] = 404 result = 207 - if event.instance_uuid in instances: - accepted.append(event) - _event['code'] = 200 - LOG.audit(_('Create event %(name)s:%(tag)s for instance ' - '%(instance_uuid)s'), - dict(event.iteritems())) + # NOTE: before accepting 
the event, make sure the instance + # for which the event is sent is assigned to a host; otherwise + # it will not be possible to dispatch the event + if instance: + if instance.host: + accepted.append(event) + LOG.audit(_('Creating event %(name)s:%(tag)s for instance ' + '%(instance_uuid)s'), + dict(event.iteritems())) + # NOTE: as the event is processed asynchronously verify + # whether 202 is a more suitable response code than 200 + _event['status'] = 'completed' + _event['code'] = 200 + else: + LOG.debug("Unable to find a host for instance " + "%(instance)s. Dropping event %(event)s", + {'instance': event.instance_uuid, + 'event': event.name}) + _event['status'] = 'failed' + _event['code'] = 422 + result = 207 + + response_events.append(_event) if accepted: - self.compute_api.external_instance_event(context, - instances.values(), - accepted) + self.compute_api.external_instance_event( + context, instances.values(), accepted) else: msg = _('No instances found for any event') raise webob.exc.HTTPNotFound(explanation=msg) # FIXME(cyeoh): This needs some infrastructure support so that # we have a general way to do this - robj = wsgi.ResponseObject({'events': events}) + robj = wsgi.ResponseObject({'events': response_events}) robj._code = result return robj diff --git a/nova/compute/api.py b/nova/compute/api.py index 612e1c1da2..a44fd49ab3 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -3125,6 +3125,9 @@ def external_instance_event(self, context, instances, events): events_by_host[host] = events_on_host for host in instances_by_host: + # TODO(salv-orlando): Handle exceptions raised by the rpc api layer + # in order to ensure that a failure in processing events on a host + # will not prevent processing events on other hosts self.compute_rpcapi.external_instance_event( context, instances_by_host[host], events_by_host[host]) diff --git a/nova/tests/api/openstack/compute/contrib/test_server_external_events.py 
b/nova/tests/api/openstack/compute/contrib/test_server_external_events.py index 4091116a47..160f6f59a3 100644 --- a/nova/tests/api/openstack/compute/contrib/test_server_external_events.py +++ b/nova/tests/api/openstack/compute/contrib/test_server_external_events.py @@ -30,9 +30,11 @@ uuid='00000000-0000-0000-0000-000000000002', host='host1'), '00000000-0000-0000-0000-000000000003': objects.Instance( uuid='00000000-0000-0000-0000-000000000003', host='host2'), + '00000000-0000-0000-0000-000000000004': objects.Instance( + uuid='00000000-0000-0000-0000-000000000004', host=None), } fake_instance_uuids = sorted(fake_instances.keys()) -MISSING_UUID = '00000000-0000-0000-0000-000000000004' +MISSING_UUID = '00000000-0000-0000-0000-000000000005' @classmethod @@ -49,16 +51,20 @@ def setUp(self): super(ServerExternalEventsTest, self).setUp() self.api = server_external_events.ServerExternalEventsController() self.context = context.get_admin_context() - self.default_body = { - 'events': [ - {'name': 'network-vif-plugged', - 'tag': 'foo', - 'status': 'completed', - 'server_uuid': fake_instance_uuids[0]}, - {'name': 'network-changed', - 'server_uuid': fake_instance_uuids[1]}, - ] - } + self.event_1 = {'name': 'network-vif-plugged', + 'tag': 'foo', + 'server_uuid': fake_instance_uuids[0]} + self.event_2 = {'name': 'network-changed', + 'server_uuid': fake_instance_uuids[1]} + self.default_body = {'events': [self.event_1, self.event_2]} + self.resp_event_1 = dict(self.event_1) + self.resp_event_1['code'] = 200 + self.resp_event_1['status'] = 'completed' + self.resp_event_2 = dict(self.event_2) + self.resp_event_2['code'] = 200 + self.resp_event_2['status'] = 'completed' + self.default_resp_body = {'events': [self.resp_event_1, + self.resp_event_2]} def _create_req(self, body): req = webob.Request.blank('/v2/fake/os-server-external-events') @@ -91,7 +97,7 @@ def test_create(self): fake_instance_uuids[:2], ['network-vif-plugged', 'network-changed']) - 
self.assertEqual(self.default_body, result) + self.assertEqual(self.default_resp_body, result) self.assertEqual(200, code) def test_create_one_bad_instance(self): @@ -105,6 +111,19 @@ def test_create_one_bad_instance(self): self.assertEqual(404, result['events'][1]['code']) self.assertEqual(207, code) + def test_create_event_instance_has_no_host(self): + body = self.default_body + body['events'][0]['server_uuid'] = fake_instance_uuids[-1] + req = self._create_req(body) + result, code = self._assert_call(req, body, + [fake_instance_uuids[1], + fake_instance_uuids[-1]], + ['network-changed']) + self.assertEqual(422, result['events'][0]['code']) + self.assertEqual('failed', result['events'][0]['status']) + self.assertEqual(200, result['events'][1]['code']) + self.assertEqual(207, code) + def test_create_no_good_instances(self): body = self.default_body body['events'][0]['server_uuid'] = MISSING_UUID From 9e8657839844ecef3348dad51aca6411f05da99d Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Wed, 4 Jun 2014 17:54:44 +0930 Subject: [PATCH 074/486] Correctly reject request to add lists of hosts to an aggregate Attempting to add or remove a list of hosts to an aggregate results in an internal server error. This patch fixes the input validation so such a request is correctly rejected and a 400 error returned. 
Change-Id: I760db36d685cff20b01a72160b8b3fbb8a70b412 Closes-Bug: #1321653 --- .../api/openstack/compute/contrib/aggregates.py | 17 +++++++++++++---- .../compute/contrib/test_aggregates.py | 13 +++++++++++++ 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/compute/contrib/aggregates.py b/nova/api/openstack/compute/contrib/aggregates.py index c5435c5f21..465dfa532d 100644 --- a/nova/api/openstack/compute/contrib/aggregates.py +++ b/nova/api/openstack/compute/contrib/aggregates.py @@ -37,10 +37,19 @@ def _get_context(req): def get_host_from_body(fn): """Makes sure that the host exists.""" def wrapped(self, req, id, body, *args, **kwargs): - if len(body) == 1 and "host" in body: - host = body['host'] - else: - raise exc.HTTPBadRequest() + if len(body) != 1: + msg = _('Only host parameter can be specified') + raise exc.HTTPBadRequest(explanation=msg) + elif 'host' not in body: + msg = _('Host parameter must be specified') + raise exc.HTTPBadRequest(explanation=msg) + try: + utils.check_string_length(body['host'], 'host', 1, 255) + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + + host = body['host'] + return fn(self, req, id, host, *args, **kwargs) return wrapped diff --git a/nova/tests/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/api/openstack/compute/contrib/test_aggregates.py index 5e8d19e85b..9b84b9dc8b 100644 --- a/nova/tests/api/openstack/compute/contrib/test_aggregates.py +++ b/nova/tests/api/openstack/compute/contrib/test_aggregates.py @@ -396,6 +396,14 @@ def test_add_host_with_missing_host(self): self.assertRaises(exc.HTTPBadRequest, self.controller.action, self.req, "1", body={"add_host": {"asdf": "asdf"}}) + def test_add_host_with_invalid_format_host(self): + self.assertRaises(exc.HTTPBadRequest, self.controller.action, + self.req, "1", body={"add_host": {"host": "a" * 300}}) + + def test_add_host_with_multiple_hosts(self): + 
self.assertRaises(exc.HTTPBadRequest, self.controller.action, + self.req, "1", body={"add_host": {"host": ["host1", "host2"]}}) + def test_add_host_raises_key_error(self): def stub_add_host_to_aggregate(context, aggregate, host): raise KeyError @@ -466,6 +474,11 @@ def test_remove_host_with_missing_host(self): self.assertRaises(exc.HTTPBadRequest, self.controller.action, self.req, "1", body={"asdf": "asdf"}) + def test_remove_host_with_multiple_hosts(self): + self.assertRaises(exc.HTTPBadRequest, self.controller.action, + self.req, "1", body={"remove_host": {"host": + ["host1", "host2"]}}) + def test_remove_host_with_extra_param(self): self.assertRaises(exc.HTTPBadRequest, self.controller.action, self.req, "1", body={"remove_host": {"asdf": "asdf", From 83b37aeb5da8af6e305098a7e698d058f431f332 Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Fri, 18 Jul 2014 11:53:23 +0800 Subject: [PATCH 075/486] Add decorator expected_errors to V3 servers core All v3 api should use expected_errors to prevent unexpected exception raised. This patch add decorator expected_errors for v3 servers core. 
Change-Id: Ia1fa09d6cf68a65c85ac5fd2aff9b9202d76e7ca Closes-Bug: #1343778 --- nova/api/openstack/compute/plugins/v3/servers.py | 12 ++++++++++++ .../api/openstack/compute/plugins/v3/test_servers.py | 3 ++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index 60ab3b8ad8..b8365158d8 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -166,6 +166,7 @@ def check_load_extension(ext): if not list(self.update_extension_manager): LOG.debug("Did not find any server update extensions") + @extensions.expected_errors((400, 403)) def index(self, req): """Returns a list of server names and ids for a given user.""" try: @@ -174,6 +175,7 @@ def index(self, req): raise exc.HTTPBadRequest(explanation=err.format_message()) return servers + @extensions.expected_errors((400, 403)) def detail(self, req): """Returns a list of server details for a given user.""" try: @@ -396,6 +398,7 @@ def _decode_base64(self, data): except TypeError: return None + @extensions.expected_errors(404) def show(self, req, id): """Returns server details by server id.""" context = req.environ['nova.context'] @@ -405,6 +408,7 @@ def show(self, req, id): req.cache_db_instance(instance) return self._view_builder.show(req, instance) + @extensions.expected_errors((400, 409, 413)) @wsgi.response(202) def create(self, req, body): """Creates a new server for a given user.""" @@ -581,6 +585,7 @@ def _delete(self, context, req, instance_uuid): else: self.compute_api.delete(context, instance) + @extensions.expected_errors((400, 404)) def update(self, req, id, body): """Update server then pass on to version-specific controller.""" if not self.is_valid_body(body, 'server'): @@ -617,6 +622,7 @@ def update(self, req, id, body): msg = _("Instance could not be found") raise exc.HTTPNotFound(explanation=msg) + @extensions.expected_errors((400, 404, 
409)) @wsgi.response(202) @wsgi.action('confirm_resize') def _action_confirm_resize(self, req, id, body): @@ -633,6 +639,7 @@ def _action_confirm_resize(self, req, id, body): common.raise_http_conflict_for_instance_invalid_state(state_error, 'confirm_resize') + @extensions.expected_errors((400, 404, 409)) @wsgi.response(202) @wsgi.action('revert_resize') def _action_revert_resize(self, req, id, body): @@ -653,6 +660,7 @@ def _action_revert_resize(self, req, id, body): 'revert_resize') return webob.Response(status_int=202) + @extensions.expected_errors((400, 404, 409)) @wsgi.response(202) @wsgi.action('reboot') def _action_reboot(self, req, id, body): @@ -720,6 +728,7 @@ def _resize(self, req, instance_id, flavor_id, **kwargs): return webob.Response(status_int=202) + @extensions.expected_errors((404, 409)) @wsgi.response(204) def delete(self, req, id): """Destroys a server.""" @@ -771,6 +780,7 @@ def _flavor_id_from_req_data(self, data): return common.get_id_from_href(flavor_ref) + @extensions.expected_errors((400, 401, 404, 409, 413)) @wsgi.response(202) @wsgi.action('resize') def _action_resize(self, req, id, body): @@ -789,6 +799,7 @@ def _action_resize(self, req, id, body): return self._resize(req, id, flavor_ref, **resize_kwargs) + @extensions.expected_errors((400, 404, 409, 413)) @wsgi.response(202) @wsgi.action('rebuild') def _action_rebuild(self, req, id, body): @@ -871,6 +882,7 @@ def _action_rebuild(self, req, id, body): robj = wsgi.ResponseObject(view) return self._add_location(robj) + @extensions.expected_errors((400, 404, 409, 413)) @wsgi.response(202) @wsgi.action('create_image') @common.check_snapshots_enabled diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index 8f6a6e5889..41fbf746c7 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -2243,7 +2243,8 @@ def 
fake_keypair_server_create(self, server_dict, req.method = 'POST' req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" - self.assertRaises(KeyError, self.controller.create, req, body=body) + self.assertRaises(webob.exc.HTTPInternalServerError, + self.controller.create, req, body=body) def test_create_instance_pass_disabled(self): self.flags(enable_instance_password=False) From ac865a9bb5fe71bb09fd223931370613669563ba Mon Sep 17 00:00:00 2001 From: jichenjc Date: Fri, 18 Jul 2014 15:37:46 +0800 Subject: [PATCH 076/486] Add instance to debug log at compute api Add instance info to 2 debug logs at compute api layer. Change-Id: Icbe8e45ace74b8bf0282cf6d6dd1ead792532961 --- nova/compute/api.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 612e1c1da2..297e178950 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1619,7 +1619,8 @@ def _create_reservations(self, context, instance, original_task_state, vram_mb = int(old_inst_type.get('extra_specs', {}).get(VIDEO_RAM, 0)) instance_memory_mb = (old_inst_type['memory_mb'] + vram_mb) - LOG.debug("going to delete a resizing instance") + LOG.debug("going to delete a resizing instance", + instance=instance) reservations = QUOTAS.reserve(context, project_id=project_id, @@ -3053,7 +3054,7 @@ def evacuate(self, context, instance, host, on_shared_storage, Checking vm compute host state, if the host not in expected_state, raising an exception. 
""" - LOG.debug('vm evacuation scheduled') + LOG.debug('vm evacuation scheduled', instance=instance) inst_host = instance.host service = objects.Service.get_by_compute_host(context, inst_host) if self.servicegroup_api.service_is_up(service): From 1bde3491072bd6b8d6768fa9b58c5959467d0d5b Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Fri, 18 Jul 2014 12:02:12 +0800 Subject: [PATCH 077/486] Add decorator expected_errors for v3 attach_interfaces All v3 api should use expected_errors to prevent unexpected exception raised. This patch add decorator expected_errors for v3 attach_interfaces. This patch also remove update action. Let the wsgi return HTTPNotFound for it. Change-Id: Icefe9f9adfa7ad01bc3ab11dcb7c132702390d04 --- .../openstack/compute/plugins/v3/attach_interfaces.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/compute/plugins/v3/attach_interfaces.py b/nova/api/openstack/compute/plugins/v3/attach_interfaces.py index 477f432be9..4ccb3a5411 100644 --- a/nova/api/openstack/compute/plugins/v3/attach_interfaces.py +++ b/nova/api/openstack/compute/plugins/v3/attach_interfaces.py @@ -54,11 +54,13 @@ def __init__(self): self.network_api = network.API() super(InterfaceAttachmentController, self).__init__() + @extensions.expected_errors((404, 501)) def index(self, req, server_id): """Returns the list of interface attachments for a given instance.""" return self._items(req, server_id, entity_maker=_translate_interface_attachment_view) + @extensions.expected_errors(404) def show(self, req, server_id, id): """Return data about the given interface attachment.""" context = req.environ['nova.context'] @@ -78,6 +80,7 @@ def show(self, req, server_id, id): return {'interface_attachment': _translate_interface_attachment_view( port_info['port'])} + @extensions.expected_errors((400, 404, 409, 500, 501)) @validation.schema(attach_interfaces.create) def create(self, req, server_id, body): """Attach an interface to an instance.""" @@ 
-126,11 +129,7 @@ def create(self, req, server_id, body): return self.show(req, server_id, vif['id']) - def update(self, req, server_id, id, body): - """Update a interface attachment. We don't currently support this.""" - msg = _("Attachments update is not supported") - raise exc.HTTPNotImplemented(explanation=msg) - + @extensions.expected_errors((404, 409, 501)) def delete(self, req, server_id, id): """Detach an interface from an instance.""" context = req.environ['nova.context'] From 0550ae806ac3f4f184a0cc10680e0da80940fb01 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Wed, 16 Jul 2014 10:05:52 -0400 Subject: [PATCH 078/486] update ignore list for pep8 Remove H803 as Nova does not run into that issue. Sort the rest of the items (E129 was out of place). Change-Id: I6c2537dd27c947e36ebf37eb3b5c8a1ab8b026a1 --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index daeebd56d2..3999e6e041 100644 --- a/tox.ini +++ b/tox.ini @@ -61,7 +61,7 @@ sitepackages = False # Stricter in hacking 0.9: F402 # E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301 -ignore = E121,E122,E123,E124,E125,E129,E126,E127,E128,E131,E251,E265,E711,E712,F402,H405,H803,H904 +ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,E265,E711,E712,F402,H405,H904 exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools [hacking] From d902596f846798d9310a9ab66f6e4b28bcbe1bfa Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Fri, 18 Jul 2014 14:18:23 +0200 Subject: [PATCH 079/486] Minor tweaks to hypervisor_version to int Follow up to I28ce23509e3c9feae183a49a8fc5bf3c7c601295. That path had a few minor issues that weren't worth holding up the patch for. This patch fixes those issues. 
Change-Id: I878cb028e1dc48ad5182820d580fab609ba62db4 --- nova/tests/integrated/v3/test_migrate_server.py | 4 +++- nova/tests/virt/test_virt_drivers.py | 1 - 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/tests/integrated/v3/test_migrate_server.py b/nova/tests/integrated/v3/test_migrate_server.py index 456ee4f563..a0ed4d25f4 100644 --- a/nova/tests/integrated/v3/test_migrate_server.py +++ b/nova/tests/integrated/v3/test_migrate_server.py @@ -16,6 +16,7 @@ from nova.conductor import manager as conductor_manager from nova import db from nova.tests.integrated.v3 import test_servers +from nova import utils class MigrateServerSamplesJsonTest(test_servers.ServersSampleBase): @@ -55,7 +56,8 @@ def fake_get_compute(context, host): report_count=1, updated_at='foo', hypervisor_type='bar', - hypervisor_version='1000', + hypervisor_version=utils.convert_version_to_int( + '1.0'), disabled=False) return {'compute_node': [service]} self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute) diff --git a/nova/tests/virt/test_virt_drivers.py b/nova/tests/virt/test_virt_drivers.py index de34b3d085..de8cdd87f0 100644 --- a/nova/tests/virt/test_virt_drivers.py +++ b/nova/tests/virt/test_virt_drivers.py @@ -597,7 +597,6 @@ def _check_available_resource_fields(self, host_status): def test_get_host_stats(self): host_status = self.connection.get_host_stats() self._check_available_resource_fields(host_status) - self.assertIsInstance(host_status['hypervisor_version'], int) @catch_notimplementederror def test_get_available_resource(self): From 46d8dcfa5807ef84d6b96b056957a1b9aa0daff7 Mon Sep 17 00:00:00 2001 From: Thang Pham Date: Sun, 23 Mar 2014 21:33:43 -0400 Subject: [PATCH 080/486] Implement methods to modify volume metadata. There are four methods in nova/volume/cinder.py which are NotImplemented. They are as follows: 1. get_volume_metadata 2. delete_volume_metadata 3. update_volume_metadata 4. 
get_volume_metadata_value These methods are required in cases where nova needs to modify a cinder volume's metadata, e.g. attach and detach time. It also completes the cinder.API class. Change-Id: I360076ae28db43e661466f556425947813c9040e Closes-Bug: #1296164 --- nova/tests/volume/test_cinder.py | 104 +++++++++++++++++++++++++++---- nova/volume/cinder.py | 20 ++++-- 2 files changed, 106 insertions(+), 18 deletions(-) diff --git a/nova/tests/volume/test_cinder.py b/nova/tests/volume/test_cinder.py index affa85a7c9..a9dd8d65c5 100644 --- a/nova/tests/volume/test_cinder.py +++ b/nova/tests/volume/test_cinder.py @@ -41,6 +41,22 @@ def __init__(self): self.volume_snapshots = self.volumes +class FakeVolume(object): + def __init__(self, dict=dict()): + self.id = dict.get('id') or '1234' + self.status = dict.get('status') or 'available' + self.size = dict.get('size') or 1 + self.availability_zone = dict.get('availability_zone') or 'cinder' + self.created_at = dict.get('created_at') + self.attach_time = dict.get('attach_time') + self.mountpoint = dict.get('mountpoint') + self.display_name = dict.get('display_name') or 'volume-' + self.id + self.display_description = dict.get('display_description') or 'fake' + self.volume_type_id = dict.get('volume_type_id') + self.snapshot_id = dict.get('snapshot_id') + self.metadata = dict.get('volume_metadata') or {} + + class CinderApiTestCase(test.NoDBTestCase): def setUp(self): super(CinderApiTestCase, self).setUp() @@ -327,21 +343,85 @@ def test_delete_snapshot(self): self.api.delete_snapshot(self.ctx, 'id1') - def test_get_volume_metadata(self): - self.assertRaises(NotImplementedError, - self.api.get_volume_metadata, self.ctx, '') + @mock.patch('nova.volume.cinder.cinderclient') + def test_get_volume_metadata(self, mock_cinderclient): + volume_id = 'id1' + metadata = {'key1': 'value1', 'key2': 'value2'} + volume = FakeVolume({'id': volume_id, 'volume_metadata': metadata}) - def test_get_volume_metadata_value(self): - 
self.assertRaises(NotImplementedError, - self.api.get_volume_metadata_value, '', '') + mock_volumes = mock.MagicMock() + mock_volumes.get.return_value = volume + mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes) - def test_delete_volume_metadata(self): - self.assertRaises(NotImplementedError, - self.api.delete_volume_metadata, self.ctx, '', '') + results = self.api.get_volume_metadata(self.ctx, volume_id) - def test_update_volume_metadata(self): - self.assertRaises(NotImplementedError, - self.api.update_volume_metadata, self.ctx, '', '') + mock_cinderclient.assert_called_once_with(self.ctx) + mock_volumes.get.assert_called_once_with(volume_id) + self.assertEqual(results, metadata) + + @mock.patch('nova.volume.cinder.cinderclient') + def test_get_volume_metadata_value(self, mock_cinderclient): + volume_id = 'id1' + metadata = {'key1': 'value1'} + volume = FakeVolume({'id': volume_id, 'volume_metadata': metadata}) + + mock_volumes = mock.MagicMock() + mock_volumes.get.return_value = volume + mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes) + + results = self.api.get_volume_metadata_value(self.ctx, volume_id, + 'key1') + mock_cinderclient.assert_called_once_with(self.ctx) + mock_volumes.get.assert_called_once_with(volume_id) + self.assertEqual(results, 'value1') + + @mock.patch('nova.volume.cinder.cinderclient') + def test_delete_volume_metadata(self, mock_cinderclient): + volume_id = 'id1' + keys = ['key1', 'key2', 'key3'] + + mock_volumes = mock.MagicMock() + mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes) + + self.api.delete_volume_metadata(self.ctx, volume_id, keys) + + mock_cinderclient.assert_called_once_with(self.ctx) + mock_volumes.delete_metadata.assert_called_once_with(volume_id, keys) + + @mock.patch('nova.volume.cinder.cinderclient') + def test_update_volume_metadata(self, mock_cinderclient): + volume_id = 'id1' + metadata = {'key1': 'value1'} + + mock_volumes = mock.MagicMock() + 
mock_volumes.set_metadata.return_value = metadata + mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes) + + updated_meta = self.api.update_volume_metadata(self.ctx, volume_id, + metadata) + + mock_cinderclient.assert_called_once_with(self.ctx) + self.assertFalse(mock_volumes.update_all_metadata.called) + mock_volumes.set_metadata.assert_called_once_with(volume_id, metadata) + self.assertEqual(metadata, updated_meta) + + @mock.patch('nova.volume.cinder.cinderclient') + def test_update_volume_metadata_delete(self, mock_cinderclient): + volume_id = 'id1' + metadata = {'key1': 'value1', 'key2': 'value2'} + + mock_volumes = mock.MagicMock() + mock_volumes.update_all_metadata.return_value = metadata + mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes) + + updated_meta = self.api.update_volume_metadata(self.ctx, volume_id, + metadata, delete=True) + + mock_cinderclient.assert_called_once_with(self.ctx) + mock_volumes.update_all_metadata.assert_called_once_with(volume_id, + metadata) + self.assertFalse(mock_volumes.set_metadata.called) + self.assertEqual(metadata, updated_meta) def test_update_snapshot_status(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py index 2ba00aa811..60223e92b3 100644 --- a/nova/volume/cinder.py +++ b/nova/volume/cinder.py @@ -377,20 +377,28 @@ def get_volume_encryption_metadata(self, context, volume_id): @translate_volume_exception def get_volume_metadata(self, context, volume_id): - raise NotImplementedError() + vol = cinderclient(context).volumes.get(volume_id) + return vol.metadata @translate_volume_exception - def delete_volume_metadata(self, context, volume_id, key): - raise NotImplementedError() + def delete_volume_metadata(self, context, volume_id, keys): + cinderclient(context).volumes.delete_metadata(volume_id, keys) @translate_volume_exception def update_volume_metadata(self, context, volume_id, metadata, delete=False): - 
raise NotImplementedError() + if delete: + # Completely replace volume metadata with one given + return cinderclient(context).volumes.update_all_metadata( + volume_id, metadata) + else: + return cinderclient(context).volumes.set_metadata( + volume_id, metadata) @translate_volume_exception - def get_volume_metadata_value(self, volume_id, key): - raise NotImplementedError() + def get_volume_metadata_value(self, context, volume_id, key): + vol = cinderclient(context).volumes.get(volume_id) + return vol.metadata.get(key) @translate_snapshot_exception def update_snapshot_status(self, context, snapshot_id, status): From 280fb980290017091f2596ecc892202d492f0f67 Mon Sep 17 00:00:00 2001 From: Arnaud Legendre Date: Mon, 7 Jul 2014 12:03:30 -0700 Subject: [PATCH 081/486] Cleanup: remove unused argument The vm_util.get_values_from_object_properties(...) method in the VMware driver takes an unused param 'properties'. This patch cleans up the method signature. Change-Id: Iba738c2b49f829c1c0e17973b5265e88741fade7 --- nova/tests/virt/vmwareapi/test_vm_util.py | 6 +----- nova/virt/vmwareapi/vm_util.py | 5 ++--- nova/virt/vmwareapi/vmops.py | 10 ++++------ 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_vm_util.py b/nova/tests/virt/vmwareapi/test_vm_util.py index 0e7f30f1e2..fad620ca22 100644 --- a/nova/tests/virt/vmwareapi/test_vm_util.py +++ b/nova/tests/virt/vmwareapi/test_vm_util.py @@ -723,12 +723,8 @@ def _create_fake_vm_objects(self): def test_get_values(self): objects = self._create_fake_vm_objects() - lst_properties = ['runtime.powerState', - 'summary.guest.toolsStatus', - 'summary.guest.toolsRunningStatus'] query = vm_util.get_values_from_object_properties( - fake.FakeObjectRetrievalSession(objects), - objects, lst_properties) + fake.FakeObjectRetrievalSession(objects), objects) self.assertEqual('poweredOn', query['runtime.powerState']) self.assertEqual('guestToolsRunning', query['summary.guest.toolsRunningStatus']) diff --git 
a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index f6e3e7adf2..0bc70d50d2 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -1380,11 +1380,10 @@ def power_on_instance(session, instance, vm_ref=None): LOG.debug("VM already powered on", instance=instance) -def get_values_from_object_properties(session, props, properties): +def get_values_from_object_properties(session, props): """Get the specific values from a object list. - The object values will be returned as a dictionary. The keys for the - dictionary will be the 'properties'. + The object values will be returned as a dictionary. """ dictionary = {} while props: diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 9d7093c7a4..352112d03d 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -842,8 +842,7 @@ def reboot(self, instance, network_info): props = self._session._call_method(vim_util, "get_object_properties", None, vm_ref, "VirtualMachine", lst_properties) - query = vm_util.get_values_from_object_properties(self._session, props, - lst_properties) + query = vm_util.get_values_from_object_properties(self._session, props) pwr_state = query['runtime.powerState'] tools_status = query['summary.guest.toolsStatus'] tools_running_status = query['summary.guest.toolsRunningStatus'] @@ -887,7 +886,7 @@ def _destroy_instance(self, instance, network_info, destroy_disks=True, "get_object_properties", None, vm_ref, "VirtualMachine", lst_properties) query = vm_util.get_values_from_object_properties( - self._session, props, lst_properties) + self._session, props) pwr_state = query['runtime.powerState'] vm_config_pathname = query['config.files.vmPathName'] vm_ds_path = None @@ -1267,7 +1266,7 @@ def get_info(self, instance): "get_object_properties", None, vm_ref, "VirtualMachine", lst_properties) query = vm_util.get_values_from_object_properties( - self._session, vm_props, lst_properties) + self._session, vm_props) 
max_mem = int(query['summary.config.memorySizeMB']) * 1024 return {'state': VMWARE_POWER_STATES[query['runtime.powerState']], 'max_mem': max_mem, @@ -1285,8 +1284,7 @@ def get_diagnostics(self, instance): "get_object_properties", None, vm_ref, "VirtualMachine", lst_properties) query = vm_util.get_values_from_object_properties(self._session, - vm_props, - lst_properties) + vm_props) data = {} # All of values received are objects. Convert them to dictionaries for value in query.values(): From 826aed0ec7e59d52d515a86569b6e23b08c6c072 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Fri, 11 Jul 2014 21:03:15 -0400 Subject: [PATCH 082/486] Use oslo.i18n oslo.i18n provides the i18n functions that were provided by oslo-incubator's gettextutils module. Some tests that were using internal details of the library were removed. Change-Id: I44cfd5552e0dd86af21073419d31622f5fdb28e0 --- doc/source/devref/il8n.rst | 2 +- nova/api/auth.py | 2 +- nova/api/ec2/__init__.py | 4 +- nova/api/ec2/cloud.py | 2 +- nova/api/ec2/ec2utils.py | 2 +- nova/api/metadata/handler.py | 2 +- nova/api/metadata/password.py | 2 +- nova/api/metadata/vendordata_json.py | 2 +- nova/api/openstack/__init__.py | 12 ++-- nova/api/openstack/common.py | 2 +- .../compute/contrib/admin_actions.py | 2 +- .../openstack/compute/contrib/aggregates.py | 2 +- .../contrib/assisted_volume_snapshots.py | 2 +- .../compute/contrib/attach_interfaces.py | 2 +- .../compute/contrib/baremetal_nodes.py | 2 +- nova/api/openstack/compute/contrib/cells.py | 2 +- .../openstack/compute/contrib/certificates.py | 2 +- .../openstack/compute/contrib/cloudpipe.py | 2 +- .../compute/contrib/cloudpipe_update.py | 2 +- .../compute/contrib/console_auth_tokens.py | 2 +- .../compute/contrib/console_output.py | 2 +- .../api/openstack/compute/contrib/consoles.py | 2 +- .../openstack/compute/contrib/disk_config.py | 2 +- .../api/openstack/compute/contrib/evacuate.py | 2 +- .../openstack/compute/contrib/fixed_ips.py | 2 +- 
.../compute/contrib/flavor_access.py | 2 +- .../compute/contrib/flavorextraspecs.py | 2 +- .../openstack/compute/contrib/flavormanage.py | 2 +- .../compute/contrib/floating_ip_dns.py | 2 +- .../openstack/compute/contrib/floating_ips.py | 2 +- .../compute/contrib/floating_ips_bulk.py | 2 +- nova/api/openstack/compute/contrib/fping.py | 2 +- nova/api/openstack/compute/contrib/hosts.py | 2 +- .../openstack/compute/contrib/hypervisors.py | 2 +- .../contrib/instance_usage_audit_log.py | 2 +- .../api/openstack/compute/contrib/keypairs.py | 2 +- .../api/openstack/compute/contrib/multinic.py | 2 +- .../compute/contrib/networks_associate.py | 2 +- .../openstack/compute/contrib/os_networks.py | 2 +- .../compute/contrib/os_tenant_networks.py | 2 +- .../compute/contrib/quota_classes.py | 2 +- nova/api/openstack/compute/contrib/quotas.py | 2 +- nova/api/openstack/compute/contrib/rescue.py | 2 +- .../compute/contrib/scheduler_hints.py | 2 +- .../contrib/security_group_default_rules.py | 2 +- .../compute/contrib/security_groups.py | 2 +- .../compute/contrib/server_external_events.py | 2 +- .../compute/contrib/server_groups.py | 2 +- .../compute/contrib/server_start_stop.py | 2 +- .../api/openstack/compute/contrib/services.py | 2 +- nova/api/openstack/compute/contrib/shelve.py | 2 +- .../compute/contrib/simple_tenant_usage.py | 2 +- nova/api/openstack/compute/contrib/volumes.py | 2 +- nova/api/openstack/compute/extensions.py | 2 +- nova/api/openstack/compute/flavors.py | 2 +- nova/api/openstack/compute/image_metadata.py | 2 +- nova/api/openstack/compute/images.py | 2 +- nova/api/openstack/compute/ips.py | 2 +- nova/api/openstack/compute/limits.py | 2 +- .../api/openstack/compute/plugins/__init__.py | 2 +- .../compute/plugins/v3/access_ips.py | 2 +- .../compute/plugins/v3/admin_actions.py | 2 +- .../compute/plugins/v3/admin_password.py | 2 +- .../compute/plugins/v3/aggregates.py | 2 +- .../compute/plugins/v3/attach_interfaces.py | 2 +- .../api/openstack/compute/plugins/v3/cells.py 
| 2 +- .../compute/plugins/v3/certificates.py | 2 +- .../compute/plugins/v3/console_auth_tokens.py | 2 +- .../compute/plugins/v3/console_output.py | 2 +- .../openstack/compute/plugins/v3/evacuate.py | 2 +- .../compute/plugins/v3/extended_volumes.py | 2 +- .../compute/plugins/v3/flavor_access.py | 2 +- .../openstack/compute/plugins/v3/flavors.py | 2 +- .../compute/plugins/v3/flavors_extraspecs.py | 2 +- .../api/openstack/compute/plugins/v3/hosts.py | 2 +- .../compute/plugins/v3/hypervisors.py | 2 +- nova/api/openstack/compute/plugins/v3/ips.py | 2 +- .../openstack/compute/plugins/v3/keypairs.py | 2 +- .../compute/plugins/v3/multiple_create.py | 2 +- .../compute/plugins/v3/pause_server.py | 2 +- .../compute/plugins/v3/quota_sets.py | 2 +- .../compute/plugins/v3/remote_consoles.py | 2 +- .../openstack/compute/plugins/v3/rescue.py | 2 +- .../compute/plugins/v3/scheduler_hints.py | 2 +- .../compute/plugins/v3/server_actions.py | 2 +- .../plugins/v3/server_external_events.py | 2 +- .../compute/plugins/v3/server_metadata.py | 2 +- .../openstack/compute/plugins/v3/servers.py | 2 +- .../openstack/compute/plugins/v3/services.py | 2 +- nova/api/openstack/compute/server_metadata.py | 2 +- nova/api/openstack/compute/servers.py | 2 +- nova/api/openstack/compute/views/servers.py | 2 +- nova/api/openstack/extensions.py | 2 +- nova/api/openstack/wsgi.py | 17 ++--- nova/api/openstack/xmlutil.py | 2 +- nova/api/sizelimit.py | 2 +- nova/api/validation/validators.py | 2 +- nova/block_device.py | 2 +- nova/cells/filters/target_cell.py | 2 +- nova/cells/manager.py | 2 +- nova/cells/messaging.py | 2 +- nova/cells/rpcapi.py | 2 +- nova/cells/scheduler.py | 2 +- nova/cells/state.py | 2 +- nova/cells/weights/mute_child.py | 2 +- nova/cloudpipe/pipelib.py | 2 +- nova/cmd/all.py | 2 +- nova/cmd/baremetal_deploy_helper.py | 2 +- nova/cmd/baremetal_manage.py | 2 +- nova/cmd/compute.py | 2 +- nova/cmd/dhcpbridge.py | 2 +- nova/cmd/manage.py | 2 +- nova/cmd/network.py | 2 +- nova/compute/api.py | 
2 +- nova/compute/claims.py | 2 +- nova/compute/flavors.py | 2 +- nova/compute/manager.py | 8 +-- nova/compute/monitors/__init__.py | 2 +- nova/compute/monitors/virt/cpu_monitor.py | 2 +- nova/compute/resource_tracker.py | 2 +- nova/compute/rpcapi.py | 2 +- nova/compute/utils.py | 2 +- nova/conductor/api.py | 2 +- nova/conductor/manager.py | 2 +- nova/conductor/tasks/live_migrate.py | 2 +- nova/console/websocketproxy.py | 2 +- nova/console/xvp.py | 2 +- nova/consoleauth/manager.py | 2 +- nova/context.py | 2 +- nova/crypto.py | 2 +- nova/db/api.py | 2 +- nova/db/sqlalchemy/api.py | 2 +- .../migrate_repo/versions/216_havana.py | 2 +- nova/db/sqlalchemy/migration.py | 2 +- nova/db/sqlalchemy/utils.py | 2 +- nova/debugger.py | 2 +- nova/exception.py | 2 +- nova/filters.py | 2 +- nova/hooks.py | 2 +- nova/i18n.py | 66 +++++++++++++++++++ nova/image/download/__init__.py | 2 +- nova/image/download/file.py | 2 +- nova/image/glance.py | 2 +- nova/image/s3.py | 2 +- nova/ipv6/account_identifier.py | 2 +- nova/ipv6/rfc2462.py | 2 +- nova/keymgr/conf_key_mgr.py | 2 +- nova/keymgr/mock_key_mgr.py | 2 +- nova/keymgr/single_key_mgr.py | 2 +- nova/network/api.py | 2 +- nova/network/base_api.py | 2 +- nova/network/driver.py | 2 +- nova/network/floating_ips.py | 2 +- nova/network/ldapdns.py | 2 +- nova/network/linux_net.py | 2 +- nova/network/manager.py | 2 +- nova/network/minidns.py | 2 +- nova/network/model.py | 2 +- nova/network/neutronv2/api.py | 2 +- nova/network/security_group/neutron_driver.py | 2 +- .../security_group/security_group_base.py | 2 +- nova/notifications.py | 2 +- nova/objects/base.py | 2 +- nova/objects/block_device.py | 2 +- nova/objects/fields.py | 2 +- nova/objects/instance.py | 2 +- nova/objects/instance_fault.py | 2 +- nova/objects/instance_info_cache.py | 2 +- nova/pci/pci_manager.py | 2 +- nova/quota.py | 2 +- nova/scheduler/chance.py | 2 +- nova/scheduler/driver.py | 2 +- nova/scheduler/filter_scheduler.py | 2 +- nova/scheduler/filters/compute_filter.py 
| 2 +- nova/scheduler/filters/core_filter.py | 2 +- nova/scheduler/filters/ram_filter.py | 2 +- nova/scheduler/filters/trusted_filter.py | 2 +- nova/scheduler/filters/utils.py | 2 +- nova/scheduler/host_manager.py | 2 +- nova/scheduler/scheduler_options.py | 2 +- nova/scheduler/utils.py | 2 +- nova/service.py | 2 +- nova/servicegroup/api.py | 2 +- nova/servicegroup/drivers/db.py | 2 +- nova/servicegroup/drivers/mc.py | 2 +- nova/servicegroup/drivers/zk.py | 2 +- nova/storage/linuxscsi.py | 2 +- .../compute/plugins/v3/test_servers.py | 2 +- .../api/openstack/compute/test_servers.py | 2 +- nova/tests/api/openstack/fakes.py | 2 +- nova/tests/api/openstack/test_faults.py | 34 +++++----- nova/tests/api/openstack/test_wsgi.py | 14 ++-- nova/tests/api/test_auth.py | 2 +- nova/tests/compute/test_compute.py | 2 +- nova/tests/compute/test_keypairs.py | 2 +- nova/tests/db/test_migrations.py | 2 +- nova/tests/fake_ldap.py | 2 +- nova/tests/fake_volume.py | 2 +- nova/tests/integrated/api/client.py | 2 +- .../tests/integrated/api_samples_test_base.py | 2 +- nova/tests/test_exception.py | 11 ---- nova/tests/test_nova_manage.py | 2 +- nova/tests/virt/hyperv/test_hypervapi.py | 2 +- nova/tests/virt/libvirt/fakelibvirt.py | 2 +- nova/tests/virt/vmwareapi/fake.py | 2 +- nova/tests/virt/vmwareapi/test_ds_util.py | 2 +- .../virt/xenapi/image/test_bittorrent.py | 2 +- nova/tests/virt/xenapi/test_vm_utils.py | 2 +- nova/utils.py | 5 +- nova/version.py | 2 +- nova/virt/baremetal/common.py | 2 +- nova/virt/baremetal/db/sqlalchemy/api.py | 2 +- .../virt/baremetal/db/sqlalchemy/migration.py | 2 +- nova/virt/baremetal/driver.py | 2 +- nova/virt/baremetal/iboot_pdu.py | 2 +- nova/virt/baremetal/ipmi.py | 2 +- nova/virt/baremetal/pxe.py | 2 +- nova/virt/baremetal/tilera.py | 2 +- nova/virt/baremetal/tilera_pdu.py | 2 +- nova/virt/baremetal/utils.py | 2 +- nova/virt/baremetal/vif_driver.py | 2 +- nova/virt/baremetal/virtual_power_driver.py | 2 +- nova/virt/baremetal/volume_driver.py | 2 +- 
nova/virt/block_device.py | 2 +- nova/virt/disk/api.py | 2 +- nova/virt/disk/mount/api.py | 2 +- nova/virt/disk/mount/loop.py | 2 +- nova/virt/disk/mount/nbd.py | 2 +- nova/virt/disk/vfs/guestfs.py | 2 +- nova/virt/disk/vfs/localfs.py | 2 +- nova/virt/driver.py | 2 +- nova/virt/event.py | 2 +- nova/virt/fake.py | 2 +- nova/virt/firewall.py | 4 +- nova/virt/hardware.py | 2 +- nova/virt/hyperv/basevolumeutils.py | 2 +- nova/virt/hyperv/driver.py | 2 +- nova/virt/hyperv/imagecache.py | 2 +- nova/virt/hyperv/livemigrationops.py | 2 +- nova/virt/hyperv/livemigrationutils.py | 2 +- nova/virt/hyperv/migrationops.py | 2 +- nova/virt/hyperv/networkutils.py | 2 +- nova/virt/hyperv/networkutilsv2.py | 2 +- nova/virt/hyperv/pathutils.py | 2 +- nova/virt/hyperv/snapshotops.py | 2 +- nova/virt/hyperv/vhdutils.py | 2 +- nova/virt/hyperv/vhdutilsv2.py | 2 +- nova/virt/hyperv/vmops.py | 2 +- nova/virt/hyperv/vmutils.py | 2 +- nova/virt/hyperv/volumeops.py | 2 +- nova/virt/hyperv/volumeutils.py | 2 +- nova/virt/hyperv/volumeutilsv2.py | 2 +- nova/virt/images.py | 2 +- nova/virt/libvirt/blockinfo.py | 2 +- nova/virt/libvirt/driver.py | 8 +-- nova/virt/libvirt/firewall.py | 4 +- nova/virt/libvirt/imagebackend.py | 4 +- nova/virt/libvirt/imagecache.py | 6 +- nova/virt/libvirt/lvm.py | 6 +- nova/virt/libvirt/utils.py | 6 +- nova/virt/libvirt/vif.py | 6 +- nova/virt/libvirt/volume.py | 6 +- nova/virt/storage_users.py | 2 +- nova/virt/vmwareapi/driver.py | 3 +- nova/virt/vmwareapi/ds_util.py | 2 +- nova/virt/vmwareapi/error_util.py | 2 +- nova/virt/vmwareapi/imagecache.py | 2 +- nova/virt/vmwareapi/io_util.py | 2 +- nova/virt/vmwareapi/network_util.py | 2 +- nova/virt/vmwareapi/vif.py | 2 +- nova/virt/vmwareapi/vim.py | 2 +- nova/virt/vmwareapi/vim_util.py | 2 +- nova/virt/vmwareapi/vm_util.py | 2 +- nova/virt/vmwareapi/vmops.py | 2 +- nova/virt/vmwareapi/volumeops.py | 2 +- nova/virt/xenapi/agent.py | 2 +- nova/virt/xenapi/client/session.py | 2 +- nova/virt/xenapi/driver.py | 2 +- 
nova/virt/xenapi/fake.py | 2 +- nova/virt/xenapi/host.py | 2 +- nova/virt/xenapi/image/bittorrent.py | 2 +- nova/virt/xenapi/network_utils.py | 2 +- nova/virt/xenapi/pool.py | 2 +- nova/virt/xenapi/vif.py | 2 +- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/vmops.py | 2 +- nova/virt/xenapi/volume_utils.py | 2 +- nova/virt/xenapi/volumeops.py | 2 +- nova/vnc/xvp_proxy.py | 2 +- nova/volume/cinder.py | 2 +- nova/volume/encryptors/__init__.py | 2 +- nova/wsgi.py | 2 +- requirements.txt | 1 + tools/db/schema_diff.py | 2 +- tools/esx/guest_tool.py | 2 +- tox.ini | 2 +- 296 files changed, 411 insertions(+), 366 deletions(-) create mode 100644 nova/i18n.py diff --git a/doc/source/devref/il8n.rst b/doc/source/devref/il8n.rst index 0073950551..3898e5302c 100644 --- a/doc/source/devref/il8n.rst +++ b/doc/source/devref/il8n.rst @@ -21,4 +21,4 @@ in nova/tests/test_localization.py. The ``_()`` function is found by doing:: - from nova.openstack.common.gettextutils import _ + from nova.i18n import _ diff --git a/nova/api/auth.py b/nova/api/auth.py index b0015cce2b..c2efb97033 100644 --- a/nova/api/auth.py +++ b/nova/api/auth.py @@ -21,7 +21,7 @@ import webob.exc from nova import context -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common.middleware import request_id diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 358e7c91e7..036c6bff7d 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -32,8 +32,8 @@ from nova.api import validator from nova import context from nova import exception -from nova.openstack.common.gettextutils import _ -from nova.openstack.common.gettextutils import _LE +from nova.i18n import _ +from nova.i18n import _LE from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging diff --git 
a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index f6afb1f123..aea4c7aafe 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -38,12 +38,12 @@ from nova.compute import vm_states from nova import db from nova import exception +from nova.i18n import _ from nova.image import s3 from nova import network from nova.network.security_group import neutron_driver from nova import objects from nova.objects import base as obj_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova import quota diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py index 97cbe5627a..88bf255764 100644 --- a/nova/api/ec2/ec2utils.py +++ b/nova/api/ec2/ec2utils.py @@ -21,10 +21,10 @@ from nova import context from nova import db from nova import exception +from nova.i18n import _ from nova.network import model as network_model from nova import objects from nova.objects import base as obj_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import memorycache from nova.openstack.common import timeutils diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py index 29c0443b29..d93296621e 100644 --- a/nova/api/metadata/handler.py +++ b/nova/api/metadata/handler.py @@ -27,7 +27,7 @@ from nova.api.metadata import base from nova import conductor from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import memorycache from nova import utils diff --git a/nova/api/metadata/password.py b/nova/api/metadata/password.py index ec32d29e84..6e067797b0 100644 --- a/nova/api/metadata/password.py +++ b/nova/api/metadata/password.py @@ -16,8 +16,8 @@ from webob import exc from nova import context +from nova.i18n import _ from nova import objects -from 
nova.openstack.common.gettextutils import _ from nova import utils diff --git a/nova/api/metadata/vendordata_json.py b/nova/api/metadata/vendordata_json.py index 55edd55120..b8e4e53cdd 100644 --- a/nova/api/metadata/vendordata_json.py +++ b/nova/api/metadata/vendordata_json.py @@ -20,7 +20,7 @@ from oslo.config import cfg from nova.api.metadata import base -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index cac785241d..b8d6e6568e 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -27,9 +27,9 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova import exception +from nova.i18n import _ +from nova.i18n import translate from nova import notifications -from nova.openstack.common import gettextutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import utils from nova import wsgi as base_wsgi @@ -107,12 +107,8 @@ def _error(self, inner, req): # inconsistent with the EC2 API to hide every exception, # including those that are safe to expose, see bug 1021373 if safe: - if isinstance(inner.msg_fmt, gettextutils.Message): - user_locale = req.best_match_language() - inner_msg = gettextutils.translate( - inner.msg_fmt, user_locale) - else: - inner_msg = unicode(inner) + user_locale = req.best_match_language() + inner_msg = translate(inner.message, user_locale) outer.explanation = '%s: %s' % (inner.__class__.__name__, inner_msg) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index d0105e57d7..ef46f3cb72 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -29,7 +29,7 @@ from nova.compute import utils as compute_utils from nova.compute import vm_states from nova import exception -from 
nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova import quota diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py index 4092cbbeaf..9f356f7852 100644 --- a/nova/api/openstack/compute/contrib/admin_actions.py +++ b/nova/api/openstack/compute/contrib/admin_actions.py @@ -24,7 +24,7 @@ from nova import compute from nova.compute import vm_states from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import strutils diff --git a/nova/api/openstack/compute/contrib/aggregates.py b/nova/api/openstack/compute/contrib/aggregates.py index c5435c5f21..f062c00cae 100644 --- a/nova/api/openstack/compute/contrib/aggregates.py +++ b/nova/api/openstack/compute/contrib/aggregates.py @@ -22,7 +22,7 @@ from nova.api.openstack import extensions from nova.compute import api as compute_api from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova import utils diff --git a/nova/api/openstack/compute/contrib/assisted_volume_snapshots.py b/nova/api/openstack/compute/contrib/assisted_volume_snapshots.py index b54008c278..76ea7d10e1 100644 --- a/nova/api/openstack/compute/contrib/assisted_volume_snapshots.py +++ b/nova/api/openstack/compute/contrib/assisted_volume_snapshots.py @@ -19,7 +19,7 @@ from nova.api.openstack import xmlutil from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging diff --git a/nova/api/openstack/compute/contrib/attach_interfaces.py b/nova/api/openstack/compute/contrib/attach_interfaces.py index fdc6441b47..f3b4761724 100644 --- 
a/nova/api/openstack/compute/contrib/attach_interfaces.py +++ b/nova/api/openstack/compute/contrib/attach_interfaces.py @@ -21,8 +21,8 @@ from nova.api.openstack import extensions from nova import compute from nova import exception +from nova.i18n import _ from nova import network -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging diff --git a/nova/api/openstack/compute/contrib/baremetal_nodes.py b/nova/api/openstack/compute/contrib/baremetal_nodes.py index 22a3faa98c..bff5a45be0 100644 --- a/nova/api/openstack/compute/contrib/baremetal_nodes.py +++ b/nova/api/openstack/compute/contrib/baremetal_nodes.py @@ -22,7 +22,7 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.virt.baremetal import db authorize = extensions.extension_authorizer('compute', 'baremetal_nodes') diff --git a/nova/api/openstack/compute/contrib/cells.py b/nova/api/openstack/compute/contrib/cells.py index 3937c24338..fa2661f10c 100644 --- a/nova/api/openstack/compute/contrib/cells.py +++ b/nova/api/openstack/compute/contrib/cells.py @@ -28,7 +28,7 @@ from nova.cells import rpcapi as cells_rpcapi from nova.compute import api as compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova.openstack.common import timeutils diff --git a/nova/api/openstack/compute/contrib/certificates.py b/nova/api/openstack/compute/contrib/certificates.py index a483c44085..9f1131aae6 100644 --- a/nova/api/openstack/compute/contrib/certificates.py +++ b/nova/api/openstack/compute/contrib/certificates.py @@ -18,7 +18,7 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil import nova.cert.rpcapi -from nova.openstack.common.gettextutils import _ +from nova.i18n 
import _ authorize = extensions.extension_authorizer('compute', 'certificates') diff --git a/nova/api/openstack/compute/contrib/cloudpipe.py b/nova/api/openstack/compute/contrib/cloudpipe.py index ec24a56a13..c10c45b7f2 100644 --- a/nova/api/openstack/compute/contrib/cloudpipe.py +++ b/nova/api/openstack/compute/contrib/cloudpipe.py @@ -25,9 +25,9 @@ from nova.compute import utils as compute_utils from nova.compute import vm_states from nova import exception +from nova.i18n import _ from nova import network from nova.openstack.common import fileutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import timeutils from nova import utils diff --git a/nova/api/openstack/compute/contrib/cloudpipe_update.py b/nova/api/openstack/compute/contrib/cloudpipe_update.py index 7ee8f14d7a..e601eb282a 100644 --- a/nova/api/openstack/compute/contrib/cloudpipe_update.py +++ b/nova/api/openstack/compute/contrib/cloudpipe_update.py @@ -17,8 +17,8 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi +from nova.i18n import _ from nova import objects -from nova.openstack.common.gettextutils import _ authorize = extensions.extension_authorizer('compute', 'cloudpipe_update') diff --git a/nova/api/openstack/compute/contrib/console_auth_tokens.py b/nova/api/openstack/compute/contrib/console_auth_tokens.py index 681cb8577e..a56e636b89 100644 --- a/nova/api/openstack/compute/contrib/console_auth_tokens.py +++ b/nova/api/openstack/compute/contrib/console_auth_tokens.py @@ -18,7 +18,7 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.consoleauth import rpcapi as consoleauth_rpcapi -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ authorize = extensions.extension_authorizer('compute', 'console_auth_tokens') diff --git a/nova/api/openstack/compute/contrib/console_output.py b/nova/api/openstack/compute/contrib/console_output.py index fab5c25afd..63aa1c8f04 100644 --- 
a/nova/api/openstack/compute/contrib/console_output.py +++ b/nova/api/openstack/compute/contrib/console_output.py @@ -22,7 +22,7 @@ from nova.api.openstack import wsgi from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ authorize = extensions.extension_authorizer('compute', 'console_output') diff --git a/nova/api/openstack/compute/contrib/consoles.py b/nova/api/openstack/compute/contrib/consoles.py index 2ea7ca24d2..cf97764f6e 100644 --- a/nova/api/openstack/compute/contrib/consoles.py +++ b/nova/api/openstack/compute/contrib/consoles.py @@ -18,7 +18,7 @@ from nova.api.openstack import wsgi from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ authorize = extensions.extension_authorizer('compute', 'consoles') diff --git a/nova/api/openstack/compute/contrib/disk_config.py b/nova/api/openstack/compute/contrib/disk_config.py index 19817eab24..7118be549f 100644 --- a/nova/api/openstack/compute/contrib/disk_config.py +++ b/nova/api/openstack/compute/contrib/disk_config.py @@ -19,7 +19,7 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api.openstack import xmlutil -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import strutils ALIAS = 'OS-DCF' diff --git a/nova/api/openstack/compute/contrib/evacuate.py b/nova/api/openstack/compute/contrib/evacuate.py index 723167a993..cd6f8f4ccb 100644 --- a/nova/api/openstack/compute/contrib/evacuate.py +++ b/nova/api/openstack/compute/contrib/evacuate.py @@ -20,7 +20,7 @@ from nova.api.openstack import wsgi from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import strutils from nova import utils diff --git a/nova/api/openstack/compute/contrib/fixed_ips.py 
b/nova/api/openstack/compute/contrib/fixed_ips.py index 805f1cbed5..199a4a105d 100644 --- a/nova/api/openstack/compute/contrib/fixed_ips.py +++ b/nova/api/openstack/compute/contrib/fixed_ips.py @@ -16,8 +16,8 @@ from nova.api.openstack import extensions from nova import exception +from nova.i18n import _ from nova import objects -from nova.openstack.common.gettextutils import _ authorize = extensions.extension_authorizer('compute', 'fixed_ips') diff --git a/nova/api/openstack/compute/contrib/flavor_access.py b/nova/api/openstack/compute/contrib/flavor_access.py index a497f75824..198dfbb8e2 100644 --- a/nova/api/openstack/compute/contrib/flavor_access.py +++ b/nova/api/openstack/compute/contrib/flavor_access.py @@ -21,8 +21,8 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import exception +from nova.i18n import _ from nova import objects -from nova.openstack.common.gettextutils import _ soft_authorize = extensions.soft_extension_authorizer('compute', diff --git a/nova/api/openstack/compute/contrib/flavorextraspecs.py b/nova/api/openstack/compute/contrib/flavorextraspecs.py index 8ba0db9d7f..2f6f06f75c 100644 --- a/nova/api/openstack/compute/contrib/flavorextraspecs.py +++ b/nova/api/openstack/compute/contrib/flavorextraspecs.py @@ -22,8 +22,8 @@ from nova.api.openstack import xmlutil from nova.compute import flavors from nova import exception +from nova.i18n import _ from nova import objects -from nova.openstack.common.gettextutils import _ from nova import utils authorize = extensions.extension_authorizer('compute', 'flavorextraspecs') diff --git a/nova/api/openstack/compute/contrib/flavormanage.py b/nova/api/openstack/compute/contrib/flavormanage.py index fe6b170eee..af5df772e0 100644 --- a/nova/api/openstack/compute/contrib/flavormanage.py +++ b/nova/api/openstack/compute/contrib/flavormanage.py @@ -18,7 +18,7 @@ from nova.api.openstack import wsgi from nova.compute import flavors from nova import exception -from 
nova.openstack.common.gettextutils import _ +from nova.i18n import _ authorize = extensions.extension_authorizer('compute', 'flavormanage') diff --git a/nova/api/openstack/compute/contrib/floating_ip_dns.py b/nova/api/openstack/compute/contrib/floating_ip_dns.py index 3e5fb9b5e2..771c25dfb7 100644 --- a/nova/api/openstack/compute/contrib/floating_ip_dns.py +++ b/nova/api/openstack/compute/contrib/floating_ip_dns.py @@ -20,8 +20,8 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import exception +from nova.i18n import _ from nova import network -from nova.openstack.common.gettextutils import _ from nova import utils diff --git a/nova/api/openstack/compute/contrib/floating_ips.py b/nova/api/openstack/compute/contrib/floating_ips.py index bc1f18eb56..8bded0cc61 100644 --- a/nova/api/openstack/compute/contrib/floating_ips.py +++ b/nova/api/openstack/compute/contrib/floating_ips.py @@ -24,8 +24,8 @@ from nova import compute from nova.compute import utils as compute_utils from nova import exception +from nova.i18n import _ from nova import network -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import uuidutils diff --git a/nova/api/openstack/compute/contrib/floating_ips_bulk.py b/nova/api/openstack/compute/contrib/floating_ips_bulk.py index 4044ce2bbe..4a6facba13 100644 --- a/nova/api/openstack/compute/contrib/floating_ips_bulk.py +++ b/nova/api/openstack/compute/contrib/floating_ips_bulk.py @@ -18,8 +18,8 @@ from nova.api.openstack import extensions from nova import exception +from nova.i18n import _ from nova import objects -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging CONF = cfg.CONF diff --git a/nova/api/openstack/compute/contrib/fping.py b/nova/api/openstack/compute/contrib/fping.py index 2b8a753426..28128188f1 100644 --- a/nova/api/openstack/compute/contrib/fping.py +++ 
b/nova/api/openstack/compute/contrib/fping.py @@ -24,7 +24,7 @@ from nova.api.openstack import extensions from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import utils authorize = extensions.extension_authorizer('compute', 'fping') diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py index 94720773b8..cb2303799a 100644 --- a/nova/api/openstack/compute/contrib/hosts.py +++ b/nova/api/openstack/compute/contrib/hosts.py @@ -22,7 +22,7 @@ from nova.api.openstack import xmlutil from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/api/openstack/compute/contrib/hypervisors.py b/nova/api/openstack/compute/contrib/hypervisors.py index 33d3ad0d06..22b51fce74 100644 --- a/nova/api/openstack/compute/contrib/hypervisors.py +++ b/nova/api/openstack/compute/contrib/hypervisors.py @@ -22,7 +22,7 @@ from nova.api.openstack import xmlutil from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ authorize = extensions.extension_authorizer('compute', 'hypervisors') diff --git a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py index d39318a0cd..90805469dc 100644 --- a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py +++ b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py @@ -21,7 +21,7 @@ from nova.api.openstack import extensions from nova import compute -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import utils CONF = cfg.CONF diff --git a/nova/api/openstack/compute/contrib/keypairs.py b/nova/api/openstack/compute/contrib/keypairs.py index 1c58675725..cd023e8eae 
100644 --- a/nova/api/openstack/compute/contrib/keypairs.py +++ b/nova/api/openstack/compute/contrib/keypairs.py @@ -24,7 +24,7 @@ from nova.api.openstack import xmlutil from nova.compute import api as compute_api from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ authorize = extensions.extension_authorizer('compute', 'keypairs') diff --git a/nova/api/openstack/compute/contrib/multinic.py b/nova/api/openstack/compute/contrib/multinic.py index 6887c2ef8e..5bba1d86b7 100644 --- a/nova/api/openstack/compute/contrib/multinic.py +++ b/nova/api/openstack/compute/contrib/multinic.py @@ -22,7 +22,7 @@ from nova.api.openstack import wsgi from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging diff --git a/nova/api/openstack/compute/contrib/networks_associate.py b/nova/api/openstack/compute/contrib/networks_associate.py index f8005f1f24..50edcffe9b 100644 --- a/nova/api/openstack/compute/contrib/networks_associate.py +++ b/nova/api/openstack/compute/contrib/networks_associate.py @@ -15,8 +15,8 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova import exception +from nova.i18n import _ from nova import network -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/api/openstack/compute/contrib/os_networks.py b/nova/api/openstack/compute/contrib/os_networks.py index fbc6b3aa83..12e396c2a3 100644 --- a/nova/api/openstack/compute/contrib/os_networks.py +++ b/nova/api/openstack/compute/contrib/os_networks.py @@ -21,8 +21,8 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova import exception +from nova.i18n import _ from nova import network -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging LOG 
= logging.getLogger(__name__) diff --git a/nova/api/openstack/compute/contrib/os_tenant_networks.py b/nova/api/openstack/compute/contrib/os_tenant_networks.py index 90ad2ba529..b7ecf4957f 100644 --- a/nova/api/openstack/compute/contrib/os_tenant_networks.py +++ b/nova/api/openstack/compute/contrib/os_tenant_networks.py @@ -22,8 +22,8 @@ from nova.api.openstack import extensions from nova import context as nova_context from nova import exception +from nova.i18n import _ import nova.network -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import quota diff --git a/nova/api/openstack/compute/contrib/quota_classes.py b/nova/api/openstack/compute/contrib/quota_classes.py index 755bdefc9c..bb034a0b4d 100644 --- a/nova/api/openstack/compute/contrib/quota_classes.py +++ b/nova/api/openstack/compute/contrib/quota_classes.py @@ -21,7 +21,7 @@ import nova.context from nova import db from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import quota from nova import utils diff --git a/nova/api/openstack/compute/contrib/quotas.py b/nova/api/openstack/compute/contrib/quotas.py index 46bd353924..c8ba23b0e3 100644 --- a/nova/api/openstack/compute/contrib/quotas.py +++ b/nova/api/openstack/compute/contrib/quotas.py @@ -22,7 +22,7 @@ import nova.context from nova import db from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova import quota diff --git a/nova/api/openstack/compute/contrib/rescue.py b/nova/api/openstack/compute/contrib/rescue.py index b5f9755b21..15338a2ffc 100644 --- a/nova/api/openstack/compute/contrib/rescue.py +++ b/nova/api/openstack/compute/contrib/rescue.py @@ -23,7 +23,7 @@ from nova.api.openstack import wsgi from nova import compute from nova import exception -from 
nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import utils diff --git a/nova/api/openstack/compute/contrib/scheduler_hints.py b/nova/api/openstack/compute/contrib/scheduler_hints.py index 2b2c129961..c1d69413a4 100644 --- a/nova/api/openstack/compute/contrib/scheduler_hints.py +++ b/nova/api/openstack/compute/contrib/scheduler_hints.py @@ -16,7 +16,7 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ class SchedulerHintsController(wsgi.Controller): diff --git a/nova/api/openstack/compute/contrib/security_group_default_rules.py b/nova/api/openstack/compute/contrib/security_group_default_rules.py index 6216cc3365..fa73368527 100644 --- a/nova/api/openstack/compute/contrib/security_group_default_rules.py +++ b/nova/api/openstack/compute/contrib/security_group_default_rules.py @@ -20,8 +20,8 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import exception +from nova.i18n import _ from nova.network.security_group import openstack_driver -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import xmlutils diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py index a9e4fe2c86..93d0604764 100644 --- a/nova/api/openstack/compute/contrib/security_groups.py +++ b/nova/api/openstack/compute/contrib/security_groups.py @@ -29,9 +29,9 @@ from nova import compute from nova.compute import api as compute_api from nova import exception +from nova.i18n import _ from nova.network.security_group import neutron_driver from nova.network.security_group import openstack_driver -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import xmlutils from nova.virt import netutils diff --git 
a/nova/api/openstack/compute/contrib/server_external_events.py b/nova/api/openstack/compute/contrib/server_external_events.py index 97dd23b532..34d12f7036 100644 --- a/nova/api/openstack/compute/contrib/server_external_events.py +++ b/nova/api/openstack/compute/contrib/server_external_events.py @@ -19,9 +19,9 @@ from nova.api.openstack import xmlutil from nova import compute from nova import exception +from nova.i18n import _ from nova import objects from nova.objects import external_event as external_event_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging diff --git a/nova/api/openstack/compute/contrib/server_groups.py b/nova/api/openstack/compute/contrib/server_groups.py index 0d10596ecd..3a970f83a3 100644 --- a/nova/api/openstack/compute/contrib/server_groups.py +++ b/nova/api/openstack/compute/contrib/server_groups.py @@ -23,8 +23,8 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil import nova.exception +from nova.i18n import _ from nova import objects -from nova.openstack.common.gettextutils import _ from nova import utils # NOTE(russellb) There is one other policy, 'legacy', but we don't allow that diff --git a/nova/api/openstack/compute/contrib/server_start_stop.py b/nova/api/openstack/compute/contrib/server_start_stop.py index d08f239e7f..96c9f11c58 100644 --- a/nova/api/openstack/compute/contrib/server_start_stop.py +++ b/nova/api/openstack/compute/contrib/server_start_stop.py @@ -18,8 +18,8 @@ from nova.api.openstack import wsgi from nova import compute from nova import exception +from nova.i18n import _ from nova import objects -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging diff --git a/nova/api/openstack/compute/contrib/services.py b/nova/api/openstack/compute/contrib/services.py index 0c1509ab47..6ec91e6b01 100644 --- a/nova/api/openstack/compute/contrib/services.py +++ b/nova/api/openstack/compute/contrib/services.py @@ 
-20,7 +20,7 @@ from nova.api.openstack import xmlutil from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import servicegroup from nova import utils diff --git a/nova/api/openstack/compute/contrib/shelve.py b/nova/api/openstack/compute/contrib/shelve.py index 76d73e111d..d5532ca9bd 100644 --- a/nova/api/openstack/compute/contrib/shelve.py +++ b/nova/api/openstack/compute/contrib/shelve.py @@ -22,7 +22,7 @@ from nova.api.openstack import wsgi from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ auth_shelve = exts.extension_authorizer('compute', 'shelve') diff --git a/nova/api/openstack/compute/contrib/simple_tenant_usage.py b/nova/api/openstack/compute/contrib/simple_tenant_usage.py index 6b9c25cf9b..361df17493 100644 --- a/nova/api/openstack/compute/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/compute/contrib/simple_tenant_usage.py @@ -23,9 +23,9 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import exception +from nova.i18n import _ from nova import objects from nova.objects import instance as instance_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import timeutils authorize_show = extensions.extension_authorizer('compute', diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py index 8a613c8bcc..b2fa26f5e0 100644 --- a/nova/api/openstack/compute/contrib/volumes.py +++ b/nova/api/openstack/compute/contrib/volumes.py @@ -24,8 +24,8 @@ from nova.api.openstack import xmlutil from nova import compute from nova import exception +from nova.i18n import _ from nova import objects -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova.openstack.common import uuidutils diff --git 
a/nova/api/openstack/compute/extensions.py b/nova/api/openstack/compute/extensions.py index af3e4af757..24ea65234f 100644 --- a/nova/api/openstack/compute/extensions.py +++ b/nova/api/openstack/compute/extensions.py @@ -16,7 +16,7 @@ from oslo.config import cfg from nova.api.openstack import extensions as base_extensions -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging ext_opts = [ diff --git a/nova/api/openstack/compute/flavors.py b/nova/api/openstack/compute/flavors.py index 2aa8a32d24..3871131beb 100644 --- a/nova/api/openstack/compute/flavors.py +++ b/nova/api/openstack/compute/flavors.py @@ -20,7 +20,7 @@ from nova.api.openstack import xmlutil from nova.compute import flavors from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import strutils from nova import utils diff --git a/nova/api/openstack/compute/image_metadata.py b/nova/api/openstack/compute/image_metadata.py index 4f871762d0..a77e37266e 100644 --- a/nova/api/openstack/compute/image_metadata.py +++ b/nova/api/openstack/compute/image_metadata.py @@ -18,8 +18,8 @@ from nova.api.openstack import common from nova.api.openstack import wsgi from nova import exception +from nova.i18n import _ from nova.image import glance -from nova.openstack.common.gettextutils import _ class Controller(object): diff --git a/nova/api/openstack/compute/images.py b/nova/api/openstack/compute/images.py index 13a3c0deba..216e0858f7 100644 --- a/nova/api/openstack/compute/images.py +++ b/nova/api/openstack/compute/images.py @@ -20,8 +20,8 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import exception +from nova.i18n import _ import nova.image.glance -from nova.openstack.common.gettextutils import _ import nova.utils diff --git a/nova/api/openstack/compute/ips.py b/nova/api/openstack/compute/ips.py index 3117b6b498..a537f21dcb 100644 --- 
a/nova/api/openstack/compute/ips.py +++ b/nova/api/openstack/compute/ips.py @@ -20,7 +20,7 @@ from nova.api.openstack.compute.views import addresses as view_addresses from nova.api.openstack import wsgi from nova.api.openstack import xmlutil -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ def make_network(elem): diff --git a/nova/api/openstack/compute/limits.py b/nova/api/openstack/compute/limits.py index 90194088ac..655934723b 100644 --- a/nova/api/openstack/compute/limits.py +++ b/nova/api/openstack/compute/limits.py @@ -44,7 +44,7 @@ from nova.api.openstack.compute.views import limits as limits_views from nova.api.openstack import wsgi from nova.api.openstack import xmlutil -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova import quota diff --git a/nova/api/openstack/compute/plugins/__init__.py b/nova/api/openstack/compute/plugins/__init__.py index c9f8605e2b..73857e2541 100644 --- a/nova/api/openstack/compute/plugins/__init__.py +++ b/nova/api/openstack/compute/plugins/__init__.py @@ -14,7 +14,7 @@ from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/api/openstack/compute/plugins/v3/access_ips.py b/nova/api/openstack/compute/plugins/v3/access_ips.py index 3804c031c2..d7378d3a2a 100644 --- a/nova/api/openstack/compute/plugins/v3/access_ips.py +++ b/nova/api/openstack/compute/plugins/v3/access_ips.py @@ -16,7 +16,7 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova import utils diff --git a/nova/api/openstack/compute/plugins/v3/admin_actions.py b/nova/api/openstack/compute/plugins/v3/admin_actions.py index 
326d9e7642..0ebb82f32e 100644 --- a/nova/api/openstack/compute/plugins/v3/admin_actions.py +++ b/nova/api/openstack/compute/plugins/v3/admin_actions.py @@ -21,7 +21,7 @@ from nova import compute from nova.compute import vm_states from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/api/openstack/compute/plugins/v3/admin_password.py b/nova/api/openstack/compute/plugins/v3/admin_password.py index 9265811bb2..7bf0f86529 100644 --- a/nova/api/openstack/compute/plugins/v3/admin_password.py +++ b/nova/api/openstack/compute/plugins/v3/admin_password.py @@ -21,7 +21,7 @@ from nova.api import validation from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ ALIAS = "os-admin-password" diff --git a/nova/api/openstack/compute/plugins/v3/aggregates.py b/nova/api/openstack/compute/plugins/v3/aggregates.py index dde3abc92b..61474c4d01 100644 --- a/nova/api/openstack/compute/plugins/v3/aggregates.py +++ b/nova/api/openstack/compute/plugins/v3/aggregates.py @@ -25,7 +25,7 @@ from nova.api import validation from nova.compute import api as compute_api from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova import utils diff --git a/nova/api/openstack/compute/plugins/v3/attach_interfaces.py b/nova/api/openstack/compute/plugins/v3/attach_interfaces.py index 477f432be9..ed805dc574 100644 --- a/nova/api/openstack/compute/plugins/v3/attach_interfaces.py +++ b/nova/api/openstack/compute/plugins/v3/attach_interfaces.py @@ -24,8 +24,8 @@ from nova.api import validation from nova import compute from nova import exception +from nova.i18n import _ from nova import network -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging 
diff --git a/nova/api/openstack/compute/plugins/v3/cells.py b/nova/api/openstack/compute/plugins/v3/cells.py index 31c3ffb133..2c7c9eae6f 100644 --- a/nova/api/openstack/compute/plugins/v3/cells.py +++ b/nova/api/openstack/compute/plugins/v3/cells.py @@ -27,7 +27,7 @@ from nova.cells import rpcapi as cells_rpcapi from nova.compute import api as compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova.openstack.common import timeutils diff --git a/nova/api/openstack/compute/plugins/v3/certificates.py b/nova/api/openstack/compute/plugins/v3/certificates.py index 780cb4914e..6ab3d43231 100644 --- a/nova/api/openstack/compute/plugins/v3/certificates.py +++ b/nova/api/openstack/compute/plugins/v3/certificates.py @@ -18,8 +18,8 @@ from nova.api.openstack import wsgi import nova.cert.rpcapi from nova import exception +from nova.i18n import _ from nova import network -from nova.openstack.common.gettextutils import _ ALIAS = "os-certificates" authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS) diff --git a/nova/api/openstack/compute/plugins/v3/console_auth_tokens.py b/nova/api/openstack/compute/plugins/v3/console_auth_tokens.py index c1e0482218..3d6f47740d 100644 --- a/nova/api/openstack/compute/plugins/v3/console_auth_tokens.py +++ b/nova/api/openstack/compute/plugins/v3/console_auth_tokens.py @@ -18,7 +18,7 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.consoleauth import rpcapi as consoleauth_rpcapi -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ ALIAS = "os-console-auth-tokens" authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS) diff --git a/nova/api/openstack/compute/plugins/v3/console_output.py b/nova/api/openstack/compute/plugins/v3/console_output.py index 37cef3fd46..7921298bf8 100644 --- 
a/nova/api/openstack/compute/plugins/v3/console_output.py +++ b/nova/api/openstack/compute/plugins/v3/console_output.py @@ -23,7 +23,7 @@ from nova.api import validation from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ ALIAS = "os-console-output" authorize = extensions.extension_authorizer('compute', "v3:" + ALIAS) diff --git a/nova/api/openstack/compute/plugins/v3/evacuate.py b/nova/api/openstack/compute/plugins/v3/evacuate.py index dc0f39e6ad..de04f24a2c 100644 --- a/nova/api/openstack/compute/plugins/v3/evacuate.py +++ b/nova/api/openstack/compute/plugins/v3/evacuate.py @@ -23,7 +23,7 @@ from nova.api import validation from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova import utils diff --git a/nova/api/openstack/compute/plugins/v3/extended_volumes.py b/nova/api/openstack/compute/plugins/v3/extended_volumes.py index 6d5eb48630..1a4714f3fd 100644 --- a/nova/api/openstack/compute/plugins/v3/extended_volumes.py +++ b/nova/api/openstack/compute/plugins/v3/extended_volumes.py @@ -23,8 +23,8 @@ from nova.api import validation from nova import compute from nova import exception +from nova.i18n import _ from nova import objects -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import volume diff --git a/nova/api/openstack/compute/plugins/v3/flavor_access.py b/nova/api/openstack/compute/plugins/v3/flavor_access.py index 762f24f52e..7c6e1ccb1e 100644 --- a/nova/api/openstack/compute/plugins/v3/flavor_access.py +++ b/nova/api/openstack/compute/plugins/v3/flavor_access.py @@ -22,8 +22,8 @@ from nova.api.openstack import wsgi from nova.api import validation from nova import exception +from nova.i18n import _ from nova import objects -from 
nova.openstack.common.gettextutils import _ ALIAS = 'flavor-access' soft_authorize = extensions.soft_extension_authorizer('compute', diff --git a/nova/api/openstack/compute/plugins/v3/flavors.py b/nova/api/openstack/compute/plugins/v3/flavors.py index d29156b03b..7e84219b3e 100644 --- a/nova/api/openstack/compute/plugins/v3/flavors.py +++ b/nova/api/openstack/compute/plugins/v3/flavors.py @@ -20,7 +20,7 @@ from nova.api.openstack import wsgi from nova.compute import flavors from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import strutils from nova import utils diff --git a/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py b/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py index 6c4fd157e6..90b3070a6e 100644 --- a/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py +++ b/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py @@ -19,8 +19,8 @@ from nova.api.openstack import wsgi from nova.compute import flavors from nova import exception +from nova.i18n import _ from nova import objects -from nova.openstack.common.gettextutils import _ class FlavorExtraSpecsController(object): diff --git a/nova/api/openstack/compute/plugins/v3/hosts.py b/nova/api/openstack/compute/plugins/v3/hosts.py index 5087b0171e..324f50f119 100644 --- a/nova/api/openstack/compute/plugins/v3/hosts.py +++ b/nova/api/openstack/compute/plugins/v3/hosts.py @@ -21,7 +21,7 @@ from nova.api.openstack import wsgi from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/api/openstack/compute/plugins/v3/hypervisors.py b/nova/api/openstack/compute/plugins/v3/hypervisors.py index 644d73a6be..80d0250bc2 100644 --- a/nova/api/openstack/compute/plugins/v3/hypervisors.py +++ b/nova/api/openstack/compute/plugins/v3/hypervisors.py @@ 
-20,7 +20,7 @@ from nova.api.openstack import extensions from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ ALIAS = "os-hypervisors" diff --git a/nova/api/openstack/compute/plugins/v3/ips.py b/nova/api/openstack/compute/plugins/v3/ips.py index ded2524fbc..fe5915b30c 100644 --- a/nova/api/openstack/compute/plugins/v3/ips.py +++ b/nova/api/openstack/compute/plugins/v3/ips.py @@ -20,7 +20,7 @@ from nova.api.openstack.compute.views import addresses as views_addresses from nova.api.openstack import extensions from nova.api.openstack import wsgi -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ class IPsController(wsgi.Controller): diff --git a/nova/api/openstack/compute/plugins/v3/keypairs.py b/nova/api/openstack/compute/plugins/v3/keypairs.py index 503a1a5126..1abbeacfe7 100644 --- a/nova/api/openstack/compute/plugins/v3/keypairs.py +++ b/nova/api/openstack/compute/plugins/v3/keypairs.py @@ -24,7 +24,7 @@ from nova.api import validation from nova.compute import api as compute_api from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ ALIAS = 'keypairs' diff --git a/nova/api/openstack/compute/plugins/v3/multiple_create.py b/nova/api/openstack/compute/plugins/v3/multiple_create.py index 684ec07e0c..5f60f702fe 100644 --- a/nova/api/openstack/compute/plugins/v3/multiple_create.py +++ b/nova/api/openstack/compute/plugins/v3/multiple_create.py @@ -17,7 +17,7 @@ from nova.api.openstack import extensions from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import strutils from nova import utils diff --git a/nova/api/openstack/compute/plugins/v3/pause_server.py b/nova/api/openstack/compute/plugins/v3/pause_server.py index 25b758b986..dbbead7c07 100644 --- a/nova/api/openstack/compute/plugins/v3/pause_server.py +++ 
b/nova/api/openstack/compute/plugins/v3/pause_server.py @@ -21,7 +21,7 @@ from nova.api.openstack import wsgi from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/api/openstack/compute/plugins/v3/quota_sets.py b/nova/api/openstack/compute/plugins/v3/quota_sets.py index 8dd99b6905..8b9bf46d28 100644 --- a/nova/api/openstack/compute/plugins/v3/quota_sets.py +++ b/nova/api/openstack/compute/plugins/v3/quota_sets.py @@ -23,7 +23,7 @@ import nova.context from nova import db from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova import quota diff --git a/nova/api/openstack/compute/plugins/v3/remote_consoles.py b/nova/api/openstack/compute/plugins/v3/remote_consoles.py index 7d8310f38c..0f1277c957 100644 --- a/nova/api/openstack/compute/plugins/v3/remote_consoles.py +++ b/nova/api/openstack/compute/plugins/v3/remote_consoles.py @@ -21,7 +21,7 @@ from nova.api import validation from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ ALIAS = "os-remote-consoles" diff --git a/nova/api/openstack/compute/plugins/v3/rescue.py b/nova/api/openstack/compute/plugins/v3/rescue.py index ea8fa75bdc..138c21116c 100644 --- a/nova/api/openstack/compute/plugins/v3/rescue.py +++ b/nova/api/openstack/compute/plugins/v3/rescue.py @@ -25,7 +25,7 @@ from nova.api import validation from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import utils diff --git a/nova/api/openstack/compute/plugins/v3/scheduler_hints.py b/nova/api/openstack/compute/plugins/v3/scheduler_hints.py index c2fed29ab9..35972fdf40 100644 --- 
a/nova/api/openstack/compute/plugins/v3/scheduler_hints.py +++ b/nova/api/openstack/compute/plugins/v3/scheduler_hints.py @@ -15,7 +15,7 @@ import webob.exc from nova.api.openstack import extensions -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ ALIAS = "os-scheduler-hints" diff --git a/nova/api/openstack/compute/plugins/v3/server_actions.py b/nova/api/openstack/compute/plugins/v3/server_actions.py index edfed81f6c..876972f066 100644 --- a/nova/api/openstack/compute/plugins/v3/server_actions.py +++ b/nova/api/openstack/compute/plugins/v3/server_actions.py @@ -19,7 +19,7 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova import compute -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ ALIAS = "os-server-actions" authorize_actions = extensions.extension_authorizer('compute', diff --git a/nova/api/openstack/compute/plugins/v3/server_external_events.py b/nova/api/openstack/compute/plugins/v3/server_external_events.py index b3f848d145..bc59733c04 100644 --- a/nova/api/openstack/compute/plugins/v3/server_external_events.py +++ b/nova/api/openstack/compute/plugins/v3/server_external_events.py @@ -18,9 +18,9 @@ from nova.api.openstack import wsgi from nova import compute from nova import exception +from nova.i18n import _ from nova import objects from nova.objects import external_event as external_event_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging diff --git a/nova/api/openstack/compute/plugins/v3/server_metadata.py b/nova/api/openstack/compute/plugins/v3/server_metadata.py index 7997858336..035a57472b 100644 --- a/nova/api/openstack/compute/plugins/v3/server_metadata.py +++ b/nova/api/openstack/compute/plugins/v3/server_metadata.py @@ -20,7 +20,7 @@ from nova.api.openstack import wsgi from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ class 
ServerMetadataController(wsgi.Controller): diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index 60ab3b8ad8..505d651b8b 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -31,9 +31,9 @@ from nova import compute from nova.compute import flavors from nova import exception +from nova.i18n import _ from nova.image import glance from nova import objects -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova.openstack.common import timeutils diff --git a/nova/api/openstack/compute/plugins/v3/services.py b/nova/api/openstack/compute/plugins/v3/services.py index 6ef0507ff6..f07966de1d 100644 --- a/nova/api/openstack/compute/plugins/v3/services.py +++ b/nova/api/openstack/compute/plugins/v3/services.py @@ -19,7 +19,7 @@ from nova.api.openstack import wsgi from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import servicegroup from nova import utils diff --git a/nova/api/openstack/compute/server_metadata.py b/nova/api/openstack/compute/server_metadata.py index 7e11d1184c..a0340bcb60 100644 --- a/nova/api/openstack/compute/server_metadata.py +++ b/nova/api/openstack/compute/server_metadata.py @@ -19,7 +19,7 @@ from nova.api.openstack import wsgi from nova import compute from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ class Controller(object): diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index ab4f262ba3..5bc97e355c 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -33,9 +33,9 @@ from nova import compute from nova.compute import flavors from nova import exception +from nova.i18n import _ from nova import objects from 
nova.objects import instance as instance_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova.openstack.common import timeutils diff --git a/nova/api/openstack/compute/views/servers.py b/nova/api/openstack/compute/views/servers.py index f09d4272e2..f168aac4e5 100644 --- a/nova/api/openstack/compute/views/servers.py +++ b/nova/api/openstack/compute/views/servers.py @@ -21,8 +21,8 @@ from nova.api.openstack.compute.views import flavors as views_flavors from nova.api.openstack.compute.views import images as views_images from nova.compute import flavors +from nova.i18n import _ from nova.objects import base as obj_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova import utils diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 5ea32160b6..65099be0a0 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -26,7 +26,7 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging import nova.policy diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index d8458af2f5..531213e158 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -25,8 +25,8 @@ from nova.api.openstack import xmlutil from nova import exception -from nova.openstack.common import gettextutils -from nova.openstack.common.gettextutils import _ +from nova import i18n +from nova.i18n import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import utils @@ -193,7 +193,7 @@ def best_match_language(self): if not self.accept_language: return 
None return self.accept_language.best_match( - gettextutils.get_available_languages('nova')) + i18n.get_available_languages()) class ActionDispatcher(object): @@ -1197,8 +1197,7 @@ def __call__(self, req): LOG.debug("Returning %(code)s to user: %(explanation)s", {'code': code, 'explanation': explanation}) - explanation = gettextutils.translate(explanation, - user_locale) + explanation = i18n.translate(explanation, user_locale) fault_data = { fault_name: { 'code': code, @@ -1261,13 +1260,9 @@ def __call__(self, request): metadata = {"attributes": {"overLimit": ["code", "retryAfter"]}} self.content['overLimit']['message'] = \ - gettextutils.translate( - self.content['overLimit']['message'], - user_locale) + i18n.translate(self.content['overLimit']['message'], user_locale) self.content['overLimit']['details'] = \ - gettextutils.translate( - self.content['overLimit']['details'], - user_locale) + i18n.translate(self.content['overLimit']['details'], user_locale) xml_serializer = XMLDictSerializer(metadata, XMLNS_V11) serializer = { diff --git a/nova/api/openstack/xmlutil.py b/nova/api/openstack/xmlutil.py index 68e50f82ea..b401d83525 100644 --- a/nova/api/openstack/xmlutil.py +++ b/nova/api/openstack/xmlutil.py @@ -23,7 +23,7 @@ import six from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import utils diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py index 0248cbacb0..1fab96b3df 100644 --- a/nova/api/sizelimit.py +++ b/nova/api/sizelimit.py @@ -20,7 +20,7 @@ import webob.dec import webob.exc -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import wsgi diff --git a/nova/api/validation/validators.py b/nova/api/validation/validators.py index 3e5e82accb..8cec2de094 100644 --- a/nova/api/validation/validators.py +++ b/nova/api/validation/validators.py @@ -19,7 +19,7 @@ import jsonschema from nova import exception -from nova.openstack.common.gettextutils import _ +from 
nova.i18n import _ from nova.openstack.common import uuidutils diff --git a/nova/block_device.py b/nova/block_device.py index 34453ec6de..b0e048d6c5 100644 --- a/nova/block_device.py +++ b/nova/block_device.py @@ -18,7 +18,7 @@ from oslo.config import cfg from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova import utils diff --git a/nova/cells/filters/target_cell.py b/nova/cells/filters/target_cell.py index de2ef6d18e..43c81e71e5 100644 --- a/nova/cells/filters/target_cell.py +++ b/nova/cells/filters/target_cell.py @@ -22,7 +22,7 @@ """ from nova.cells import filters -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/cells/manager.py b/nova/cells/manager.py index f40bc58ea3..5bad71fa8d 100644 --- a/nova/cells/manager.py +++ b/nova/cells/manager.py @@ -27,10 +27,10 @@ from nova.cells import utils as cells_utils from nova import context from nova import exception +from nova.i18n import _ from nova import manager from nova import objects from nova.objects import base as base_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import periodic_task diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py index b593ae01a8..55fbfd75f1 100644 --- a/nova/cells/messaging.py +++ b/nova/cells/messaging.py @@ -44,12 +44,12 @@ from nova import context from nova.db import base from nova import exception +from nova.i18n import _ from nova.network import model as network_model from nova import objects from nova.objects import base as objects_base from nova.objects import instance_fault as instance_fault_obj from nova.openstack.common import excutils -from 
nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging diff --git a/nova/cells/rpcapi.py b/nova/cells/rpcapi.py index 4a82de2795..87cf8f3c46 100644 --- a/nova/cells/rpcapi.py +++ b/nova/cells/rpcapi.py @@ -27,8 +27,8 @@ from oslo import messaging from nova import exception +from nova.i18n import _ from nova.objects import base as objects_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import rpc diff --git a/nova/cells/scheduler.py b/nova/cells/scheduler.py index bc9144511e..d9552f4324 100644 --- a/nova/cells/scheduler.py +++ b/nova/cells/scheduler.py @@ -30,10 +30,10 @@ from nova import conductor from nova.db import base from nova import exception +from nova.i18n import _ from nova import objects from nova.objects import base as obj_base from nova.objects import instance_action as instance_action_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import utils as scheduler_utils from nova import utils diff --git a/nova/cells/state.py b/nova/cells/state.py index c63df3f955..27261a2c98 100644 --- a/nova/cells/state.py +++ b/nova/cells/state.py @@ -26,8 +26,8 @@ from nova import context from nova.db import base from nova import exception +from nova.i18n import _ from nova.openstack.common import fileutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/cells/weights/mute_child.py b/nova/cells/weights/mute_child.py index ff42d2673f..cc5c0a8c44 100644 --- a/nova/cells/weights/mute_child.py +++ b/nova/cells/weights/mute_child.py @@ -21,7 +21,7 @@ from oslo.config import cfg from nova.cells import weights -from 
nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 8ac164cbb3..8962924b69 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -31,8 +31,8 @@ from nova import crypto from nova import db from nova import exception +from nova.i18n import _ from nova.openstack.common import fileutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import paths from nova import utils diff --git a/nova/cmd/all.py b/nova/cmd/all.py index 75946cd29c..7d7c9d43b1 100644 --- a/nova/cmd/all.py +++ b/nova/cmd/all.py @@ -29,9 +29,9 @@ from oslo.config import cfg from nova import config +from nova.i18n import _ from nova import objects from nova.objectstore import s3server -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import service from nova import utils diff --git a/nova/cmd/baremetal_deploy_helper.py b/nova/cmd/baremetal_deploy_helper.py index fd997ce8e1..348561af99 100644 --- a/nova/cmd/baremetal_deploy_helper.py +++ b/nova/cmd/baremetal_deploy_helper.py @@ -29,9 +29,9 @@ from nova import config from nova import context as nova_context +from nova.i18n import _ from nova import objects from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova.openstack.common import units diff --git a/nova/cmd/baremetal_manage.py b/nova/cmd/baremetal_manage.py index b62f744b56..e8283221ec 100644 --- a/nova/cmd/baremetal_manage.py +++ b/nova/cmd/baremetal_manage.py @@ -58,9 +58,9 @@ import six from nova import config +from nova.i18n import _ from nova import objects from nova.openstack.common import cliutils -from nova.openstack.common.gettextutils 
import _ from nova.openstack.common import log as logging from nova import version from nova.virt.baremetal.db import migration as bmdb_migration diff --git a/nova/cmd/compute.py b/nova/cmd/compute.py index 52f3c93ab9..bad4cf76d7 100644 --- a/nova/cmd/compute.py +++ b/nova/cmd/compute.py @@ -25,9 +25,9 @@ from nova import config import nova.db.api from nova import exception +from nova.i18n import _ from nova import objects from nova.objects import base as objects_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service diff --git a/nova/cmd/dhcpbridge.py b/nova/cmd/dhcpbridge.py index 1045020065..2abb6a8ffd 100644 --- a/nova/cmd/dhcpbridge.py +++ b/nova/cmd/dhcpbridge.py @@ -31,11 +31,11 @@ from nova import context import nova.db.api from nova import exception +from nova.i18n import _ from nova.network import rpcapi as network_rpcapi from nova import objects from nova.objects import base as objects_base from nova.objects import network as network_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py index 5f75cb0223..6cd599f3b0 100644 --- a/nova/cmd/manage.py +++ b/nova/cmd/manage.py @@ -72,10 +72,10 @@ from nova import db from nova.db import migration from nova import exception +from nova.i18n import _ from nova import objects from nova.openstack.common import cliutils from nova.openstack.common.db import exception as db_exc -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova import quota diff --git a/nova/cmd/network.py b/nova/cmd/network.py index 73d5c89bf2..490097aa71 100644 --- a/nova/cmd/network.py +++ b/nova/cmd/network.py @@ 
-25,9 +25,9 @@ from nova import config import nova.db.api from nova import exception +from nova.i18n import _ from nova import objects from nova.objects import base as objects_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service diff --git a/nova/compute/api.py b/nova/compute/api.py index 612e1c1da2..0f2e195bdf 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -43,6 +43,7 @@ from nova.db import base from nova import exception from nova import hooks +from nova.i18n import _ from nova import image from nova import network from nova.network import model as network_model @@ -54,7 +55,6 @@ from nova.objects import quotas as quotas_obj from nova.objects import security_group as security_group_obj from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova.openstack.common import timeutils diff --git a/nova/compute/claims.py b/nova/compute/claims.py index 27d8c0bc7b..046d171692 100644 --- a/nova/compute/claims.py +++ b/nova/compute/claims.py @@ -18,8 +18,8 @@ """ from nova import exception +from nova.i18n import _ from nova.objects import base as obj_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.pci import pci_request diff --git a/nova/compute/flavors.py b/nova/compute/flavors.py index e391943158..1a8760d408 100644 --- a/nova/compute/flavors.py +++ b/nova/compute/flavors.py @@ -27,8 +27,8 @@ from nova import context from nova import db from nova import exception +from nova.i18n import _ from nova.openstack.common.db import exception as db_exc -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import 
strutils from nova.pci import pci_request diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f71f7e4c84..c9e94ef345 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -57,6 +57,10 @@ import nova.context from nova import exception from nova import hooks +from nova.i18n import _ +from nova.i18n import _LE +from nova.i18n import _LI +from nova.i18n import _LW from nova import image from nova.image import glance from nova import manager @@ -68,10 +72,6 @@ from nova.objects import instance as instance_obj from nova.objects import quotas as quotas_obj from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ -from nova.openstack.common.gettextutils import _LE -from nova.openstack.common.gettextutils import _LI -from nova.openstack.common.gettextutils import _LW from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import periodic_task diff --git a/nova/compute/monitors/__init__.py b/nova/compute/monitors/__init__.py index 511738599f..fab30ada13 100644 --- a/nova/compute/monitors/__init__.py +++ b/nova/compute/monitors/__init__.py @@ -26,8 +26,8 @@ from oslo.config import cfg import six +from nova.i18n import _ from nova import loadables -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/compute/monitors/virt/cpu_monitor.py b/nova/compute/monitors/virt/cpu_monitor.py index 1cc92db370..9295120769 100644 --- a/nova/compute/monitors/virt/cpu_monitor.py +++ b/nova/compute/monitors/virt/cpu_monitor.py @@ -22,7 +22,7 @@ from nova.compute import monitors from nova.compute.monitors import cpu_monitor as monitor from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git 
a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py index aaf0eb3890..86949dd076 100644 --- a/nova/compute/resource_tracker.py +++ b/nova/compute/resource_tracker.py @@ -29,9 +29,9 @@ from nova import conductor from nova import context from nova import exception +from nova.i18n import _ from nova import objects from nova.objects import base as obj_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py index 7dc4543c50..5de33f0729 100644 --- a/nova/compute/rpcapi.py +++ b/nova/compute/rpcapi.py @@ -21,8 +21,8 @@ from nova import block_device from nova import exception +from nova.i18n import _ from nova.objects import base as objects_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova import rpc diff --git a/nova/compute/utils.py b/nova/compute/utils.py index a0161029f0..8836f5039d 100644 --- a/nova/compute/utils.py +++ b/nova/compute/utils.py @@ -25,11 +25,11 @@ from nova.compute import power_state from nova.compute import task_states from nova import exception +from nova.i18n import _ from nova.network import model as network_model from nova import notifications from nova import objects from nova.objects import base as obj_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log from nova import rpc from nova import utils diff --git a/nova/conductor/api.py b/nova/conductor/api.py index 576364c198..b19be6d67a 100644 --- a/nova/conductor/api.py +++ b/nova/conductor/api.py @@ -20,7 +20,7 @@ from nova import baserpc from nova.conductor import manager from nova.conductor import rpcapi -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova import utils diff --git 
a/nova/conductor/manager.py b/nova/conductor/manager.py index 99aefc6b2c..2166d3daa8 100644 --- a/nova/conductor/manager.py +++ b/nova/conductor/manager.py @@ -31,6 +31,7 @@ from nova.conductor.tasks import live_migrate from nova.db import base from nova import exception +from nova.i18n import _ from nova import image from nova import manager from nova import network @@ -40,7 +41,6 @@ from nova.objects import base as nova_object from nova.objects import quotas as quotas_obj from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py index ec349150fe..f92f94e22f 100644 --- a/nova/conductor/tasks/live_migrate.py +++ b/nova/conductor/tasks/live_migrate.py @@ -17,8 +17,8 @@ from nova.compute import utils as compute_utils from nova import db from nova import exception +from nova.i18n import _ from nova import image -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import rpcapi as scheduler_rpcapi from nova.scheduler import utils as scheduler_utils diff --git a/nova/console/websocketproxy.py b/nova/console/websocketproxy.py index d02dcd043b..fbc2be8933 100644 --- a/nova/console/websocketproxy.py +++ b/nova/console/websocketproxy.py @@ -25,7 +25,7 @@ from nova.consoleauth import rpcapi as consoleauth_rpcapi from nova import context -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/console/xvp.py b/nova/console/xvp.py index f55645b2d6..85e3f43235 100644 --- a/nova/console/xvp.py +++ b/nova/console/xvp.py @@ -23,8 +23,8 @@ from nova import context from nova import db +from nova.i18n import _ from nova.openstack.common import 
excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova import paths diff --git a/nova/consoleauth/manager.py b/nova/consoleauth/manager.py index 966b398a45..ff92f1b579 100644 --- a/nova/consoleauth/manager.py +++ b/nova/consoleauth/manager.py @@ -23,9 +23,9 @@ from nova.cells import rpcapi as cells_rpcapi from nova.compute import rpcapi as compute_rpcapi +from nova.i18n import _, _LW from nova import manager from nova import objects -from nova.openstack.common.gettextutils import _, _LW from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import memorycache diff --git a/nova/context.py b/nova/context.py index ae0d9b51fd..c512ad534c 100644 --- a/nova/context.py +++ b/nova/context.py @@ -23,7 +23,7 @@ import six from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import local from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/crypto.py b/nova/crypto.py index 8d6f8d4402..ebbc3068cd 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -35,9 +35,9 @@ from nova import context from nova import db from nova import exception +from nova.i18n import _ from nova.openstack.common import excutils from nova.openstack.common import fileutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova.openstack.common import timeutils diff --git a/nova/db/api.py b/nova/db/api.py index ee7ad22209..ee48e1151e 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -31,8 +31,8 @@ from oslo.config import cfg from nova.cells import rpcapi as cells_rpcapi +from nova.i18n import _ from nova.openstack.common.db import api as db_api -from nova.openstack.common.gettextutils import _ from 
nova.openstack.common import log as logging diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 0d5458b849..a333cb2721 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -52,11 +52,11 @@ import nova.context from nova.db.sqlalchemy import models from nova import exception +from nova.i18n import _ from nova.openstack.common.db import exception as db_exc from nova.openstack.common.db.sqlalchemy import session as db_session from nova.openstack.common.db.sqlalchemy import utils as sqlalchemyutils from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova.openstack.common import uuidutils diff --git a/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py b/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py index 8db11e692b..1712784e4b 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py @@ -20,7 +20,7 @@ from sqlalchemy import Text from sqlalchemy.types import NullType -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py index 2d140e67d9..388e6d2fee 100644 --- a/nova/db/sqlalchemy/migration.py +++ b/nova/db/sqlalchemy/migration.py @@ -23,7 +23,7 @@ from nova.db.sqlalchemy import api as db_session from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ INIT_VERSION = 215 _REPOSITORY = None diff --git a/nova/db/sqlalchemy/utils.py b/nova/db/sqlalchemy/utils.py index 79cecc171b..fcb33922a2 100644 --- a/nova/db/sqlalchemy/utils.py +++ b/nova/db/sqlalchemy/utils.py @@ -23,8 +23,8 @@ from nova.db.sqlalchemy import api as db from nova import exception +from nova.i18n import _ from 
nova.openstack.common.db.sqlalchemy import utils as oslodbutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging diff --git a/nova/debugger.py b/nova/debugger.py index 29d25d482f..8aa889f1a2 100644 --- a/nova/debugger.py +++ b/nova/debugger.py @@ -60,7 +60,7 @@ def init(): if not (CONF.remote_debug.host and CONF.remote_debug.port): return - from nova.openstack.common.gettextutils import _ + from nova.i18n import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/exception.py b/nova/exception.py index 8eff489bb2..64f79d5871 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -28,8 +28,8 @@ from oslo.config import cfg import webob.exc +from nova.i18n import _ from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import safe_utils diff --git a/nova/filters.py b/nova/filters.py index 0fcb8560c4..1ecd988249 100644 --- a/nova/filters.py +++ b/nova/filters.py @@ -17,8 +17,8 @@ Filter support """ +from nova.i18n import _ from nova import loadables -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/hooks.py b/nova/hooks.py index d56455e2a1..3c67bb3aed 100644 --- a/nova/hooks.py +++ b/nova/hooks.py @@ -46,7 +46,7 @@ def post(self, f, *args, **kwards): import stevedore -from nova.openstack.common.gettextutils import _LE +from nova.i18n import _LE from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/i18n.py b/nova/i18n.py new file mode 100644 index 0000000000..e3e5673398 --- /dev/null +++ b/nova/i18n.py @@ -0,0 +1,66 @@ +# Copyright 2014 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""oslo.i18n integration module. + +See http://docs.openstack.org/developer/oslo.i18n/usage.html . + +""" + +from oslo import i18n + +from nova.openstack.common import gettextutils + +DOMAIN = 'nova' + +_translators = i18n.TranslatorFactory(domain=DOMAIN) + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. The "L" is for "log" and the other letter comes from +# the level. +_LI = _translators.log_info +_LW = _translators.log_warning +_LE = _translators.log_error +_LC = _translators.log_critical + + +def translate(value, user_locale): +    return i18n.translate(value, user_locale) + + +def get_available_languages(): +    return i18n.get_available_languages(DOMAIN) + + +# Parts in oslo-incubator are still using gettextutils._(), _LI(), etc., from +# oslo-incubator. Until these parts are changed to use oslo.i18n, Nova +needs to do something to allow them to work. One option is to continue to +# initialize gettextutils, but with the way that Nova has initialization +# spread out over multiple entry points, we'll monkey-patch +# gettextutils._(), _LI(), etc., to use our oslo.i18n versions. 
+ +gettextutils._ = _ +gettextutils._LI = _LI +gettextutils._LW = _LW +gettextutils._LE = _LE +gettextutils._LC = _LC diff --git a/nova/image/download/__init__.py b/nova/image/download/__init__.py index ad0affb213..55d125b85c 100644 --- a/nova/image/download/__init__.py +++ b/nova/image/download/__init__.py @@ -19,7 +19,7 @@ import stevedore.driver import stevedore.extension -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ CONF = cfg.CONF LOG = logging.getLogger(__name__) diff --git a/nova/image/download/file.py b/nova/image/download/file.py index 7dc6316703..93ec551c65 100644 --- a/nova/image/download/file.py +++ b/nova/image/download/file.py @@ -18,8 +18,8 @@ from oslo.config import cfg from nova import exception +from nova.i18n import _ import nova.image.download.base as xfer_base -from nova.openstack.common.gettextutils import _ import nova.virt.libvirt.utils as lv_utils diff --git a/nova/image/glance.py b/nova/image/glance.py index de81d3d51d..7d20ad277c 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -31,8 +31,8 @@ import six.moves.urllib.parse as urlparse from nova import exception +from nova.i18n import _ import nova.image.download as image_xfers -from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/image/s3.py b/nova/image/s3.py index db54630bef..05a532baee 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -31,8 +31,8 @@ from nova.api.ec2 import ec2utils import nova.cert.rpcapi from nova import exception +from nova.i18n import _ from nova.image import glance -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova import utils diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py index db9c658968..23d77c7f95 100644 --- 
a/nova/ipv6/account_identifier.py +++ b/nova/ipv6/account_identifier.py @@ -21,7 +21,7 @@ import netaddr -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ def to_global(prefix, mac, project_id): diff --git a/nova/ipv6/rfc2462.py b/nova/ipv6/rfc2462.py index cda35b0a45..92746e5a07 100644 --- a/nova/ipv6/rfc2462.py +++ b/nova/ipv6/rfc2462.py @@ -19,7 +19,7 @@ import netaddr -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ def to_global(prefix, mac, project_id): diff --git a/nova/keymgr/conf_key_mgr.py b/nova/keymgr/conf_key_mgr.py index 4b9cb67208..3cb44a5a80 100644 --- a/nova/keymgr/conf_key_mgr.py +++ b/nova/keymgr/conf_key_mgr.py @@ -33,8 +33,8 @@ from oslo.config import cfg +from nova.i18n import _ from nova.keymgr import single_key_mgr -from nova.openstack.common.gettextutils import _ key_mgr_opts = [ cfg.StrOpt('fixed_key', diff --git a/nova/keymgr/mock_key_mgr.py b/nova/keymgr/mock_key_mgr.py index 51684fec46..af09b6877b 100644 --- a/nova/keymgr/mock_key_mgr.py +++ b/nova/keymgr/mock_key_mgr.py @@ -29,9 +29,9 @@ import array from nova import exception +from nova.i18n import _ from nova.keymgr import key from nova.keymgr import key_mgr -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import uuidutils from nova import utils diff --git a/nova/keymgr/single_key_mgr.py b/nova/keymgr/single_key_mgr.py index b6d4f35d01..33c24c0e63 100644 --- a/nova/keymgr/single_key_mgr.py +++ b/nova/keymgr/single_key_mgr.py @@ -20,8 +20,8 @@ from nova import exception +from nova.i18n import _ from nova.keymgr import mock_key_mgr -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging diff --git a/nova/network/api.py b/nova/network/api.py index 36c99a31e4..2aec7e8f8e 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -20,13 +20,13 @@ from nova.compute import flavors from nova import exception +from 
nova.i18n import _ from nova.network import base_api from nova.network import floating_ips from nova.network import model as network_model from nova.network import rpcapi as network_rpcapi from nova import objects from nova.objects import base as obj_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import policy from nova import utils diff --git a/nova/network/base_api.py b/nova/network/base_api.py index 54674e1244..dd712ac419 100644 --- a/nova/network/base_api.py +++ b/nova/network/base_api.py @@ -18,10 +18,10 @@ from nova.db import base from nova import hooks +from nova.i18n import _ from nova.network import model as network_model from nova import objects from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging diff --git a/nova/network/driver.py b/nova/network/driver.py index 973ec30f8e..04e7d607f8 100644 --- a/nova/network/driver.py +++ b/nova/network/driver.py @@ -16,7 +16,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging diff --git a/nova/network/floating_ips.py b/nova/network/floating_ips.py index 17235f6e6e..9782f673f2 100644 --- a/nova/network/floating_ips.py +++ b/nova/network/floating_ips.py @@ -21,10 +21,10 @@ from nova import context from nova.db import base from nova import exception +from nova.i18n import _ from nova.network import rpcapi as network_rpcapi from nova import objects from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import processutils diff --git a/nova/network/ldapdns.py b/nova/network/ldapdns.py index 4d5bb0c453..488467f119 100644 --- a/nova/network/ldapdns.py +++ 
b/nova/network/ldapdns.py @@ -23,8 +23,8 @@ from oslo.config import cfg from nova import exception +from nova.i18n import _ from nova.network import dns_driver -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import utils diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index cc099801db..84885be6d5 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -27,10 +27,10 @@ import six from nova import exception +from nova.i18n import _ from nova import objects from nova.openstack.common import excutils from nova.openstack.common import fileutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging diff --git a/nova/network/manager.py b/nova/network/manager.py index a85b6ba991..dae573feea 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -56,6 +56,7 @@ from nova import conductor from nova import context from nova import exception +from nova.i18n import _ from nova import ipv6 from nova import manager from nova.network import api as network_api @@ -67,7 +68,6 @@ from nova.objects import base as obj_base from nova.objects import quotas as quotas_obj from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import periodic_task diff --git a/nova/network/minidns.py b/nova/network/minidns.py index 6c1dce9ce3..2f9c388a5a 100644 --- a/nova/network/minidns.py +++ b/nova/network/minidns.py @@ -19,8 +19,8 @@ from oslo.config import cfg from nova import exception +from nova.i18n import _ from nova.network import dns_driver -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging CONF = cfg.CONF diff --git a/nova/network/model.py 
b/nova/network/model.py index 9febd55984..2af5e0542e 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -20,7 +20,7 @@ import six from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import jsonutils diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index 2cc2123f9f..f26224de87 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -24,13 +24,13 @@ from nova.compute import utils as compute_utils from nova import conductor from nova import exception +from nova.i18n import _ from nova.network import base_api from nova.network import model as network_model from nova.network import neutronv2 from nova.network.neutronv2 import constants from nova.network.security_group import openstack_driver from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import uuidutils diff --git a/nova/network/security_group/neutron_driver.py b/nova/network/security_group/neutron_driver.py index 2c07a2dc37..de88f13203 100644 --- a/nova/network/security_group/neutron_driver.py +++ b/nova/network/security_group/neutron_driver.py @@ -23,11 +23,11 @@ from nova.compute import api as compute_api from nova import exception +from nova.i18n import _ from nova.network import neutronv2 from nova.network.security_group import security_group_base from nova import objects from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import uuidutils from nova import utils diff --git a/nova/network/security_group/security_group_base.py b/nova/network/security_group/security_group_base.py index 6710b2d2af..f8a2aa1c8b 100644 --- a/nova/network/security_group/security_group_base.py +++ b/nova/network/security_group/security_group_base.py @@ -22,7 +22,7 
@@ from oslo.config import cfg from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import utils CONF = cfg.CONF diff --git a/nova/notifications.py b/nova/notifications.py index f48542edfb..8c43e5959c 100644 --- a/nova/notifications.py +++ b/nova/notifications.py @@ -25,13 +25,13 @@ from nova.compute import flavors import nova.context from nova import db +from nova.i18n import _ from nova.image import glance from nova import network from nova.network import model as network_model from nova.objects import base as obj_base from nova.openstack.common import context as common_context from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log from nova.openstack.common import timeutils from nova import rpc diff --git a/nova/objects/base.py b/nova/objects/base.py index 79d07b2a0d..63a2705aaa 100644 --- a/nova/objects/base.py +++ b/nova/objects/base.py @@ -24,9 +24,9 @@ from nova import context from nova import exception +from nova.i18n import _ from nova import objects from nova.objects import fields -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import versionutils diff --git a/nova/objects/block_device.py b/nova/objects/block_device.py index 40d06e927e..18c2b6fda7 100644 --- a/nova/objects/block_device.py +++ b/nova/objects/block_device.py @@ -17,10 +17,10 @@ from nova.cells import rpcapi as cells_rpcapi from nova import db from nova import exception +from nova.i18n import _ from nova import objects from nova.objects import base from nova.objects import fields -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging diff --git a/nova/objects/fields.py b/nova/objects/fields.py index 57936a28e9..fad6011c5e 100644 --- a/nova/objects/fields.py +++ b/nova/objects/fields.py @@ -19,8 +19,8 @@ import netaddr import six +from 
nova.i18n import _ from nova.network import model as network_model -from nova.openstack.common.gettextutils import _ from nova.openstack.common import timeutils diff --git a/nova/objects/instance.py b/nova/objects/instance.py index bd046304b5..5c7fe317ac 100644 --- a/nova/objects/instance.py +++ b/nova/objects/instance.py @@ -18,11 +18,11 @@ from nova.compute import flavors from nova import db from nova import exception +from nova.i18n import _ from nova import notifications from nova import objects from nova.objects import base from nova.objects import fields -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova import utils diff --git a/nova/objects/instance_fault.py b/nova/objects/instance_fault.py index 9be7a4e81e..7c2b7a1fd8 100644 --- a/nova/objects/instance_fault.py +++ b/nova/objects/instance_fault.py @@ -18,10 +18,10 @@ from nova.cells import rpcapi as cells_rpcapi from nova import db from nova import exception +from nova.i18n import _LE from nova import objects from nova.objects import base from nova.objects import fields -from nova.openstack.common.gettextutils import _LE from nova.openstack.common import log as logging diff --git a/nova/objects/instance_info_cache.py b/nova/objects/instance_info_cache.py index 1c6f57b123..10b128e0e8 100644 --- a/nova/objects/instance_info_cache.py +++ b/nova/objects/instance_info_cache.py @@ -16,9 +16,9 @@ from nova.cells import rpcapi as cells_rpcapi from nova import db from nova import exception +from nova.i18n import _ from nova.objects import base from nova.objects import fields -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/pci/pci_manager.py b/nova/pci/pci_manager.py index bdbad0d745..e99117b881 100644 --- a/nova/pci/pci_manager.py +++ b/nova/pci/pci_manager.py @@ -20,9 +20,9 @@ from nova.compute import vm_states 
from nova import context from nova import exception +from nova.i18n import _ from nova.objects import instance from nova.objects import pci_device as pci_device_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.pci import pci_device from nova.pci import pci_request diff --git a/nova/quota.py b/nova/quota.py index bd3d364b8a..4e1644a178 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -23,8 +23,8 @@ from nova import db from nova import exception +from nova.i18n import _ from nova.objects import keypair as keypair_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py index bb04eb9c3d..65a24030c0 100644 --- a/nova/scheduler/chance.py +++ b/nova/scheduler/chance.py @@ -25,7 +25,7 @@ from nova.compute import rpcapi as compute_rpcapi from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.scheduler import driver CONF = cfg.CONF diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index cb1942262f..3766ec2e85 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -27,8 +27,8 @@ from nova.compute import vm_states from nova import db from nova import exception +from nova.i18n import _ from nova import notifications -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py index 96883b08c8..415f46d32d 100644 --- a/nova/scheduler/filter_scheduler.py +++ b/nova/scheduler/filter_scheduler.py @@ -25,8 +25,8 @@ from nova.compute import rpcapi as compute_rpcapi from nova import exception +from nova.i18n import _ from 
nova.objects import instance_group as instance_group_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.pci import pci_request from nova import rpc diff --git a/nova/scheduler/filters/compute_filter.py b/nova/scheduler/filters/compute_filter.py index 94aa2490bb..9b7022401d 100644 --- a/nova/scheduler/filters/compute_filter.py +++ b/nova/scheduler/filters/compute_filter.py @@ -15,7 +15,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.scheduler import filters from nova import servicegroup diff --git a/nova/scheduler/filters/core_filter.py b/nova/scheduler/filters/core_filter.py index 45d97b9ba7..0c807c3fde 100644 --- a/nova/scheduler/filters/core_filter.py +++ b/nova/scheduler/filters/core_filter.py @@ -17,7 +17,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _LW +from nova.i18n import _LW from nova.openstack.common import log as logging from nova.scheduler import filters from nova.scheduler.filters import utils diff --git a/nova/scheduler/filters/ram_filter.py b/nova/scheduler/filters/ram_filter.py index 9afcceaa97..4677d2feb3 100644 --- a/nova/scheduler/filters/ram_filter.py +++ b/nova/scheduler/filters/ram_filter.py @@ -16,7 +16,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _LW +from nova.i18n import _LW from nova.openstack.common import log as logging from nova.scheduler import filters from nova.scheduler.filters import utils diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py index 29cd4f4f5e..af3c2f899b 100644 --- a/nova/scheduler/filters/trusted_filter.py +++ b/nova/scheduler/filters/trusted_filter.py @@ -50,7 +50,7 @@ from nova import context from nova import db -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import 
jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/scheduler/filters/utils.py b/nova/scheduler/filters/utils.py index 580b2cb385..151811f4d9 100644 --- a/nova/scheduler/filters/utils.py +++ b/nova/scheduler/filters/utils.py @@ -15,8 +15,8 @@ """Bench of utility methods used by filters.""" +from nova.i18n import _LI from nova.objects import aggregate -from nova.openstack.common.gettextutils import _LI from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py index 227efa30cd..9cea311ebf 100644 --- a/nova/scheduler/host_manager.py +++ b/nova/scheduler/host_manager.py @@ -26,7 +26,7 @@ from nova.compute import vm_states from nova import db from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/scheduler/scheduler_options.py b/nova/scheduler/scheduler_options.py index 7dbbcd56e8..48019d5187 100644 --- a/nova/scheduler/scheduler_options.py +++ b/nova/scheduler/scheduler_options.py @@ -26,8 +26,8 @@ from oslo.config import cfg +from nova.i18n import _ from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py index 2d2f1618d0..2e7f4b67c5 100644 --- a/nova/scheduler/utils.py +++ b/nova/scheduler/utils.py @@ -22,9 +22,9 @@ from nova.compute import utils as compute_utils from nova import db from nova import exception +from nova.i18n import _ from nova import notifications from nova.objects import base as obj_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from 
nova.openstack.common import log as logging from nova import rpc diff --git a/nova/service.py b/nova/service.py index 8618e7c3e3..cdb0b1f117 100644 --- a/nova/service.py +++ b/nova/service.py @@ -29,8 +29,8 @@ from nova import context from nova import debugger from nova import exception +from nova.i18n import _ from nova.objects import base as objects_base -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import processutils diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py index 0016c5c53e..bbb0fd55fd 100644 --- a/nova/servicegroup/api.py +++ b/nova/servicegroup/api.py @@ -20,7 +20,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova import utils diff --git a/nova/servicegroup/drivers/db.py b/nova/servicegroup/drivers/db.py index e7f74e7069..bf45d1ada5 100644 --- a/nova/servicegroup/drivers/db.py +++ b/nova/servicegroup/drivers/db.py @@ -18,7 +18,7 @@ from nova import conductor from nova import context -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova.servicegroup import api diff --git a/nova/servicegroup/drivers/mc.py b/nova/servicegroup/drivers/mc.py index e83163ff73..3d643bb20c 100644 --- a/nova/servicegroup/drivers/mc.py +++ b/nova/servicegroup/drivers/mc.py @@ -21,7 +21,7 @@ from nova import conductor from nova import context -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import memorycache from nova.openstack.common import timeutils diff --git a/nova/servicegroup/drivers/zk.py b/nova/servicegroup/drivers/zk.py index a7a8b7b465..9ba3ae64f9 
100644 --- a/nova/servicegroup/drivers/zk.py +++ b/nova/servicegroup/drivers/zk.py @@ -20,7 +20,7 @@ from oslo.config import cfg from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import loopingcall diff --git a/nova/storage/linuxscsi.py b/nova/storage/linuxscsi.py index 09669fa565..f261094a13 100644 --- a/nova/storage/linuxscsi.py +++ b/nova/storage/linuxscsi.py @@ -14,7 +14,7 @@ """Generic linux scsi subsystem utilities.""" -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import loopingcall from nova.openstack.common import processutils diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index 8f6a6e5889..7631727caf 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -42,12 +42,12 @@ from nova import db from nova.db.sqlalchemy import models from nova import exception +from nova.i18n import _ from nova.image import glance from nova.network import manager from nova.network.neutronv2 import api as neutron_api from nova import objects from nova.objects import instance as instance_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import policy as common_policy from nova.openstack.common import timeutils diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index 7913bff3a5..8168a169c9 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -43,12 +43,12 @@ from nova import db from nova.db.sqlalchemy import models from nova import 
exception +from nova.i18n import _ from nova.image import glance from nova.network import manager from nova.network.neutronv2 import api as neutron_api from nova import objects from nova.objects import instance as instance_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import policy as common_policy from nova.openstack.common import timeutils diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 86b7bb8829..a41e48f484 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -719,7 +719,7 @@ def stub_bdm_get_all_by_instance(context, instance_uuid, use_slave=False): 'volume_id': 'volume_id2', 'instance_uuid': instance_uuid})] -def fake_get_available_languages(domain): +def fake_get_available_languages(): existing_translations = ['en_GB', 'en_AU', 'de', 'zh_CN', 'en_US'] return existing_translations diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py index 81d4f860ff..889f79b57b 100644 --- a/nova/tests/api/openstack/test_faults.py +++ b/nova/tests/api/openstack/test_faults.py @@ -25,7 +25,7 @@ from nova.api.openstack import common from nova.api.openstack import wsgi from nova import exception -from nova.openstack.common import gettextutils +from nova import i18n from nova.openstack.common import jsonutils from nova import test @@ -33,32 +33,34 @@ class TestFaultWrapper(test.NoDBTestCase): """Tests covering `nova.api.openstack:FaultWrapper` class.""" - @mock.patch('nova.openstack.common.gettextutils.translate') - def test_safe_exception_translated(self, mock_translate): - msg = gettextutils.Message('Should be translated.', domain='nova') - safe_exception = exception.NotFound() - safe_exception.msg_fmt = msg + @mock.patch('oslo.i18n.translate') + @mock.patch('nova.i18n.get_available_languages') + def test_safe_exception_translated(self, mock_languages, mock_translate): + def 
fake_translate(value, locale): + return "I've been translated!" + + mock_translate.side_effect = fake_translate + + # Create an exception, passing a translatable message with a + # known value we can test for later. + safe_exception = exception.NotFound(i18n._('Should be translated.')) safe_exception.safe = True safe_exception.code = 404 req = webob.Request.blank('/') - def fake_translate(mesg, locale): - if mesg == "Should be translated.": - return "I've been translated!" - return mesg - - mock_translate.side_effect = fake_translate - def raiser(*args, **kwargs): raise safe_exception wrapper = nova.api.openstack.FaultWrapper(raiser) response = req.get_response(wrapper) + # The text of the exception's message attribute (replaced + # above with a non-default value) should be passed to + # translate(). + mock_translate.assert_any_call(u'Should be translated.', None) + # The return value from translate() should appear in the response. self.assertIn("I've been translated!", unicode(response.body)) - mock_translate.assert_any_call( - u'Should be translated.', None) class TestFaults(test.NoDBTestCase): @@ -175,7 +177,7 @@ def raiser(req): def test_raise_localize_explanation(self): msgid = "String with params: %s" params = ('blah', ) - lazy_gettext = gettextutils._ + lazy_gettext = i18n._ expl = lazy_gettext(msgid) % params @webob.dec.wsgify diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index ebbbf06646..e4adbeeea2 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -17,7 +17,7 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova import exception -from nova.openstack.common import gettextutils +from nova import i18n from nova import test from nova.tests.api.openstack import fakes from nova.tests import utils @@ -132,7 +132,7 @@ def test_cache_and_retrieve_compute_nodes(self): 'id2': compute_nodes[2]}) def test_from_request(self): - 
self.stubs.Set(gettextutils, 'get_available_languages', + self.stubs.Set(i18n, 'get_available_languages', fakes.fake_get_available_languages) request = wsgi.Request.blank('/') @@ -143,7 +143,7 @@ def test_from_request(self): def test_asterisk(self): # asterisk should match first available if there # are not any other available matches - self.stubs.Set(gettextutils, 'get_available_languages', + self.stubs.Set(i18n, 'get_available_languages', fakes.fake_get_available_languages) request = wsgi.Request.blank('/') @@ -152,7 +152,7 @@ def test_asterisk(self): self.assertEqual(request.best_match_language(), 'en_GB') def test_prefix(self): - self.stubs.Set(gettextutils, 'get_available_languages', + self.stubs.Set(i18n, 'get_available_languages', fakes.fake_get_available_languages) request = wsgi.Request.blank('/') @@ -161,7 +161,7 @@ def test_prefix(self): self.assertEqual(request.best_match_language(), 'zh_CN') def test_secondary(self): - self.stubs.Set(gettextutils, 'get_available_languages', + self.stubs.Set(i18n, 'get_available_languages', fakes.fake_get_available_languages) request = wsgi.Request.blank('/') @@ -170,7 +170,7 @@ def test_secondary(self): self.assertEqual(request.best_match_language(), 'en_GB') def test_none_found(self): - self.stubs.Set(gettextutils, 'get_available_languages', + self.stubs.Set(i18n, 'get_available_languages', fakes.fake_get_available_languages) request = wsgi.Request.blank('/') @@ -179,7 +179,7 @@ def test_none_found(self): self.assertIs(request.best_match_language(), None) def test_no_lang_header(self): - self.stubs.Set(gettextutils, 'get_available_languages', + self.stubs.Set(i18n, 'get_available_languages', fakes.fake_get_available_languages) request = wsgi.Request.blank('/') diff --git a/nova/tests/api/test_auth.py b/nova/tests/api/test_auth.py index 8197909192..992ba48942 100644 --- a/nova/tests/api/test_auth.py +++ b/nova/tests/api/test_auth.py @@ -19,7 +19,7 @@ import webob.exc import nova.api.auth -from 
nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common.middleware import request_id from nova import test diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index ce2c2d5c0f..eed33ea44d 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -51,6 +51,7 @@ from nova import context from nova import db from nova import exception +from nova.i18n import _ from nova.image import glance from nova.network import api as network_api from nova.network import model as network_model @@ -59,7 +60,6 @@ from nova.objects import base as obj_base from nova.objects import block_device as block_device_obj from nova.objects import instance as instance_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging diff --git a/nova/tests/compute/test_keypairs.py b/nova/tests/compute/test_keypairs.py index 8b8f8c10b5..f2213af1f5 100644 --- a/nova/tests/compute/test_keypairs.py +++ b/nova/tests/compute/test_keypairs.py @@ -20,7 +20,7 @@ from nova import context from nova import db from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import quota from nova.tests.compute import test_compute from nova.tests import fake_notifier diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py index aed05d7b4b..b2cf0f3a16 100644 --- a/nova/tests/db/test_migrations.py +++ b/nova/tests/db/test_migrations.py @@ -53,8 +53,8 @@ import nova.db.sqlalchemy.migrate_repo from nova.db.sqlalchemy import utils as db_utils +from nova.i18n import _ from nova.openstack.common.db.sqlalchemy import utils as oslodbutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova import test diff --git 
a/nova/tests/fake_ldap.py b/nova/tests/fake_ldap.py index e3e6d77080..5e3a1cc7a3 100644 --- a/nova/tests/fake_ldap.py +++ b/nova/tests/fake_ldap.py @@ -23,7 +23,7 @@ class definitions. It implements the minimum emulation of the python ldap import fnmatch -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import jsonutils diff --git a/nova/tests/fake_volume.py b/nova/tests/fake_volume.py index 5318e86f0b..e37da85c85 100644 --- a/nova/tests/fake_volume.py +++ b/nova/tests/fake_volume.py @@ -17,7 +17,7 @@ from oslo.config import cfg from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py index da80e5bd3b..0e80d98baf 100644 --- a/nova/tests/integrated/api/client.py +++ b/nova/tests/integrated/api/client.py @@ -17,7 +17,7 @@ import six.moves.urllib.parse as urlparse -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.tests.image import fake diff --git a/nova/tests/integrated/api_samples_test_base.py b/nova/tests/integrated/api_samples_test_base.py index 6a0f372460..8932adbee2 100644 --- a/nova/tests/integrated/api_samples_test_base.py +++ b/nova/tests/integrated/api_samples_test_base.py @@ -19,7 +19,7 @@ from lxml import etree import six -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova import test diff --git a/nova/tests/test_exception.py b/nova/tests/test_exception.py index a7c75f8f6c..bf5264d2ca 100644 --- a/nova/tests/test_exception.py +++ b/nova/tests/test_exception.py @@ -18,7 +18,6 @@ from nova import context from nova import exception -from 
nova.openstack.common import gettextutils from nova import test @@ -140,16 +139,6 @@ def __unicode__(self): exc = FakeNovaException_Remote(lame_arg='lame') self.assertEqual(exc.format_message(), "some message %(somearg)s") - def test_format_message_gettext_msg_returned(self): - class FakeNovaException(exception.NovaException): - msg_fmt = gettextutils.Message("Some message %(param)s", - domain='nova') - - exc = FakeNovaException(param='blah') - msg = exc.format_message() - self.assertIsInstance(msg, gettextutils.Message) - self.assertEqual(msg, "Some message blah") - class ExceptionTestCase(test.NoDBTestCase): @staticmethod diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py index 4877b135f3..50bb938d77 100644 --- a/nova/tests/test_nova_manage.py +++ b/nova/tests/test_nova_manage.py @@ -22,7 +22,7 @@ from nova import context from nova import db from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import test from nova.tests.db import fakes as db_fakes from nova.tests.objects import test_network diff --git a/nova/tests/virt/hyperv/test_hypervapi.py b/nova/tests/virt/hyperv/test_hypervapi.py index 9b6cf1e15e..04542c9c7e 100644 --- a/nova/tests/virt/hyperv/test_hypervapi.py +++ b/nova/tests/virt/hyperv/test_hypervapi.py @@ -34,8 +34,8 @@ from nova import context from nova import db from nova import exception +from nova.i18n import _ from nova.image import glance -from nova.openstack.common.gettextutils import _ from nova.openstack.common import units from nova import test from nova.tests import fake_network diff --git a/nova/tests/virt/libvirt/fakelibvirt.py b/nova/tests/virt/libvirt/fakelibvirt.py index e3a3db978e..378e795848 100644 --- a/nova/tests/virt/libvirt/fakelibvirt.py +++ b/nova/tests/virt/libvirt/fakelibvirt.py @@ -17,7 +17,7 @@ import time import uuid -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ # Allow passing None to the various connect 
methods # (i.e. allow the client to rely on default URLs) diff --git a/nova/tests/virt/vmwareapi/fake.py b/nova/tests/virt/vmwareapi/fake.py index 5f46abb581..deb576b90f 100644 --- a/nova/tests/virt/vmwareapi/fake.py +++ b/nova/tests/virt/vmwareapi/fake.py @@ -23,7 +23,7 @@ import pprint from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import units diff --git a/nova/tests/virt/vmwareapi/test_ds_util.py b/nova/tests/virt/vmwareapi/test_ds_util.py index 2b957e1c5f..0a78b62c0c 100644 --- a/nova/tests/virt/vmwareapi/test_ds_util.py +++ b/nova/tests/virt/vmwareapi/test_ds_util.py @@ -18,7 +18,7 @@ import mock from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import units from nova import test from nova.tests.virt.vmwareapi import fake diff --git a/nova/tests/virt/xenapi/image/test_bittorrent.py b/nova/tests/virt/xenapi/image/test_bittorrent.py index e3a70c5a24..2ebb52f79f 100644 --- a/nova/tests/virt/xenapi/image/test_bittorrent.py +++ b/nova/tests/virt/xenapi/image/test_bittorrent.py @@ -17,7 +17,7 @@ import pkg_resources from nova import context -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import test from nova.tests.virt.xenapi import stubs from nova.virt.xenapi import driver as xenapi_conn diff --git a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py index b302ebb82c..b8d1fca7de 100644 --- a/nova/tests/virt/xenapi/test_vm_utils.py +++ b/nova/tests/virt/xenapi/test_vm_utils.py @@ -27,7 +27,7 @@ from nova.compute import vm_mode from nova import context from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import processutils from nova.openstack.common import timeutils from 
nova.openstack.common import units diff --git a/nova/utils.py b/nova/utils.py index 887617941d..1ca10b8e1b 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -41,9 +41,8 @@ import six from nova import exception +from nova.i18n import _ from nova.openstack.common import excutils -from nova.openstack.common import gettextutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import lockutils from nova.openstack.common import log as logging @@ -444,8 +443,6 @@ def utf8(value): """ if isinstance(value, unicode): return value.encode('utf-8') - elif isinstance(value, gettextutils.Message): - return unicode(value).encode('utf-8') assert isinstance(value, str) return value diff --git a/nova/version.py b/nova/version.py index 4d6faa1e08..7c2a71b39d 100644 --- a/nova/version.py +++ b/nova/version.py @@ -14,7 +14,7 @@ import pbr.version -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ NOVA_VENDOR = "OpenStack Foundation" NOVA_PRODUCT = "OpenStack Nova" diff --git a/nova/virt/baremetal/common.py b/nova/virt/baremetal/common.py index de0f6d8e0c..94165007e6 100644 --- a/nova/virt/baremetal/common.py +++ b/nova/virt/baremetal/common.py @@ -15,7 +15,7 @@ import paramiko from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/virt/baremetal/db/sqlalchemy/api.py b/nova/virt/baremetal/db/sqlalchemy/api.py index 35e70a4be8..6fd916b0ab 100644 --- a/nova/virt/baremetal/db/sqlalchemy/api.py +++ b/nova/virt/baremetal/db/sqlalchemy/api.py @@ -26,8 +26,8 @@ import nova.context from nova.db.sqlalchemy import api as sqlalchemy_api from nova import exception +from nova.i18n import _ from nova.openstack.common.db import exception as db_exc -from nova.openstack.common.gettextutils import _ from nova.openstack.common import timeutils from 
nova.openstack.common import uuidutils from nova.virt.baremetal.db.sqlalchemy import models diff --git a/nova/virt/baremetal/db/sqlalchemy/migration.py b/nova/virt/baremetal/db/sqlalchemy/migration.py index 39212e668c..27beb89f16 100644 --- a/nova/virt/baremetal/db/sqlalchemy/migration.py +++ b/nova/virt/baremetal/db/sqlalchemy/migration.py @@ -22,7 +22,7 @@ import sqlalchemy from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.virt.baremetal.db.sqlalchemy import session INIT_VERSION = 0 diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py index 705494145a..228267e300 100644 --- a/nova/virt/baremetal/driver.py +++ b/nova/virt/baremetal/driver.py @@ -27,8 +27,8 @@ from nova.compute import task_states from nova import context as nova_context from nova import exception +from nova.i18n import _ from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import lockutils diff --git a/nova/virt/baremetal/iboot_pdu.py b/nova/virt/baremetal/iboot_pdu.py index f32fa1d03e..16d037b38c 100644 --- a/nova/virt/baremetal/iboot_pdu.py +++ b/nova/virt/baremetal/iboot_pdu.py @@ -16,7 +16,7 @@ # iBoot Power Driver from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.virt.baremetal import baremetal_states diff --git a/nova/virt/baremetal/ipmi.py b/nova/virt/baremetal/ipmi.py index f13b0de8a9..473ad58dc0 100644 --- a/nova/virt/baremetal/ipmi.py +++ b/nova/virt/baremetal/ipmi.py @@ -27,7 +27,7 @@ from oslo.config import cfg from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import 
loopingcall from nova import paths diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py index f7c8ff644d..0d156f7ec9 100644 --- a/nova/virt/baremetal/pxe.py +++ b/nova/virt/baremetal/pxe.py @@ -26,10 +26,10 @@ from nova.compute import flavors from nova import exception +from nova.i18n import _ from nova.objects import flavor as flavor_obj from nova.openstack.common.db import exception as db_exc from nova.openstack.common import fileutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import loopingcall from nova.openstack.common import timeutils diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py index 67c562598b..7588a97ece 100644 --- a/nova/virt/baremetal/tilera.py +++ b/nova/virt/baremetal/tilera.py @@ -25,9 +25,9 @@ from nova.compute import flavors from nova import exception +from nova.i18n import _ from nova.openstack.common.db import exception as db_exc from nova.openstack.common import fileutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import utils from nova.virt.baremetal import baremetal_states diff --git a/nova/virt/baremetal/tilera_pdu.py b/nova/virt/baremetal/tilera_pdu.py index 87d8435bf4..dc84c00518 100644 --- a/nova/virt/baremetal/tilera_pdu.py +++ b/nova/virt/baremetal/tilera_pdu.py @@ -24,7 +24,7 @@ from oslo.config import cfg from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova import utils diff --git a/nova/virt/baremetal/utils.py b/nova/virt/baremetal/utils.py index c54d3c14dd..0de4e27ae0 100644 --- a/nova/virt/baremetal/utils.py +++ b/nova/virt/baremetal/utils.py @@ -18,7 +18,7 @@ import os import shutil -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common 
import log as logging from nova.virt.disk import api as disk_api from nova.virt.libvirt import utils as libvirt_utils diff --git a/nova/virt/baremetal/vif_driver.py b/nova/virt/baremetal/vif_driver.py index 04255d08a3..f71cd39835 100644 --- a/nova/virt/baremetal/vif_driver.py +++ b/nova/virt/baremetal/vif_driver.py @@ -17,7 +17,7 @@ from nova import context from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.virt.baremetal import db as bmdb diff --git a/nova/virt/baremetal/virtual_power_driver.py b/nova/virt/baremetal/virtual_power_driver.py index 34e9e0e5bd..7ff6703409 100644 --- a/nova/virt/baremetal/virtual_power_driver.py +++ b/nova/virt/baremetal/virtual_power_driver.py @@ -19,7 +19,7 @@ from nova import context as nova_context from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import processutils diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py index 3cbe878770..f07e988e70 100644 --- a/nova/virt/baremetal/volume_driver.py +++ b/nova/virt/baremetal/volume_driver.py @@ -21,8 +21,8 @@ from nova import context as nova_context from nova import exception +from nova.i18n import _ from nova import network -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import processutils diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py index 67b3064021..02e10be96a 100644 --- a/nova/virt/block_device.py +++ b/nova/virt/block_device.py @@ -16,10 +16,10 @@ import operator from nova import block_device +from nova.i18n import _ from nova import objects from nova.objects import base as obj_base from nova.openstack.common import 
excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.volume import encryptors diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py index cdc103443a..9a067b9897 100644 --- a/nova/virt/disk/api.py +++ b/nova/virt/disk/api.py @@ -33,7 +33,7 @@ from oslo.config import cfg from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import processutils diff --git a/nova/virt/disk/mount/api.py b/nova/virt/disk/mount/api.py index 37c50b5450..066842b18e 100644 --- a/nova/virt/disk/mount/api.py +++ b/nova/virt/disk/mount/api.py @@ -16,7 +16,7 @@ import os import time -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova import utils diff --git a/nova/virt/disk/mount/loop.py b/nova/virt/disk/mount/loop.py index 7a0321a153..d0a157e742 100644 --- a/nova/virt/disk/mount/loop.py +++ b/nova/virt/disk/mount/loop.py @@ -13,7 +13,7 @@ # under the License. 
"""Support for mounting images with the loop device.""" -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova import utils from nova.virt.disk.mount import api diff --git a/nova/virt/disk/mount/nbd.py b/nova/virt/disk/mount/nbd.py index 7b2d0add94..80ab966058 100644 --- a/nova/virt/disk/mount/nbd.py +++ b/nova/virt/disk/mount/nbd.py @@ -20,7 +20,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova import utils from nova.virt.disk.mount import api diff --git a/nova/virt/disk/vfs/guestfs.py b/nova/virt/disk/vfs/guestfs.py index c99dce829e..95e611561a 100644 --- a/nova/virt/disk/vfs/guestfs.py +++ b/nova/virt/disk/vfs/guestfs.py @@ -15,7 +15,7 @@ from eventlet import tpool from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.virt.disk.vfs import api as vfs diff --git a/nova/virt/disk/vfs/localfs.py b/nova/virt/disk/vfs/localfs.py index 314295c80a..242db7639e 100644 --- a/nova/virt/disk/vfs/localfs.py +++ b/nova/virt/disk/vfs/localfs.py @@ -16,8 +16,8 @@ import tempfile from nova import exception +from nova.i18n import _ from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import utils from nova.virt.disk.mount import loop diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 95459d1eba..f12a439678 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -24,7 +24,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova import utils diff --git a/nova/virt/event.py b/nova/virt/event.py index 
51db4cd1b9..02b9cddbd5 100644 --- a/nova/virt/event.py +++ b/nova/virt/event.py @@ -22,7 +22,7 @@ import time -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ EVENT_LIFECYCLE_STARTED = 0 EVENT_LIFECYCLE_STOPPED = 1 diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 21d74633b6..eba5b5164b 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -31,7 +31,7 @@ from nova.compute import task_states from nova import db from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import utils diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py index cd3c75b66c..ed3ff026af 100644 --- a/nova/virt/firewall.py +++ b/nova/virt/firewall.py @@ -19,12 +19,12 @@ from nova.compute import utils as compute_utils from nova import context +from nova.i18n import _ +from nova.i18n import _LI from nova.network import linux_net from nova import objects from nova.objects import security_group as security_group_obj from nova.objects import security_group_rule as security_group_rule_obj -from nova.openstack.common.gettextutils import _ -from nova.openstack.common.gettextutils import _LI from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova import utils diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py index 2e5a1deca2..5670fd5bd1 100644 --- a/nova/virt/hardware.py +++ b/nova/virt/hardware.py @@ -17,7 +17,7 @@ from oslo.config import cfg from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging virt_cpu_opts = [ diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py index 359417e500..404d8536c6 100644 --- a/nova/virt/hyperv/basevolumeutils.py +++ b/nova/virt/hyperv/basevolumeutils.py @@ -28,7 +28,7 @@ import wmi from nova import 
block_device -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.virt import driver diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py index daea18b959..dc0f2fa889 100644 --- a/nova/virt/hyperv/driver.py +++ b/nova/virt/hyperv/driver.py @@ -17,7 +17,7 @@ A Hyper-V Nova Compute driver. """ -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.virt import driver from nova.virt.hyperv import hostops diff --git a/nova/virt/hyperv/imagecache.py b/nova/virt/hyperv/imagecache.py index c4b86e2b6c..88eacb14fd 100644 --- a/nova/virt/hyperv/imagecache.py +++ b/nova/virt/hyperv/imagecache.py @@ -20,8 +20,8 @@ from oslo.config import cfg from nova.compute import flavors +from nova.i18n import _ from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import units from nova import utils diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py index d8bc81e7f1..5411967498 100644 --- a/nova/virt/hyperv/livemigrationops.py +++ b/nova/virt/hyperv/livemigrationops.py @@ -20,8 +20,8 @@ from oslo.config import cfg +from nova.i18n import _ from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.hyperv import imagecache from nova.virt.hyperv import utilsfactory diff --git a/nova/virt/hyperv/livemigrationutils.py b/nova/virt/hyperv/livemigrationutils.py index 219b4e10ca..579965897e 100644 --- a/nova/virt/hyperv/livemigrationutils.py +++ b/nova/virt/hyperv/livemigrationutils.py @@ -19,7 +19,7 @@ import wmi from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from 
nova.virt.hyperv import vmutils from nova.virt.hyperv import vmutilsv2 diff --git a/nova/virt/hyperv/migrationops.py b/nova/virt/hyperv/migrationops.py index e883fba1e6..31e3c101cc 100644 --- a/nova/virt/hyperv/migrationops.py +++ b/nova/virt/hyperv/migrationops.py @@ -18,8 +18,8 @@ """ import os +from nova.i18n import _ from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import units from nova.virt.hyperv import imagecache diff --git a/nova/virt/hyperv/networkutils.py b/nova/virt/hyperv/networkutils.py index 2b45343b8a..27571485cd 100644 --- a/nova/virt/hyperv/networkutils.py +++ b/nova/virt/hyperv/networkutils.py @@ -23,7 +23,7 @@ if sys.platform == 'win32': import wmi -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.virt.hyperv import vmutils diff --git a/nova/virt/hyperv/networkutilsv2.py b/nova/virt/hyperv/networkutilsv2.py index c3ec6a3497..558f7c44cd 100644 --- a/nova/virt/hyperv/networkutilsv2.py +++ b/nova/virt/hyperv/networkutilsv2.py @@ -24,7 +24,7 @@ if sys.platform == 'win32': import wmi -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.virt.hyperv import networkutils from nova.virt.hyperv import vmutils diff --git a/nova/virt/hyperv/pathutils.py b/nova/virt/hyperv/pathutils.py index 853281899d..02937689c6 100644 --- a/nova/virt/hyperv/pathutils.py +++ b/nova/virt/hyperv/pathutils.py @@ -18,7 +18,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova import utils diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py index a103557579..0c604b46fc 100644 --- a/nova/virt/hyperv/snapshotops.py +++ b/nova/virt/hyperv/snapshotops.py @@ -21,8 +21,8 @@ from oslo.config import cfg from nova.compute import task_states +from nova.i18n import _ 
from nova.image import glance -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.hyperv import utilsfactory diff --git a/nova/virt/hyperv/vhdutils.py b/nova/virt/hyperv/vhdutils.py index a7c6502720..801533ba80 100644 --- a/nova/virt/hyperv/vhdutils.py +++ b/nova/virt/hyperv/vhdutils.py @@ -31,7 +31,7 @@ from xml.etree import ElementTree -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.virt.hyperv import constants from nova.virt.hyperv import vmutils diff --git a/nova/virt/hyperv/vhdutilsv2.py b/nova/virt/hyperv/vhdutilsv2.py index 44a0f7663f..8d865aac24 100644 --- a/nova/virt/hyperv/vhdutilsv2.py +++ b/nova/virt/hyperv/vhdutilsv2.py @@ -26,7 +26,7 @@ from xml.etree import ElementTree -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import units from nova.virt.hyperv import constants from nova.virt.hyperv import vhdutils diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py index ca5286d9f9..177696d6cb 100644 --- a/nova/virt/hyperv/vmops.py +++ b/nova/virt/hyperv/vmops.py @@ -24,8 +24,8 @@ from nova.api.metadata import base as instance_metadata from nova import exception +from nova.i18n import _ from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import processutils diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py index e036ba89ff..1b71248d4e 100644 --- a/nova/virt/hyperv/vmutils.py +++ b/nova/virt/hyperv/vmutils.py @@ -28,7 +28,7 @@ from oslo.config import cfg from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.virt.hyperv import constants diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py 
index 7b3b0598fb..1e9df559e2 100644 --- a/nova/virt/hyperv/volumeops.py +++ b/nova/virt/hyperv/volumeops.py @@ -22,8 +22,8 @@ from oslo.config import cfg from nova import exception +from nova.i18n import _ from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt import driver from nova.virt.hyperv import utilsfactory diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py index 2740da2911..ccd890daef 100644 --- a/nova/virt/hyperv/volumeutils.py +++ b/nova/virt/hyperv/volumeutils.py @@ -28,7 +28,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova import utils from nova.virt.hyperv import basevolumeutils diff --git a/nova/virt/hyperv/volumeutilsv2.py b/nova/virt/hyperv/volumeutilsv2.py index be97b1a4ab..ae2a7f6b2e 100644 --- a/nova/virt/hyperv/volumeutilsv2.py +++ b/nova/virt/hyperv/volumeutilsv2.py @@ -26,7 +26,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova import utils from nova.virt.hyperv import basevolumeutils diff --git a/nova/virt/images.py b/nova/virt/images.py index 23b2a52426..fc6cd8423c 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -24,9 +24,9 @@ from oslo.config import cfg from nova import exception +from nova.i18n import _ from nova import image from nova.openstack.common import fileutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import imageutils from nova.openstack.common import log as logging from nova import utils diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py index 746bc6908f..27800eceed 100644 --- a/nova/virt/libvirt/blockinfo.py +++ b/nova/virt/libvirt/blockinfo.py @@ -77,8 +77,8 @@ from nova import block_device from 
nova.compute import flavors from nova import exception +from nova.i18n import _ from nova.objects import base as obj_base -from nova.openstack.common.gettextutils import _ from nova.virt import block_device as driver_block_device from nova.virt import configdrive from nova.virt import driver diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 914ea95f7b..053b267acd 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -56,16 +56,16 @@ from nova.compute import vm_mode from nova import context as nova_context from nova import exception +from nova.i18n import _ +from nova.i18n import _LE +from nova.i18n import _LI +from nova.i18n import _LW from nova import image from nova import objects from nova.objects import flavor as flavor_obj from nova.objects import service as service_obj from nova.openstack.common import excutils from nova.openstack.common import fileutils -from nova.openstack.common.gettextutils import _ -from nova.openstack.common.gettextutils import _LE -from nova.openstack.common.gettextutils import _LI -from nova.openstack.common.gettextutils import _LW from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index f36e7946ff..f935c7163e 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -18,8 +18,8 @@ from oslo.config import cfg from nova.cloudpipe import pipelib -from nova.openstack.common.gettextutils import _LI -from nova.openstack.common.gettextutils import _LW +from nova.i18n import _LI +from nova.i18n import _LW from nova.openstack.common import log as logging import nova.virt.firewall as base_firewall from nova.virt import netutils diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index 4d73d536d1..4caf9e0001 100644 --- a/nova/virt/libvirt/imagebackend.py +++ 
b/nova/virt/libvirt/imagebackend.py @@ -21,10 +21,10 @@ import six from nova import exception +from nova.i18n import _ +from nova.i18n import _LE from nova.openstack.common import excutils from nova.openstack.common import fileutils -from nova.openstack.common.gettextutils import _ -from nova.openstack.common.gettextutils import _LE from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import units diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py index 0542b431a4..7a3282f1ef 100644 --- a/nova/virt/libvirt/imagecache.py +++ b/nova/virt/libvirt/imagecache.py @@ -28,10 +28,10 @@ from oslo.config import cfg +from nova.i18n import _LE +from nova.i18n import _LI +from nova.i18n import _LW from nova.openstack.common import fileutils -from nova.openstack.common.gettextutils import _LE -from nova.openstack.common.gettextutils import _LI -from nova.openstack.common.gettextutils import _LW from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import processutils diff --git a/nova/virt/libvirt/lvm.py b/nova/virt/libvirt/lvm.py index 0671a5086b..1ef455eb83 100644 --- a/nova/virt/libvirt/lvm.py +++ b/nova/virt/libvirt/lvm.py @@ -22,9 +22,9 @@ from oslo.config import cfg from nova import exception -from nova.openstack.common.gettextutils import _ -from nova.openstack.common.gettextutils import _LE -from nova.openstack.common.gettextutils import _LW +from nova.i18n import _ +from nova.i18n import _LE +from nova.i18n import _LW from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova.openstack.common import units diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py index 54ac1d3283..af0cd40f97 100644 --- a/nova/virt/libvirt/utils.py +++ b/nova/virt/libvirt/utils.py @@ -25,9 +25,9 @@ from lxml import etree from oslo.config import cfg -from 
nova.openstack.common.gettextutils import _ -from nova.openstack.common.gettextutils import _LI -from nova.openstack.common.gettextutils import _LW +from nova.i18n import _ +from nova.i18n import _LI +from nova.i18n import _LW from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova import utils diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 2532e786ca..1da2b35d3f 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -22,11 +22,11 @@ from oslo.config import cfg from nova import exception +from nova.i18n import _ +from nova.i18n import _LE +from nova.i18n import _LW from nova.network import linux_net from nova.network import model as network_model -from nova.openstack.common.gettextutils import _ -from nova.openstack.common.gettextutils import _LE -from nova.openstack.common.gettextutils import _LW from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova import utils diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py index 1f4f85cb03..cb5c6d9746 100644 --- a/nova/virt/libvirt/volume.py +++ b/nova/virt/libvirt/volume.py @@ -26,9 +26,9 @@ import six.moves.urllib.parse as urlparse from nova import exception -from nova.openstack.common.gettextutils import _ -from nova.openstack.common.gettextutils import _LE -from nova.openstack.common.gettextutils import _LW +from nova.i18n import _ +from nova.i18n import _LE +from nova.i18n import _LW from nova.openstack.common import log as logging from nova.openstack.common import loopingcall from nova.openstack.common import processutils diff --git a/nova/virt/storage_users.py b/nova/virt/storage_users.py index 752f07efc0..58e7f58498 100644 --- a/nova/virt/storage_users.py +++ b/nova/virt/storage_users.py @@ -19,7 +19,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging 
from nova import utils diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index 313ef5f87c..da690f53a7 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -28,8 +28,7 @@ import suds from nova import exception -from nova.openstack.common.gettextutils import _ -from nova.openstack.common.gettextutils import _LC +from nova.i18n import _, _LC from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import loopingcall diff --git a/nova/virt/vmwareapi/ds_util.py b/nova/virt/vmwareapi/ds_util.py index cc76b9a700..a91e732c55 100644 --- a/nova/virt/vmwareapi/ds_util.py +++ b/nova/virt/vmwareapi/ds_util.py @@ -18,7 +18,7 @@ import posixpath from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.virt.vmwareapi import error_util from nova.virt.vmwareapi import vim_util diff --git a/nova/virt/vmwareapi/error_util.py b/nova/virt/vmwareapi/error_util.py index ba01c931d3..89ce19df16 100644 --- a/nova/virt/vmwareapi/error_util.py +++ b/nova/virt/vmwareapi/error_util.py @@ -17,7 +17,7 @@ Exception classes and SOAP response error checking module. 
""" from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/virt/vmwareapi/imagecache.py b/nova/virt/vmwareapi/imagecache.py index 617c78833b..11d300e13b 100644 --- a/nova/virt/vmwareapi/imagecache.py +++ b/nova/virt/vmwareapi/imagecache.py @@ -37,7 +37,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import lockutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/virt/vmwareapi/io_util.py b/nova/virt/vmwareapi/io_util.py index 5c79df8772..10132dfee0 100644 --- a/nova/virt/vmwareapi/io_util.py +++ b/nova/virt/vmwareapi/io_util.py @@ -24,8 +24,8 @@ from eventlet import queue from nova import exception +from nova.i18n import _ from nova import image -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) diff --git a/nova/virt/vmwareapi/network_util.py b/nova/virt/vmwareapi/network_util.py index 565c45db4c..20d9596ae6 100644 --- a/nova/virt/vmwareapi/network_util.py +++ b/nova/virt/vmwareapi/network_util.py @@ -19,7 +19,7 @@ """ from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.virt.vmwareapi import error_util from nova.virt.vmwareapi import vim_util diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py index f611ccf20a..2c50b4a694 100644 --- a/nova/virt/vmwareapi/vif.py +++ b/nova/virt/vmwareapi/vif.py @@ -18,7 +18,7 @@ from oslo.config import cfg from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.virt.vmwareapi import error_util from nova.virt.vmwareapi import 
network_util diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py index f8eb76f203..d4aa456c2b 100644 --- a/nova/virt/vmwareapi/vim.py +++ b/nova/virt/vmwareapi/vim.py @@ -24,7 +24,7 @@ from oslo.config import cfg import suds -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova import utils from nova.virt.vmwareapi import error_util diff --git a/nova/virt/vmwareapi/vim_util.py b/nova/virt/vmwareapi/vim_util.py index 313d35ffc7..e37c6cddf7 100644 --- a/nova/virt/vmwareapi/vim_util.py +++ b/nova/virt/vmwareapi/vim_util.py @@ -19,7 +19,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging vmware_opts = cfg.IntOpt('maximum_objects', default=100, diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index f6e3e7adf2..7a60242b23 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -24,8 +24,8 @@ from oslo.config import cfg from nova import exception +from nova.i18n import _ from nova.network import model as network_model -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import units from nova import utils diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 9d7093c7a4..2c5b34523a 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -32,8 +32,8 @@ from nova.compute import vm_states from nova import context as nova_context from nova import exception +from nova.i18n import _, _LE from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _, _LE from nova.openstack.common import lockutils from nova.openstack.common import log as logging from nova.openstack.common import strutils diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py index c332402d82..6a5e1c5790 100644 --- 
a/nova/virt/vmwareapi/volumeops.py +++ b/nova/virt/vmwareapi/volumeops.py @@ -20,7 +20,7 @@ from oslo.config import cfg from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.virt.vmwareapi import vim from nova.virt.vmwareapi import vim_util diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py index df47d29810..00d9e27389 100644 --- a/nova/virt/xenapi/agent.py +++ b/nova/virt/xenapi/agent.py @@ -28,8 +28,8 @@ from nova import context from nova import crypto from nova import exception +from nova.i18n import _ from nova import objects -from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import strutils diff --git a/nova/virt/xenapi/client/session.py b/nova/virt/xenapi/client/session.py index 139e1c184c..1dc5b4446e 100644 --- a/nova/virt/xenapi/client/session.py +++ b/nova/virt/xenapi/client/session.py @@ -25,8 +25,8 @@ from nova import context from nova import exception +from nova.i18n import _ from nova import objects -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import versionutils from nova import utils diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py index 77c962ddde..9afab4d1a1 100644 --- a/nova/virt/xenapi/driver.py +++ b/nova/virt/xenapi/driver.py @@ -40,7 +40,7 @@ from oslo.config import cfg import six.moves.urllib.parse as urlparse -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import units diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index eabe139aec..0c8e693595 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -57,7 +57,7 @@ import zlib 
from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py index afa484a80c..c842df8a7b 100644 --- a/nova/virt/xenapi/host.py +++ b/nova/virt/xenapi/host.py @@ -25,9 +25,9 @@ from nova.compute import vm_states from nova import context from nova import exception +from nova.i18n import _ from nova import objects from nova.objects import service as service_obj -from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.pci import pci_whitelist diff --git a/nova/virt/xenapi/image/bittorrent.py b/nova/virt/xenapi/image/bittorrent.py index a77775d113..d400feef5b 100644 --- a/nova/virt/xenapi/image/bittorrent.py +++ b/nova/virt/xenapi/image/bittorrent.py @@ -17,7 +17,7 @@ import pkg_resources import six.moves.urllib.parse as urlparse -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ import nova.openstack.common.log as logging from nova.virt.xenapi import vm_utils diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index cfa983fbcb..d838f69886 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -19,7 +19,7 @@ """ from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ def find_network_with_name_label(session, name_label): diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py index 9867dbad72..f8d2e4f927 100644 --- a/nova/virt/xenapi/pool.py +++ b/nova/virt/xenapi/pool.py @@ -22,7 +22,7 @@ from nova.compute import rpcapi as compute_rpcapi from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import jsonutils from 
nova.openstack.common import log as logging from nova.virt.xenapi import pool_states diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py index a763718d3c..74408cb3c3 100644 --- a/nova/virt/xenapi/vif.py +++ b/nova/virt/xenapi/vif.py @@ -19,7 +19,7 @@ from oslo.config import cfg -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.virt.xenapi import network_utils from nova.virt.xenapi import vm_utils diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index a655b1dc8c..e810b4a846 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -37,9 +37,9 @@ from nova.compute import task_states from nova.compute import vm_mode from nova import exception +from nova.i18n import _, _LI from nova.network import model as network_model from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _, _LI from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import processutils diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index c7db7ea992..4338b520e2 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -35,9 +35,9 @@ from nova.compute import vm_states from nova import context as nova_context from nova import exception +from nova.i18n import _ from nova import objects from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index d1a01d58f2..96b261a178 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -25,7 +25,7 @@ from oslo.config import cfg from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common 
import log as logging xenapi_volume_utils_opts = [ diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 5eb28165af..70adbbb683 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -18,8 +18,8 @@ """ from nova import exception +from nova.i18n import _ from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.xenapi import vm_utils from nova.virt.xenapi import volume_utils diff --git a/nova/vnc/xvp_proxy.py b/nova/vnc/xvp_proxy.py index 5ab95c63a0..038d2c4675 100644 --- a/nova/vnc/xvp_proxy.py +++ b/nova/vnc/xvp_proxy.py @@ -27,7 +27,7 @@ from nova.consoleauth import rpcapi as consoleauth_rpcapi from nova import context -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova import version from nova import wsgi diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py index 2ba00aa811..87ad68f57d 100644 --- a/nova/volume/cinder.py +++ b/nova/volume/cinder.py @@ -28,7 +28,7 @@ from nova import availability_zones as az from nova import exception -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import log as logging from nova.openstack.common import strutils diff --git a/nova/volume/encryptors/__init__.py b/nova/volume/encryptors/__init__.py index 79879d2a23..8c87a9e768 100644 --- a/nova/volume/encryptors/__init__.py +++ b/nova/volume/encryptors/__init__.py @@ -14,7 +14,7 @@ # under the License. 
-from nova.openstack.common.gettextutils import _ +from nova.i18n import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.volume.encryptors import nop diff --git a/nova/wsgi.py b/nova/wsgi.py index f538bcd771..4f9a95bd9c 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -34,8 +34,8 @@ import webob.exc from nova import exception +from nova.i18n import _ from nova.openstack.common import excutils -from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging wsgi_opts = [ diff --git a/requirements.txt b/requirements.txt index 6949d82614..2d9ab4cab1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,3 +34,4 @@ oslo.config>=1.2.1 oslo.rootwrap pycadf>=0.5.1 oslo.messaging>=1.3.0 +oslo.i18n>=0.1.0 diff --git a/tools/db/schema_diff.py b/tools/db/schema_diff.py index 9e88f4f22c..9e441f4834 100755 --- a/tools/db/schema_diff.py +++ b/tools/db/schema_diff.py @@ -49,7 +49,7 @@ import subprocess import sys -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ ### Dump diff --git a/tools/esx/guest_tool.py b/tools/esx/guest_tool.py index 4c830b05d4..c472d6cbb1 100644 --- a/tools/esx/guest_tool.py +++ b/tools/esx/guest_tool.py @@ -28,7 +28,7 @@ import sys import time -from nova.openstack.common.gettextutils import _ +from nova.i18n import _ PLATFORM_WIN = 'win32' diff --git a/tox.ini b/tox.ini index daeebd56d2..61e16ea0b0 100644 --- a/tox.ini +++ b/tox.ini @@ -66,4 +66,4 @@ exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,t [hacking] local-check-factory = nova.hacking.checks.factory -import_exceptions = nova.openstack.common.gettextutils +import_exceptions = nova.i18n From 6c6648042a3a22202ffcb03428f7689db20b03b8 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Sat, 19 Jul 2014 02:33:40 +0800 Subject: [PATCH 083/486] Remove warn log for over quota Over quota exception is already raised at API layer and no need to print a warning log. 
Change-Id: If44d5f2dd0e11d15d57cf6ab247f5e9e7fe75a9c --- nova/compute/api.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 1ab06c35d3..dc257d9dcf 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -400,10 +400,6 @@ def _check_metadata_properties_quota(self, context, metadata=None): try: QUOTAS.limit_check(context, metadata_items=num_metadata) except exception.OverQuota as exc: - LOG.warn(_("Quota exceeded for %(pid)s, tried to set " - "%(num_metadata)s metadata properties"), - {'pid': context.project_id, - 'num_metadata': num_metadata}) quota_metadata = exc.kwargs['quotas']['metadata_items'] raise exception.MetadataLimitExceeded(allowed=quota_metadata) From 8cf7430eaad5a0e52096200850032d6692381d3b Mon Sep 17 00:00:00 2001 From: jichenjc Date: Fri, 11 Jul 2014 07:46:59 +0800 Subject: [PATCH 084/486] Fix error status code for agents When passing bad body in a request, most APIs return BadRequest response. However, agents API doesn't do it. This patch fixes the error status code and adds a unit test related to this change. 
This patch's idea most came from Ken'ichi Ohmichi's patch https://review.openstack.org/#/c/107266/ Change-Id: I9602183ade4d3c0105e2498aebb7e33eb72f86f0 --- nova/api/openstack/compute/contrib/agents.py | 16 ++++++----- .../openstack/compute/contrib/test_agents.py | 27 +++++++++++++++++++ 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/compute/contrib/agents.py b/nova/api/openstack/compute/contrib/agents.py index c69159c5ab..6ad4ca064d 100644 --- a/nova/api/openstack/compute/contrib/agents.py +++ b/nova/api/openstack/compute/contrib/agents.py @@ -19,6 +19,7 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import exception +from nova.i18n import _ from nova import objects from nova import utils @@ -95,8 +96,9 @@ def update(self, req, id, body): url = para['url'] md5hash = para['md5hash'] version = para['version'] - except (TypeError, KeyError): - raise webob.exc.HTTPUnprocessableEntity() + except (TypeError, KeyError) as ex: + msg = _("Invalid request body: %s") % unicode(ex) + raise webob.exc.HTTPBadRequest(explanation=msg) try: utils.check_string_length(url, 'url', max_length=255) @@ -112,8 +114,9 @@ def update(self, req, id, body): agent.url = url agent.md5hash = md5hash agent.save() - except ValueError: - raise webob.exc.HTTPUnprocessableEntity() + except ValueError as ex: + msg = _("Invalid request body: %s") % unicode(ex) + raise webob.exc.HTTPBadRequest(explanation=msg) except exception.AgentBuildNotFound as ex: raise webob.exc.HTTPNotFound(explanation=ex.format_message()) @@ -149,8 +152,9 @@ def create(self, req, body): version = agent['version'] url = agent['url'] md5hash = agent['md5hash'] - except (TypeError, KeyError): - raise webob.exc.HTTPUnprocessableEntity() + except (TypeError, KeyError) as ex: + msg = _("Invalid request body: %s") % unicode(ex) + raise webob.exc.HTTPBadRequest(explanation=msg) try: utils.check_string_length(hypervisor, 'hypervisor', max_length=255) diff --git 
a/nova/tests/api/openstack/compute/contrib/test_agents.py b/nova/tests/api/openstack/compute/contrib/test_agents.py index 5036af93d7..8cc25e6854 100644 --- a/nova/tests/api/openstack/compute/contrib/test_agents.py +++ b/nova/tests/api/openstack/compute/contrib/test_agents.py @@ -117,6 +117,17 @@ def test_agents_create(self): res_dict = self.controller.create(req, body) self.assertEqual(res_dict, response) + def test_agents_create_key_error(self): + req = FakeRequest() + body = {'agent': {'hypervisordummy': 'kvm', + 'os': 'win', + 'architecture': 'x86', + 'version': '7.0', + 'url': 'xxx://xxxx/xxx/xxx', + 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, body) + def _test_agents_create_with_invalid_length(self, key): req = FakeRequest() body = {'agent': {'hypervisor': 'kvm', @@ -211,6 +222,22 @@ def test_agents_update(self): res_dict = self.controller.update(req, 1, body) self.assertEqual(res_dict, response) + def test_agents_update_key_error(self): + req = FakeRequest() + body = {'para': {'versiondummy': '7.0', + 'url': 'xxx://xxxx/xxx/xxx', + 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, 1, body) + + def test_agents_update_value_error(self): + req = FakeRequest() + body = {'para': {'version': '7.0', + 'url': 1111, + 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, 1, body) + def _test_agents_update_with_invalid_length(self, key): req = FakeRequest() body = {'para': {'version': '7.0', From 9428ba0a0ac09d943f026cb21e97d57625d1b85d Mon Sep 17 00:00:00 2001 From: jichenjc Date: Sat, 19 Jul 2014 09:06:14 +0800 Subject: [PATCH 085/486] Remove translation for debug message Remove translation for debug message in neutron api layer. 
Change-Id: I5b3ce1115d8f2c46f6c144f21e735a45f338cb32 --- nova/network/neutronv2/api.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index f26224de87..a3938ce50c 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -573,8 +573,8 @@ def add_fixed_ip_to_instance(self, context, instance, network_id): port_req_body) return self._get_instance_nw_info(context, instance) except Exception as ex: - msg = _("Unable to update port %(portid)s on subnet " - "%(subnet_id)s with failure: %(exception)s") + msg = ("Unable to update port %(portid)s on subnet " + "%(subnet_id)s with failure: %(exception)s") LOG.debug(msg, {'portid': p['id'], 'subnet_id': subnet['id'], 'exception': ex}) @@ -602,8 +602,8 @@ def remove_fixed_ip_from_instance(self, context, instance, address): neutronv2.get_client(context).update_port(p['id'], port_req_body) except Exception as ex: - msg = _("Unable to update port %(portid)s with" - " failure: %(exception)s") + msg = ("Unable to update port %(portid)s with" + " failure: %(exception)s") LOG.debug(msg, {'portid': p['id'], 'exception': ex}) return self._get_instance_nw_info(context, instance) From 752873ff0529e028529b0626f57a165c08b0b81f Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Tue, 15 Jul 2014 17:19:49 -0700 Subject: [PATCH 086/486] Remove stubs in favor of mock in test_policy This patch converts tests_policy to use mock instead of stubs. I was stealing some tests from nova to another openstack project and made this change there so I figured nova would benefit from it too. 
Change-Id: I6d086795c559ac8f04028d820e8e88bfd5305426 --- nova/tests/test_policy.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/nova/tests/test_policy.py b/nova/tests/test_policy.py index 5c6d375f9b..a7174b1384 100644 --- a/nova/tests/test_policy.py +++ b/nova/tests/test_policy.py @@ -18,6 +18,7 @@ import os.path import StringIO +import mock import six.moves.urllib.request as urlrequest from nova import context @@ -99,21 +100,17 @@ def test_enforce_good_action(self): result = policy.enforce(self.context, action, self.target) self.assertEqual(result, True) - def test_enforce_http_true(self): - - def fakeurlopen(url, post_data): - return StringIO.StringIO("True") - self.stubs.Set(urlrequest, 'urlopen', fakeurlopen) + @mock.patch.object(urlrequest, 'urlopen', + return_value=StringIO.StringIO("True")) + def test_enforce_http_true(self, mock_urlrequest): action = "example:get_http" target = {} result = policy.enforce(self.context, action, target) self.assertEqual(result, True) - def test_enforce_http_false(self): - - def fakeurlopen(url, post_data): - return StringIO.StringIO("False") - self.stubs.Set(urlrequest, 'urlopen', fakeurlopen) + @mock.patch.object(urlrequest, 'urlopen', + return_value=StringIO.StringIO("False")) + def test_enforce_http_false(self, mock_urlrequest): action = "example:get_http" target = {} self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, From 2c75ccefb1d823403128760647c9e124811660cb Mon Sep 17 00:00:00 2001 From: jichenjc Date: Fri, 11 Jul 2014 15:19:18 +0800 Subject: [PATCH 087/486] Remove unnecessary error log in cell API There are unnnecessary error logs in cell API. It tried to log error for invalid input which is not necessary because we already raises exception for the bad request. This patch removes them in both v2/v3 API. 
Change-Id: Idd239903aa830f5d8fe50336dfa63a43b617584f --- nova/api/openstack/compute/contrib/cells.py | 8 -------- nova/api/openstack/compute/plugins/v3/cells.py | 8 -------- 2 files changed, 16 deletions(-) diff --git a/nova/api/openstack/compute/contrib/cells.py b/nova/api/openstack/compute/contrib/cells.py index fa2661f10c..dc4a82150d 100644 --- a/nova/api/openstack/compute/contrib/cells.py +++ b/nova/api/openstack/compute/contrib/cells.py @@ -29,13 +29,11 @@ from nova.compute import api as compute from nova import exception from nova.i18n import _ -from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova.openstack.common import timeutils from nova import rpc -LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('name', 'nova.cells.opts', group='cells') CONF.import_opt('capabilities', 'nova.cells.opts', group='cells') @@ -283,18 +281,15 @@ def _validate_cell_name(self, cell_name): """Validate cell name is not empty and doesn't contain '!' or '.'.""" if not cell_name: msg = _("Cell name cannot be empty") - LOG.error(msg) raise exc.HTTPBadRequest(explanation=msg) if '!' in cell_name or '.' in cell_name: msg = _("Cell name cannot contain '!' 
or '.'") - LOG.error(msg) raise exc.HTTPBadRequest(explanation=msg) def _validate_cell_type(self, cell_type): """Validate cell_type is 'parent' or 'child'.""" if cell_type not in ['parent', 'child']: msg = _("Cell type must be 'parent' or 'child'") - LOG.error(msg) raise exc.HTTPBadRequest(explanation=msg) def _normalize_cell(self, cell, existing=None): @@ -350,12 +345,10 @@ def create(self, req, body): authorize(context) if 'cell' not in body: msg = _("No cell information in request") - LOG.error(msg) raise exc.HTTPBadRequest(explanation=msg) cell = body['cell'] if 'name' not in cell: msg = _("No cell name in request") - LOG.error(msg) raise exc.HTTPBadRequest(explanation=msg) self._validate_cell_name(cell['name']) self._normalize_cell(cell) @@ -374,7 +367,6 @@ def update(self, req, id, body): authorize(context) if 'cell' not in body: msg = _("No cell information in request") - LOG.error(msg) raise exc.HTTPBadRequest(explanation=msg) cell = body['cell'] cell.pop('id', None) diff --git a/nova/api/openstack/compute/plugins/v3/cells.py b/nova/api/openstack/compute/plugins/v3/cells.py index 2c7c9eae6f..8dc46c4e83 100644 --- a/nova/api/openstack/compute/plugins/v3/cells.py +++ b/nova/api/openstack/compute/plugins/v3/cells.py @@ -28,13 +28,11 @@ from nova.compute import api as compute from nova import exception from nova.i18n import _ -from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova.openstack.common import timeutils from nova import rpc -LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('name', 'nova.cells.opts', group='cells') CONF.import_opt('capabilities', 'nova.cells.opts', group='cells') @@ -190,18 +188,15 @@ def _validate_cell_name(self, cell_name): """Validate cell name is not empty and doesn't contain '!' or '.'.""" if not cell_name: msg = _("Cell name cannot be empty") - LOG.error(msg) raise exc.HTTPBadRequest(explanation=msg) if '!' in cell_name or '.' 
in cell_name: msg = _("Cell name cannot contain '!' or '.'") - LOG.error(msg) raise exc.HTTPBadRequest(explanation=msg) def _validate_cell_type(self, cell_type): """Validate cell_type is 'parent' or 'child'.""" if cell_type not in ['parent', 'child']: msg = _("Cell type must be 'parent' or 'child'") - LOG.error(msg) raise exc.HTTPBadRequest(explanation=msg) def _normalize_cell(self, cell, existing=None): @@ -257,12 +252,10 @@ def create(self, req, body): authorize(context) if 'cell' not in body: msg = _("No cell information in request") - LOG.error(msg) raise exc.HTTPBadRequest(explanation=msg) cell = body['cell'] if 'name' not in cell: msg = _("No cell name in request") - LOG.error(msg) raise exc.HTTPBadRequest(explanation=msg) self._validate_cell_name(cell['name']) self._normalize_cell(cell) @@ -280,7 +273,6 @@ def update(self, req, id, body): authorize(context) if 'cell' not in body: msg = _("No cell information in request") - LOG.error(msg) raise exc.HTTPBadRequest(explanation=msg) cell = body['cell'] cell.pop('id', None) From 77a7d14542600f2badcdf048fe6b586a0ff27e30 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Sat, 19 Jul 2014 18:47:19 +0800 Subject: [PATCH 088/486] Commit quota when deallocate floating ip It seems that when you allocate a floating-ip in a tenant with nova-network, its quota is never returned after calling 'nova floating-ip-delete' even though 'nova floating-ip-list' shows it gone. This behavior applies to each tenant individually. The gate tests are passing because they all run with tenant isolation. The root cause of the problem is cooperation between commit 23a27e47 and cbbb9de5. db layer code return floatingip_ref but object layer didn't. This patch fixed the problem by adding return value from object layer. 
Change-Id: Ide1a338b6c33676311028e8738150e146324a8ee Closes-Bug: #1347156 --- nova/objects/floating_ip.py | 2 +- nova/tests/network/test_manager.py | 10 ++++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/nova/objects/floating_ip.py b/nova/objects/floating_ip.py index d74ba424e7..da035a424d 100644 --- a/nova/objects/floating_ip.py +++ b/nova/objects/floating_ip.py @@ -106,7 +106,7 @@ def associate(cls, context, floating_address, fixed_address, host): @obj_base.remotable_classmethod def deallocate(cls, context, address): - db.floating_ip_deallocate(context, address) + return db.floating_ip_deallocate(context, address) @obj_base.remotable_classmethod def destroy(cls, context, address): diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index e2f62e2617..c3b0cf0a45 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -1101,12 +1101,14 @@ def fake_allocate_address(*args, **kwargs): self.network.allocate_floating_ip(ctxt, ctxt.project_id) - def test_deallocate_floating_ip(self): + @mock.patch('nova.quota.QUOTAS.reserve') + @mock.patch('nova.quota.QUOTAS.commit') + def test_deallocate_floating_ip(self, mock_commit, mock_reserve): ctxt = context.RequestContext('testuser', 'testproject', is_admin=False) def fake1(*args, **kwargs): - pass + return dict(test_floating_ip.fake_floating_ip) def fake2(*args, **kwargs): return dict(test_floating_ip.fake_floating_ip, @@ -1127,10 +1129,14 @@ def fake3(*args, **kwargs): ctxt, mox.IgnoreArg()) + mock_reserve.return_value = 'reserve' # this time should not raise self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3) self.network.deallocate_floating_ip(ctxt, ctxt.project_id) + mock_commit.assert_called_once_with(ctxt, 'reserve', + project_id='testproject') + @mock.patch('nova.db.fixed_ip_get') def test_associate_floating_ip(self, fixed_get): ctxt = context.RequestContext('testuser', 'testproject', From 
dd6fb1246ff2789bd78b772b45e1fcac21eda67a Mon Sep 17 00:00:00 2001 From: jichenjc Date: Wed, 18 Jun 2014 04:14:09 +0800 Subject: [PATCH 089/486] Keep resizing&resized instances when compute init During compute manager startup init_host is called. One of the functions there is to delete instance data that doesn't belong to this host i.e. _destroy_evacuated_instances. But this function only checks if the local instance belongs to the host or not. It doesn't check the task_state or vm_state. In Resize function, user may want to revert or confirm the resize operations so the instance on source and dest compute node should be kept. so for RESIZE_MIGRATING, RESIZE_MIGRATED task states and RESIZED vm state instances, they should be kept in compute node when the compute restart. This patch adds check for the task state and vm state before delete the instances. Closes-Bug: #1330503 Change-Id: I723fa4a8823019391ea83aa189096531032adab1 --- nova/compute/manager.py | 17 +++++++++++----- nova/tests/compute/test_compute_mgr.py | 27 ++++++++++++++++++++++++-- 2 files changed, 37 insertions(+), 7 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 996ae29718..02b3e8844d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -684,21 +684,28 @@ def _destroy_evacuated_instances(self, context): evacuated to another host. Check that the instances reported by the driver are still associated with this host. If they are not, destroy them, with the exception of instances which are in - the MIGRATING state. + the MIGRATING, RESIZE_MIGRATING, RESIZE_MIGRATED, RESIZE_FINISH + task state or RESIZED vm state. 
""" our_host = self.host filters = {'deleted': False} local_instances = self._get_instances_on_driver(context, filters) for instance in local_instances: if instance.host != our_host: - if instance.task_state in [task_states.MIGRATING]: + if (instance.task_state in [task_states.MIGRATING, + task_states.RESIZE_MIGRATING, + task_states.RESIZE_MIGRATED, + task_states.RESIZE_FINISH] + or instance.vm_state in [vm_states.RESIZED]): LOG.debug('Will not delete instance as its host (' '%(instance_host)s) is not equal to our ' - 'host (%(our_host)s) but its state is ' - '(%(task_state)s)', + 'host (%(our_host)s) but its task state is ' + '(%(task_state)s) and vm state is ' + '(%(vm_state)s)', {'instance_host': instance.host, 'our_host': our_host, - 'task_state': instance.task_state}, + 'task_state': instance.task_state, + 'vm_state': instance.vm_state}, instance=instance) continue LOG.info(_('Deleting instance as its host (' diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 1fcfec5d53..99ff621423 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -1509,15 +1509,18 @@ def test_set_admin_password_driver_not_implemented(self): self._do_test_set_admin_password_driver_error( exc, vm_states.ACTIVE, None, expected_exception) - def test_init_host_with_partial_migration(self): + def _test_init_host_with_partial_migration(self, task_state=None, + vm_state=vm_states.ACTIVE): our_host = self.compute.host instance_1 = objects.Instance(self.context) instance_1.uuid = 'foo' - instance_1.task_state = task_states.MIGRATING + instance_1.task_state = task_state + instance_1.vm_state = vm_state instance_1.host = 'not-' + our_host instance_2 = objects.Instance(self.context) instance_2.uuid = 'bar' instance_2.task_state = None + instance_2.vm_state = vm_states.ACTIVE instance_2.host = 'not-' + our_host with contextlib.nested( @@ -1538,6 +1541,26 @@ def test_init_host_with_partial_migration(self): 
destroy.assert_called_once_with(self.context, instance_2, None, {}, True) + def test_init_host_with_partial_migration_migrating(self): + self._test_init_host_with_partial_migration( + task_state=task_states.MIGRATING) + + def test_init_host_with_partial_migration_resize_migrating(self): + self._test_init_host_with_partial_migration( + task_state=task_states.RESIZE_MIGRATING) + + def test_init_host_with_partial_migration_resize_migrated(self): + self._test_init_host_with_partial_migration( + task_state=task_states.RESIZE_MIGRATED) + + def test_init_host_with_partial_migration_finish_resize(self): + self._test_init_host_with_partial_migration( + task_state=task_states.RESIZE_FINISH) + + def test_init_host_with_partial_migration_resized(self): + self._test_init_host_with_partial_migration( + vm_state=vm_states.RESIZED) + @mock.patch('nova.compute.manager.ComputeManager._instance_update') def test_error_out_instance_on_exception_not_implemented_err(self, inst_update_mock): From 284e5ac022e20ac8150df7d3a7692bd1e2deadbf Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Wed, 16 Jul 2014 17:42:54 -0400 Subject: [PATCH 090/486] Cleanup and gate on hacking E711 and E712 rule Fix the offending instances of the rule and removes it from the ignore list in tox.ini so that we can gate on E711 and E712 rules. Most violations were in DB queries. 
Replace as follows: False -> sqlalchemy.sql.false() None -> sqlalchemy.sql.null() True -> sqlalchemy.sql.true() Change-Id: Id84aa04697d1b3c23dc03195113e296c6715379d --- .../compute/plugins/v3/hypervisors.py | 2 +- nova/db/sqlalchemy/api.py | 35 ++++++++++--------- nova/network/manager.py | 2 +- nova/scheduler/filters/trusted_filter.py | 2 +- .../compute/contrib/test_flavor_access.py | 2 +- .../compute/plugins/v3/test_flavor_access.py | 2 +- nova/tests/api/openstack/fakes.py | 2 +- nova/tests/compute/test_compute.py | 4 +-- nova/tests/conductor/test_conductor.py | 2 +- nova/tests/keymgr/test_key.py | 4 +-- nova/tests/virt/xenapi/test_xenapi.py | 2 +- nova/virt/baremetal/db/sqlalchemy/api.py | 7 ++-- nova/virt/baremetal/ipmi.py | 8 ++--- nova/virt/baremetal/pxe.py | 2 +- nova/virt/baremetal/tilera.py | 2 +- nova/virt/baremetal/tilera_pdu.py | 8 ++--- nova/virt/vmwareapi/vm_util.py | 2 +- nova/virt/xenapi/vm_utils.py | 2 +- tox.ini | 4 +-- 19 files changed, 48 insertions(+), 46 deletions(-) diff --git a/nova/api/openstack/compute/plugins/v3/hypervisors.py b/nova/api/openstack/compute/plugins/v3/hypervisors.py index 80d0250bc2..cf145e6283 100644 --- a/nova/api/openstack/compute/plugins/v3/hypervisors.py +++ b/nova/api/openstack/compute/plugins/v3/hypervisors.py @@ -54,7 +54,7 @@ def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs): 'host': hypervisor['service']['host'], } - if servers != None: + if servers is not None: hyp_dict['servers'] = [dict(name=serv['name'], id=serv['uuid']) for serv in servers] diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a333cb2721..f6e4cdf8e0 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -43,7 +43,10 @@ from sqlalchemy.sql.expression import asc from sqlalchemy.sql.expression import desc from sqlalchemy.sql.expression import select +from sqlalchemy.sql import false from sqlalchemy.sql import func +from sqlalchemy.sql import null +from sqlalchemy.sql import true from 
sqlalchemy import String from nova import block_device @@ -262,7 +265,7 @@ def issubclassof_nova_base(obj): if project_only == 'allow_none': query = query.\ filter(or_(base_model.project_id == context.project_id, - base_model.project_id == None)) + base_model.project_id == null())) else: query = query.filter_by(project_id=context.project_id) @@ -685,7 +688,7 @@ def compute_node_statistics(context): func.sum(models.ComputeNode.disk_available_least), base_model=models.ComputeNode, read_deleted="no").\ - filter(models.Service.disabled == False).\ + filter(models.Service.disabled == false()).\ filter( models.Service.id == models.ComputeNode.service_id).\ @@ -907,7 +910,7 @@ def floating_ip_deallocate(context, address): floating_ip_ref = model_query(context, models.FloatingIp, session=session).\ filter_by(address=address).\ - filter(models.FloatingIp.project_id != None).\ + filter(models.FloatingIp.project_id != null()).\ with_lockmode('update').\ first() @@ -1129,7 +1132,7 @@ def fixed_ip_associate(context, address, instance_uuid, network_id=None, session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, - models.FixedIp.network_id == None) + models.FixedIp.network_id == null()) fixed_ip_ref = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter(network_or_none).\ @@ -1162,7 +1165,7 @@ def fixed_ip_associate_pool(context, network_id, instance_uuid=None, session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, - models.FixedIp.network_id == None) + models.FixedIp.network_id == null()) fixed_ip_ref = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter(network_or_none).\ @@ -1234,12 +1237,12 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time): # join with update doesn't work. 
with session.begin(): host_filter = or_(and_(models.Instance.host == host, - models.Network.multi_host == True), + models.Network.multi_host == true()), models.Network.host == host) result = model_query(context, models.FixedIp.id, base_model=models.FixedIp, read_deleted="no", session=session).\ - filter(models.FixedIp.allocated == False).\ + filter(models.FixedIp.allocated == false()).\ filter(models.FixedIp.updated_at < time).\ join((models.Network, models.Network.id == models.FixedIp.network_id)).\ @@ -1930,7 +1933,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir, # but until then we test it explicitly as a workaround. not_soft_deleted = or_( models.Instance.vm_state != vm_states.SOFT_DELETED, - models.Instance.vm_state == None + models.Instance.vm_state == null() ) query_prefix = query_prefix.filter(not_soft_deleted) @@ -2079,7 +2082,7 @@ def instance_get_active_by_window_joined(context, begin, end=None, query = query.options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ - filter(or_(models.Instance.terminated_at == None, + filter(or_(models.Instance.terminated_at == null(), models.Instance.terminated_at > begin)) if end: query = query.filter(models.Instance.launched_at < end) @@ -2655,8 +2658,8 @@ def network_get_associated_fixed_ips(context, network_id, host=None): filter(models.FixedIp.network_id == network_id).\ join((models.VirtualInterface, vif_and)).\ join((models.Instance, inst_and)).\ - filter(models.FixedIp.instance_uuid != None).\ - filter(models.FixedIp.virtual_interface_id != None) + filter(models.FixedIp.instance_uuid != null()).\ + filter(models.FixedIp.virtual_interface_id != null()) if host: query = query.filter(models.Instance.host == host) result = query.all() @@ -2964,7 +2967,7 @@ def _quota_usage_get_all(context, project_id, user_id=None): result = {'project_id': project_id} if user_id: query = query.filter(or_(models.QuotaUsage.user_id == user_id, - models.QuotaUsage.user_id == None)) + 
models.QuotaUsage.user_id == null())) result['user_id'] = user_id rows = query.all() @@ -3018,7 +3021,7 @@ def quota_usage_update(context, project_id, user_id, resource, **kwargs): filter_by(project_id=project_id).\ filter_by(resource=resource).\ filter(or_(models.QuotaUsage.user_id == user_id, - models.QuotaUsage.user_id == None)).\ + models.QuotaUsage.user_id == null())).\ update(updates) if not result: @@ -4284,7 +4287,7 @@ def _flavor_get_query(context, session=None, read_deleted=None): read_deleted=read_deleted).\ options(joinedload('extra_specs')) if not context.is_admin: - the_filter = [models.InstanceTypes.is_public == True] + the_filter = [models.InstanceTypes.is_public == true()] the_filter.extend([ models.InstanceTypes.projects.any(project_id=context.project_id) ]) @@ -4840,9 +4843,9 @@ def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out, def vol_get_usage_by_time(context, begin): """Return volumes usage that have been updated after a specified time.""" return model_query(context, models.VolumeUsage, read_deleted="yes").\ - filter(or_(models.VolumeUsage.tot_last_refreshed == None, + filter(or_(models.VolumeUsage.tot_last_refreshed == null(), models.VolumeUsage.tot_last_refreshed > begin, - models.VolumeUsage.curr_last_refreshed == None, + models.VolumeUsage.curr_last_refreshed == null(), models.VolumeUsage.curr_last_refreshed > begin, )).\ all() diff --git a/nova/network/manager.py b/nova/network/manager.py index dae573feea..ac83165c2b 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -1330,7 +1330,7 @@ def _create_fixed_ips(self, context, network_id, fixed_cidr=None, # to properties of the manager class? 
bottom_reserved = self._bottom_reserved_ips top_reserved = self._top_reserved_ips - if extra_reserved == None: + if extra_reserved is None: extra_reserved = [] if not fixed_cidr: diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py index af3c2f899b..bd0f41b26c 100644 --- a/nova/scheduler/filters/trusted_filter.py +++ b/nova/scheduler/filters/trusted_filter.py @@ -176,7 +176,7 @@ def do_attestation(self, hosts): result = None status, data = self._request("POST", "PollHosts", hosts) - if data != None: + if data is not None: result = data.get('hosts') return result diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_access.py b/nova/tests/api/openstack/compute/contrib/test_flavor_access.py index d416d75009..c2ade23401 100644 --- a/nova/tests/api/openstack/compute/contrib/test_flavor_access.py +++ b/nova/tests/api/openstack/compute/contrib/test_flavor_access.py @@ -84,7 +84,7 @@ def _has_flavor_access(flavorid, projectid): def fake_get_all_flavors_sorted_list(context, inactive=False, filters=None, sort_key='flavorid', sort_dir='asc', limit=None, marker=None): - if filters == None or filters['is_public'] == None: + if filters is None or filters['is_public'] is None: return sorted(INSTANCE_TYPES.values(), key=lambda item: item[sort_key]) res = {} diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_flavor_access.py b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_access.py index c2b310cb1c..595bc7321d 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_flavor_access.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_access.py @@ -83,7 +83,7 @@ def _has_flavor_access(flavorid, projectid): def fake_get_all_flavors_sorted_list(context, inactive=False, filters=None, sort_key='flavorid', sort_dir='asc', limit=None, marker=None): - if filters == None or filters['is_public'] == None: + if filters is None or filters['is_public'] is None: return sorted(INSTANCE_TYPES.values(), 
key=lambda item: item[sort_key]) res = {} diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index a41e48f484..0cf8188feb 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -568,7 +568,7 @@ def stub_instance(id, user_id=None, project_id=None, host=None, "availability_zone": availability_zone, "display_name": display_name or server_name, "display_description": "", - "locked": locked_by != None, + "locked": locked_by is not None, "locked_by": locked_by, "metadata": metadata, "access_ip_v4": access_ipv4, diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 29f99aeacb..0bc897d51f 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -3881,7 +3881,7 @@ def check_task_state(task_state): def _check_locked_by(self, instance_uuid, locked_by): instance = db.instance_get_by_uuid(self.context, instance_uuid) - self.assertEqual(instance['locked'], locked_by != None) + self.assertEqual(instance['locked'], locked_by is not None) self.assertEqual(instance['locked_by'], locked_by) return instance @@ -6616,7 +6616,7 @@ def _test_lifecycle_event(self, lifecycle_event, power_state): uuid = instance['uuid'] self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state') - if power_state != None: + if power_state is not None: self.compute._sync_instance_power_state( mox.IgnoreArg(), mox.ContainsKeyValue('uuid', uuid), diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py index 13673be151..1b396af45b 100644 --- a/nova/tests/conductor/test_conductor.py +++ b/nova/tests/conductor/test_conductor.py @@ -128,7 +128,7 @@ def test_instance_update(self): def test_instance_update_invalid_key(self): # NOTE(danms): the real DB API call ignores invalid keys - if self.db == None: + if self.db is None: self.conductor = utils.ExceptionHelper(self.conductor) self.assertRaises(KeyError, self._do_update, 'any-uuid', 
foobar=1) diff --git a/nova/tests/keymgr/test_key.py b/nova/tests/keymgr/test_key.py index a086c6527c..14766fd201 100644 --- a/nova/tests/keymgr/test_key.py +++ b/nova/tests/keymgr/test_key.py @@ -57,11 +57,11 @@ def test_get_encoded(self): def test___eq__(self): self.assertTrue(self.key == self.key) - self.assertFalse(self.key == None) + self.assertFalse(self.key is None) self.assertFalse(None == self.key) def test___ne__(self): self.assertFalse(self.key != self.key) - self.assertTrue(self.key != None) + self.assertTrue(self.key is not None) self.assertTrue(None != self.key) diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py index 385f16ffc4..2abe9ca2d6 100644 --- a/nova/tests/virt/xenapi/test_xenapi.py +++ b/nova/tests/virt/xenapi/test_xenapi.py @@ -559,7 +559,7 @@ def check_vm_record(self, conn, instance_type_id, check_injection): self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes) self.assertEqual(self.vm['VCPUs_max'], str(vcpus)) self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus)) - if vcpu_weight == None: + if vcpu_weight is None: self.assertEqual(self.vm['VCPUs_params'], {}) else: self.assertEqual(self.vm['VCPUs_params'], diff --git a/nova/virt/baremetal/db/sqlalchemy/api.py b/nova/virt/baremetal/db/sqlalchemy/api.py index 6fd916b0ab..21813be6a6 100644 --- a/nova/virt/baremetal/db/sqlalchemy/api.py +++ b/nova/virt/baremetal/db/sqlalchemy/api.py @@ -22,6 +22,7 @@ from sqlalchemy.sql.expression import asc from sqlalchemy.sql.expression import literal_column +from sqlalchemy.sql import null import nova.context from nova.db.sqlalchemy import api as sqlalchemy_api @@ -91,7 +92,7 @@ def bm_node_get_all(context, service_host=None): @sqlalchemy_api.require_admin_context def bm_node_get_associated(context, service_host=None): query = model_query(context, models.BareMetalNode, read_deleted="no").\ - filter(models.BareMetalNode.instance_uuid != None) + filter(models.BareMetalNode.instance_uuid != null()) if 
service_host: query = query.filter_by(service_host=service_host) return query.all() @@ -100,7 +101,7 @@ def bm_node_get_associated(context, service_host=None): @sqlalchemy_api.require_admin_context def bm_node_get_unassociated(context, service_host=None): query = model_query(context, models.BareMetalNode, read_deleted="no").\ - filter(models.BareMetalNode.instance_uuid == None) + filter(models.BareMetalNode.instance_uuid == null()) if service_host: query = query.filter_by(service_host=service_host) return query.all() @@ -110,7 +111,7 @@ def bm_node_get_unassociated(context, service_host=None): def bm_node_find_free(context, service_host=None, cpus=None, memory_mb=None, local_gb=None): query = model_query(context, models.BareMetalNode, read_deleted="no") - query = query.filter(models.BareMetalNode.instance_uuid == None) + query = query.filter(models.BareMetalNode.instance_uuid == null()) if service_host: query = query.filter_by(service_host=service_host) if cpus is not None: diff --git a/nova/virt/baremetal/ipmi.py b/nova/virt/baremetal/ipmi.py index 473ad58dc0..83dbe4d5bb 100644 --- a/nova/virt/baremetal/ipmi.py +++ b/nova/virt/baremetal/ipmi.py @@ -107,16 +107,16 @@ def __init__(self, node, **kwargs): self.password = node['pm_password'] self.port = node['terminal_port'] - if self.node_id == None: + if self.node_id is None: raise exception.InvalidParameterValue(_("Node id not supplied " "to IPMI")) - if self.address == None: + if self.address is None: raise exception.InvalidParameterValue(_("Address not supplied " "to IPMI")) - if self.user == None: + if self.user is None: raise exception.InvalidParameterValue(_("User not supplied " "to IPMI")) - if self.password == None: + if self.password is None: raise exception.InvalidParameterValue(_("Password not supplied " "to IPMI")) diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py index 0d156f7ec9..5b57aa2928 100644 --- a/nova/virt/baremetal/pxe.py +++ b/nova/virt/baremetal/pxe.py @@ -468,7 +468,7 @@ 
def _wait_for_deploy(): status = row.get('task_state') if (status == baremetal_states.DEPLOYING - and locals['started'] == False): + and locals['started'] is False): LOG.info(_("PXE deploy started for instance %s") % instance['uuid']) locals['started'] = True diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py index 7588a97ece..6bd96c716a 100644 --- a/nova/virt/baremetal/tilera.py +++ b/nova/virt/baremetal/tilera.py @@ -319,7 +319,7 @@ def activate_node(self, context, node, instance): status = row.get('task_state') if (status == baremetal_states.DEPLOYING and - locals['started'] == False): + locals['started'] is False): LOG.info(_('Tilera deploy started for instance %s') % instance['uuid']) locals['started'] = True diff --git a/nova/virt/baremetal/tilera_pdu.py b/nova/virt/baremetal/tilera_pdu.py index dc84c00518..8bbb1a0615 100644 --- a/nova/virt/baremetal/tilera_pdu.py +++ b/nova/virt/baremetal/tilera_pdu.py @@ -79,16 +79,16 @@ def __init__(self, node, **kwargs): self.password = node['pm_password'] self.port = node['terminal_port'] - if self.node_id == None: + if self.node_id is None: raise exception.InvalidParameterValue(_("Node id not supplied " "to PDU")) - if self.address == None: + if self.address is None: raise exception.InvalidParameterValue(_("Address not supplied " "to PDU")) - if self.user == None: + if self.user is None: raise exception.InvalidParameterValue(_("User not supplied " "to PDU")) - if self.password == None: + if self.password is None: raise exception.InvalidParameterValue(_("Password not supplied " "to PDU")) diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index 7a60242b23..eb80fe899f 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -951,7 +951,7 @@ def get_stats_from_cluster(session, cluster): for obj in result.objects: hardware_summary = obj.propSet[0].val runtime_summary = obj.propSet[1].val - if (runtime_summary.inMaintenanceMode == False and + if 
(runtime_summary.inMaintenanceMode is False and runtime_summary.connectionState == "connected"): # Total vcpus is the sum of all pCPUs of individual hosts # The overcommitment ratio is factored in by the scheduler diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index e810b4a846..e145347e23 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -424,7 +424,7 @@ def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk', """Create a VBD record and returns its reference.""" vbd_rec = {} vbd_rec['VM'] = vm_ref - if vdi_ref == None: + if vdi_ref is None: vdi_ref = 'OpaqueRef:NULL' vbd_rec['VDI'] = vdi_ref vbd_rec['userdevice'] = str(userdevice) diff --git a/tox.ini b/tox.ini index 1d36ca286d..06dc18e388 100644 --- a/tox.ini +++ b/tox.ini @@ -52,8 +52,6 @@ commands = python setup.py build_sphinx sitepackages = False [flake8] -# E712 is ignored on purpose, since it is normal to use 'column == true' -# in sqlalchemy. # H803 skipped on purpose per list discussion. # E125 is deliberately excluded. See https://github.com/jcrocholl/pep8/issues/126 # The rest of the ignores are TODOs @@ -61,7 +59,7 @@ sitepackages = False # Stricter in hacking 0.9: F402 # E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301 -ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,E265,E711,E712,F402,H405,H904 +ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,E265,F402,H405,H904 exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools [hacking] From f71d2fc966cb44b8d74b4432179622d6c9e1f284 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Thu, 3 Jul 2014 01:55:23 +0800 Subject: [PATCH 091/486] Add debug log for pci passthrough filter Sometimes operator need information why the host doesn't pass the check of scheduler, this patch adds information for pci passthrough filter if it falied to pass check. 
Change-Id: Ic8fe13174fda5c75ce95fc0b64391ae081b36c6d Partial-Bug: #1301830 --- nova/scheduler/filters/pci_passthrough_filter.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/nova/scheduler/filters/pci_passthrough_filter.py b/nova/scheduler/filters/pci_passthrough_filter.py index dc71e18baa..0726d22148 100644 --- a/nova/scheduler/filters/pci_passthrough_filter.py +++ b/nova/scheduler/filters/pci_passthrough_filter.py @@ -13,8 +13,11 @@ # License for the specific language governing permissions and limitations # under the License. +from nova.openstack.common import log as logging from nova.scheduler import filters +LOG = logging.getLogger(__name__) + class PciPassthroughFilter(filters.BaseHostFilter): """Pci Passthrough Filter based on PCI request @@ -34,7 +37,12 @@ class PciPassthroughFilter(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): """Return true if the host has the required PCI devices.""" - if not filter_properties.get('pci_requests'): + pci_requests = filter_properties.get('pci_requests') + if not pci_requests: return True - return host_state.pci_stats.support_requests( - filter_properties.get('pci_requests')) + if not host_state.pci_stats.support_requests(pci_requests): + LOG.debug("%(host_state)s doesn't have the required PCI devices" + " (%(requests)s)", + {'host_state': host_state, 'requests': pci_requests}) + return False + return True From 97f81734c030f7f704369b37e1fe122c7040b9a6 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Sat, 19 Jul 2014 10:04:12 -0700 Subject: [PATCH 092/486] Revert "Add missing image to instance booted from volume" This reverts commit c3191cf0ba5ad3dc2df8da2a2bf5c9d270fde9d9. The change needs to be thought out some more, i.e. 
why isn't compute_api.get_all just loading up the image_ref value on the instance object rather than calling a helper method from the servers extension to the instance object which just calls back to compute_api, seems very roundabout, plus isn't it a v3 API issue also? Conflicts: nova/api/openstack/compute/servers.py Due to: 826aed0 Use oslo.i18n Change-Id: I6abbfdfa786c3d98065c969e7f9d7d5830caf7e8 Closes-Bug: #1343689 --- nova/api/openstack/compute/servers.py | 6 +-- nova/objects/instance.py | 15 ------- .../api/openstack/compute/test_servers.py | 44 ------------------- nova/tests/objects/test_instance.py | 12 ----- 4 files changed, 1 insertion(+), 76 deletions(-) diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index 5bc97e355c..c63cae40ea 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -35,7 +35,6 @@ from nova import exception from nova.i18n import _ from nova import objects -from nova.objects import instance as instance_obj from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova.openstack.common import timeutils @@ -600,8 +599,6 @@ def _get_servers(self, req, is_detail): limit=limit, marker=marker, want_objects=True) - for instance in instance_list: - instance_obj.add_image_ref(context, instance) except exception.MarkerNotFound: msg = _('marker [%s] not found') % marker raise exc.HTTPBadRequest(explanation=msg) @@ -769,8 +766,7 @@ def show(self, req, id): context = req.environ['nova.context'] instance = self.compute_api.get(context, id, want_objects=True) - req.cache_db_instance(instance_obj.add_image_ref(context, - instance)) + req.cache_db_instance(instance) return self._view_builder.show(req, instance) except exception.NotFound: msg = _("Instance could not be found") diff --git a/nova/objects/instance.py b/nova/objects/instance.py index 5c7fe317ac..275e7db89d 100644 --- a/nova/objects/instance.py +++ 
b/nova/objects/instance.py @@ -14,7 +14,6 @@ from nova.cells import opts as cells_opts from nova.cells import rpcapi as cells_rpcapi -from nova import compute from nova.compute import flavors from nova import db from nova import exception @@ -568,20 +567,6 @@ def delete_metadata_key(self, context, key): self.obj_reset_changes(['metadata']) -def add_image_ref(context, instance): - """Helper method to add image_ref to instance object.""" - if not instance['image_ref']: - compute_api = compute.API() - bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance['uuid']) - if compute_api.is_volume_backed_instance(context, instance, bdms): - props = bdms.root_metadata( - context, compute_api.image_api, - compute_api.volume_api) - instance['image_ref'] = props['image_id'] - return instance - - def _make_instance_list(context, inst_list, db_inst_list, expected_attrs): get_fault = expected_attrs and 'fault' in expected_attrs inst_faults = {} diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index 8168a169c9..513df07f3a 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -196,8 +196,6 @@ class ServersControllerTest(ControllerTest): def setUp(self): super(ServersControllerTest, self).setUp() - self.compute_api = self.controller.compute_api - self.context = context.RequestContext('fake', 'fake') def test_can_check_loaded_extensions(self): self.ext_mgr.extensions = {'os-fake': None} @@ -261,25 +259,6 @@ def test_get_server_by_uuid(self): res_dict = self.controller.show(req, FAKE_UUID) self.assertEqual(res_dict['server']['id'], FAKE_UUID) - def test_get_server_no_image(self): - - def return_instance(self, *args, **kwargs): - return fakes.stub_instance(id=1, uuid=FAKE_UUID, - project_id=str(uuid.uuid4()), - image_ref='') - - def fake_add_image_ref(context, instance): - instance['image_ref'] = 'fake_image' - return instance - - 
self.stubs.Set(db, 'instance_get_by_uuid', return_instance) - self.stubs.Set(instance_obj, 'add_image_ref', fake_add_image_ref) - - req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID) - server = self.controller.show(req, FAKE_UUID) - - self.assertEqual('fake_image', server['server']['image']['id']) - def test_unique_host_id(self): """Create two servers with the same host and different project_ids and check that the hostId's are unique. @@ -538,29 +517,6 @@ def test_get_server_list(self): self.assertEqual(s['links'], expected_links) - def test_get_servers_no_image(self): - - def fake_get_all(compute_self, context, search_opts=None, - sort_key=None, sort_dir='desc', - limit=None, marker=None, want_objects=False): - db_list = [fakes.stub_instance(100, - uuid=FAKE_UUID, - image_ref='')] - return instance_obj._make_instance_list( - context, objects.InstanceList(), db_list, FIELDS) - - def fake_add_image_ref(context, instance): - instance['image_ref'] = 'fake_image' - return instance - - self.stubs.Set(instance_obj, 'add_image_ref', fake_add_image_ref) - self.stubs.Set(compute_api.API, 'get_all', fake_get_all) - - req = fakes.HTTPRequest.blank('/fake/servers/detail') - res_dict = self.controller.detail(req) - for s in res_dict['servers']: - self.assertEqual('fake_image', s['image']['id']) - def test_get_servers_with_limit(self): req = fakes.HTTPRequest.blank('/fake/servers?limit=3') res_dict = self.controller.index(req) diff --git a/nova/tests/objects/test_instance.py b/nova/tests/objects/test_instance.py index c3bb88f1bd..2d35fd9386 100644 --- a/nova/tests/objects/test_instance.py +++ b/nova/tests/objects/test_instance.py @@ -21,7 +21,6 @@ from nova.cells import rpcapi as cells_rpcapi from nova.compute import flavors -from nova import context from nova import db from nova import exception from nova.network import model as network_model @@ -1089,14 +1088,3 @@ def test_expected_cols(self): self.stubs.Set(instance, '_INSTANCE_OPTIONAL_JOINED_FIELDS', ['bar']) 
self.assertEqual(['bar'], instance._expected_cols(['foo', 'bar'])) self.assertIsNone(instance._expected_cols(None)) - - -class TestAddImageRef(test.TestCase): - @mock.patch('nova.objects.BlockDeviceMappingList.root_metadata') - def test_add_image_ref(self, mock_root_metadata): - mock_root_metadata.return_value = {'image_id': 'fake_image'} - fake_instance = fakes.stub_instance(id=1, uuid=fakes.FAKE_UUID, - image_ref='') - ctx = context.RequestContext('fake-user', 'fake-project') - new_instance = instance.add_image_ref(ctx, fake_instance) - self.assertEqual('fake_image', new_instance['image_ref']) From b33cb0664fdcbfcdb1c6b8f7db806b7f842415fa Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Tue, 15 Jul 2014 15:24:12 -0700 Subject: [PATCH 093/486] Add missing foreign key on pci_devices.compute_node_id The model specifies a foreign key, but no migrations have ever added it. Closes-Bug: 1342834 Change-Id: Ia9151b5a7389fbb4c39155f7e192c24bd99e7a9a --- .../versions/246_add_compute_node_id_fk.py | 41 +++++++++++++++ .../versions/246_sqlite_downgrade.sql | 49 ++++++++++++++++++ .../versions/246_sqlite_upgrade.sql | 50 +++++++++++++++++++ nova/tests/db/test_migrations.py | 10 ++++ 4 files changed, 150 insertions(+) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/246_add_compute_node_id_fk.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_downgrade.sql create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_upgrade.sql diff --git a/nova/db/sqlalchemy/migrate_repo/versions/246_add_compute_node_id_fk.py b/nova/db/sqlalchemy/migrate_repo/versions/246_add_compute_node_id_fk.py new file mode 100644 index 0000000000..a7e32e2d64 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/246_add_compute_node_id_fk.py @@ -0,0 +1,41 @@ +# Copyright 2014 OpenStack Foundation +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the 
License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from migrate.changeset.constraint import ForeignKeyConstraint +from sqlalchemy import MetaData, Table + + +def upgrade(migrate_engine): + """Add missing foreign key constraint on pci_devices.compute_node_id.""" + meta = MetaData(bind=migrate_engine) + + pci_devices = Table('pci_devices', meta, autoload=True) + compute_nodes = Table('compute_nodes', meta, autoload=True) + + fkey = ForeignKeyConstraint(columns=[pci_devices.c.compute_node_id], + refcolumns=[compute_nodes.c.id]) + fkey.create() + + +def downgrade(migrate_engine): + meta = MetaData(bind=migrate_engine) + + pci_devices = Table('pci_devices', meta, autoload=True) + compute_nodes = Table('compute_nodes', meta, autoload=True) + + fkey = ForeignKeyConstraint(columns=[pci_devices.c.compute_node_id], + refcolumns=[compute_nodes.c.id]) + fkey.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_downgrade.sql new file mode 100644 index 0000000000..57c0db235f --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_downgrade.sql @@ -0,0 +1,49 @@ +BEGIN TRANSACTION; + CREATE TABLE pci_devices_new ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted INTEGER, + id INTEGER NOT NULL, + compute_node_id INTEGER NOT NULL, + address VARCHAR(12) NOT NULL, + vendor_id VARCHAR(4) NOT NULL, + product_id VARCHAR(4) NOT NULL, + dev_type VARCHAR(8) NOT NULL, + dev_id VARCHAR(255), + label VARCHAR(255) NOT NULL, + status VARCHAR(36) NOT NULL, + extra_info TEXT, + instance_uuid 
VARCHAR(36), + PRIMARY KEY (id), + CONSTRAINT uniq_pci_devices0compute_node_id0address0deleted UNIQUE (compute_node_id, address, deleted) + ); + + INSERT INTO pci_devices_new + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + compute_node_id, + address, + vendor_id, + product_id, + dev_type, + dev_id, + label, + status, + extra_info, + instance_uuid + FROM pci_devices; + + DROP TABLE pci_devices; + + ALTER TABLE pci_devices_new RENAME TO pci_devices; + + CREATE INDEX ix_pci_devices_compute_node_id_deleted + ON pci_devices (compute_node_id, deleted); + + CREATE INDEX ix_pci_devices_instance_uuid_deleted + ON pci_devices (instance_uuid, deleted); +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_upgrade.sql new file mode 100644 index 0000000000..8aa9ecc78e --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_upgrade.sql @@ -0,0 +1,50 @@ +BEGIN TRANSACTION; + CREATE TABLE pci_devices_new ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted INTEGER, + id INTEGER NOT NULL, + compute_node_id INTEGER NOT NULL, + address VARCHAR(12) NOT NULL, + vendor_id VARCHAR(4) NOT NULL, + product_id VARCHAR(4) NOT NULL, + dev_type VARCHAR(8) NOT NULL, + dev_id VARCHAR(255), + label VARCHAR(255) NOT NULL, + status VARCHAR(36) NOT NULL, + extra_info TEXT, + instance_uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY (compute_node_id) REFERENCES compute_nodes(id), + CONSTRAINT uniq_pci_devices0compute_node_id0address0deleted UNIQUE (compute_node_id, address, deleted) + ); + + INSERT INTO pci_devices_new + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + compute_node_id, + address, + vendor_id, + product_id, + dev_type, + dev_id, + label, + status, + extra_info, + instance_uuid + FROM pci_devices; + + DROP TABLE pci_devices; + + ALTER TABLE pci_devices_new RENAME TO pci_devices; + + CREATE INDEX 
ix_pci_devices_compute_node_id_deleted + ON pci_devices (compute_node_id, deleted); + + CREATE INDEX ix_pci_devices_instance_uuid_deleted + ON pci_devices (instance_uuid, deleted); +COMMIT; diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py index a86d82072a..03e761056f 100644 --- a/nova/tests/db/test_migrations.py +++ b/nova/tests/db/test_migrations.py @@ -707,6 +707,16 @@ def _post_downgrade_245(self, engine): self.assertColumnNotExists(engine, 'networks', 'enable_dhcp') self.assertColumnNotExists(engine, 'networks', 'share_address') + def _check_246(self, engine, data): + pci_devices = oslodbutils.get_table(engine, 'pci_devices') + self.assertEqual(1, len([fk for fk in pci_devices.foreign_keys + if fk.parent.name == 'compute_node_id'])) + + def _post_downgrade_246(self, engine): + pci_devices = oslodbutils.get_table(engine, 'pci_devices') + self.assertEqual(0, len([fk for fk in pci_devices.foreign_keys + if fk.parent.name == 'compute_node_id'])) + class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn): """Test sqlalchemy-migrate migrations.""" From b4a32a9623a31ef215adb6b523fbed994ee5c6b6 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Wed, 18 Jun 2014 01:03:13 +0800 Subject: [PATCH 094/486] Remove rescue/unrescue NotImplementedError handle There are 2 kinds of RPC call from API layer to compute layer, one is cast and another is call. For cast, the RPC message will be posted and the API service will not wait for the message to be processed. So it won't be able to catch the exception raised in compute layer and catch and handle the exception is useless and error leading. This patch removes code in API layer for rescue and unrescue functions. 
Change-Id: Iab8070c25a9dee505c0602efad85c41c21c2aa58 --- nova/api/openstack/compute/contrib/rescue.py | 7 ---- .../openstack/compute/plugins/v3/rescue.py | 7 ---- .../openstack/compute/contrib/test_rescue.py | 33 ------------------- .../compute/plugins/v3/test_rescue.py | 32 ------------------ 4 files changed, 79 deletions(-) diff --git a/nova/api/openstack/compute/contrib/rescue.py b/nova/api/openstack/compute/contrib/rescue.py index 15338a2ffc..47ff29f71d 100644 --- a/nova/api/openstack/compute/contrib/rescue.py +++ b/nova/api/openstack/compute/contrib/rescue.py @@ -74,10 +74,6 @@ def _rescue(self, req, id, body): except exception.InstanceNotRescuable as non_rescuable: raise exc.HTTPBadRequest( explanation=non_rescuable.format_message()) - except NotImplementedError: - msg = _("The rescue operation is not implemented by this " - "cloud.") - raise exc.HTTPNotImplemented(explanation=msg) return {'adminPass': password} @@ -94,9 +90,6 @@ def _unrescue(self, req, id, body): except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'unrescue') - except NotImplementedError: - msg = _("The unrescue operation is not implemented by this cloud.") - raise exc.HTTPNotImplemented(explanation=msg) return webob.Response(status_int=202) diff --git a/nova/api/openstack/compute/plugins/v3/rescue.py b/nova/api/openstack/compute/plugins/v3/rescue.py index 138c21116c..5a5a703682 100644 --- a/nova/api/openstack/compute/plugins/v3/rescue.py +++ b/nova/api/openstack/compute/plugins/v3/rescue.py @@ -25,7 +25,6 @@ from nova.api import validation from nova import compute from nova import exception -from nova.i18n import _ from nova import utils @@ -76,9 +75,6 @@ def _rescue(self, req, id, body): except exception.InstanceNotRescuable as non_rescuable: raise exc.HTTPBadRequest( explanation=non_rescuable.format_message()) - except NotImplementedError: - msg = _("The rescue operation is not implemented by this cloud.") - raise 
exc.HTTPNotImplemented(explanation=msg) if CONF.enable_instance_password: return {'admin_password': password} @@ -100,9 +96,6 @@ def _unrescue(self, req, id, body): except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'unrescue') - except NotImplementedError: - msg = _("The unrescue operation is not implemented by this cloud.") - raise exc.HTTPNotImplemented(explanation=msg) return webob.Response(status_int=202) diff --git a/nova/tests/api/openstack/compute/contrib/test_rescue.py b/nova/tests/api/openstack/compute/contrib/test_rescue.py index c533a1d56d..e69196d6ad 100644 --- a/nova/tests/api/openstack/compute/contrib/test_rescue.py +++ b/nova/tests/api/openstack/compute/contrib/test_rescue.py @@ -12,7 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. -import mock from oslo.config import cfg import webob @@ -165,35 +164,3 @@ def fake_rescue(*args, **kwargs): resp = req.get_response(self.app) self.assertEqual(resp.status_int, 400) - - @mock.patch('nova.compute.api.API.rescue') - def test_rescue_raises_not_implemented(self, rescue_mock): - body = dict(rescue=None) - - def fake_rescue(*args, **kwargs): - raise NotImplementedError('not implemented') - - rescue_mock.side_effect = fake_rescue - req = webob.Request.blank('/v2/fake/servers/test_inst/action') - req.method = "POST" - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" - - resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 501) - - @mock.patch('nova.compute.api.API.unrescue') - def test_unrescue_raises_not_implemented(self, unrescue_mock): - body = dict(unrescue=None) - - def fake_unrescue(*args, **kwargs): - raise NotImplementedError('not implemented') - - unrescue_mock.side_effect = fake_unrescue - req = webob.Request.blank('/v2/fake/servers/test_inst/action') - req.method = "POST" - req.body = jsonutils.dumps(body) - 
req.headers["content-type"] = "application/json" - - resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 501) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_rescue.py b/nova/tests/api/openstack/compute/plugins/v3/test_rescue.py index a2814760b9..786580da39 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_rescue.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_rescue.py @@ -244,35 +244,3 @@ def test_rescue_with_invalid_property(self): resp = req.get_response(self.app) self.assertEqual(400, resp.status_int) - - @mock.patch('nova.compute.api.API.rescue') - def test_rescue_raises_not_implemented(self, rescue_mock): - body = dict(rescue=None) - - def fake_rescue(*args, **kwargs): - raise NotImplementedError('fake message') - - rescue_mock.side_effect = fake_rescue - req = webob.Request.blank('/v3/servers/test_inst/action') - req.method = "POST" - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" - - resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 501) - - @mock.patch('nova.compute.api.API.unrescue') - def test_unrescue_raises_not_implemented(self, unrescue_mock): - body = dict(unrescue=None) - - def fake_unrescue(*args, **kwargs): - raise NotImplementedError('fake message') - - unrescue_mock.side_effect = fake_unrescue - req = webob.Request.blank('/v3/servers/test_inst/action') - req.method = "POST" - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" - - resp = req.get_response(self.app) - self.assertEqual(resp.status_int, 501) From b3144d30d38e2aabaceb620bd04d150b18afb0a4 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Sat, 19 Jul 2014 18:50:34 -0400 Subject: [PATCH 095/486] Removes useless stub of glanceclient create There was a stub out of glanceclient.create that was being called from a couple unit test setUp()'s, but the stub method stubs out something that is overridden by the stub_out_glance() fake method. 
Change-Id: I300f6ccc999958c33a2dd729cb8bde89bed63967 --- .../compute/plugins/v3/test_server_actions.py | 1 - .../api/openstack/compute/test_server_actions.py | 1 - nova/tests/api/openstack/fakes.py | 15 --------------- 3 files changed, 17 deletions(-) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py index d15c8a007b..c6d80774d4 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py @@ -93,7 +93,6 @@ def setUp(self): service_class = 'nova.image.glance.GlanceImageService' self.service = importutils.import_object(service_class) self.sent_to_glance = {} - fakes.stub_out_glanceclient_create(self.stubs, self.sent_to_glance) self.flags(allow_instance_snapshots=True, enable_instance_password=True) self.uuid = FAKE_UUID diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py index 799a755d85..950cadef21 100644 --- a/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/tests/api/openstack/compute/test_server_actions.py @@ -95,7 +95,6 @@ def setUp(self): service_class = 'nova.image.glance.GlanceImageService' self.service = importutils.import_object(service_class) self.sent_to_glance = {} - fakes.stub_out_glanceclient_create(self.stubs, self.sent_to_glance) self.flags(allow_instance_snapshots=True, enable_instance_password=True) self.uuid = FAKE_UUID diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index a41e48f484..f0afb8daea 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -16,7 +16,6 @@ import datetime import uuid -import glanceclient.v1.images import routes import six import webob @@ -289,20 +288,6 @@ def add_fixture(**kwargs): return fixtures -def stub_out_glanceclient_create(stubs, sent_to_glance): - """We return the 
metadata sent to glance by modifying the sent_to_glance - dict in place - """ - orig_add_image = glanceclient.v1.images.ImageManager.create - - def fake_create(context, metadata, data=None): - sent_to_glance['metadata'] = metadata - sent_to_glance['data'] = data - return orig_add_image(metadata, data) - - stubs.Set(glanceclient.v1.images.ImageManager, 'create', fake_create) - - def stub_out_glance(stubs): def fake_get_remote_image_service(): client = glance_stubs.StubGlanceClient(_make_image_fixtures()) From 0b2cf281be48e76a80e2fc73f00529f22e8d9126 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Mon, 28 Apr 2014 11:42:04 +0800 Subject: [PATCH 096/486] Keep Migration status in automatic confirm-resize function in compute/manager.py _poll_unconfirmed_resizes will translate the migrate status from finished to error whenever finds a problem ,Consider the following case 1) _poll_unconfirmed_resizes runs and founds several migrations to be confirmed 2) user deletes the instance, so its task state will be changed to DELETING 3) _poll_unconfirmed_resizes will find the task state is not None, it will make the migration error status 4) following code in _delete if instance.vm_state == vm_states.RESIZED: self._confirm_resize_on_deleting(context, instance) will fail because migration status already updated User is able to do the migration operations when they found error so we don't need to handle this kind of error for them Change-Id: Ia79dd56c9c1fa8add7dc138d838cbfbac4523ac3 Closes-Bug: #1301696 --- nova/compute/manager.py | 11 +++++++++++ nova/tests/compute/test_compute.py | 12 ++++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f1e744f340..86b28ebd0a 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -5165,6 +5165,17 @@ def _set_migration_to_error(migration, reason, **kwargs): _set_migration_to_error(migration, reason, instance=instance) continue + # race condition: The 
instance in DELETING state should not be + # set the migration state to error, otherwise the instance in + # to be deleted which is in RESIZED state + # will not be able to confirm resize + if instance.task_state in [task_states.DELETING, + task_states.SOFT_DELETING]: + msg = ("Instance being deleted or soft deleted during resize " + "confirmation. Skipping.") + LOG.debug(msg, instance=instance) + continue + vm_state = instance['vm_state'] task_state = instance['task_state'] if vm_state != vm_states.RESIZED or task_state is not None: diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 6448ceb2b6..5aecf7f539 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -6113,16 +6113,24 @@ def test_poll_unconfirmed_resizes(self): fake_instance.fake_db_instance(uuid='fake_uuid5', vm_state=vm_states.ACTIVE, task_state=None), + # The expceted migration result will be None instead of error + # since _poll_unconfirmed_resizes will not change it + # when the instance vm state is RESIZED and task state + # is deleting, see bug 1301696 for more detail fake_instance.fake_db_instance(uuid='fake_uuid6', vm_state=vm_states.RESIZED, - task_state='deleting')] + task_state='deleting'), + fake_instance.fake_db_instance(uuid='fake_uuid7', + vm_state=vm_states.RESIZED, + task_state='soft-deleting')] expected_migration_status = {'fake_uuid1': 'confirmed', 'noexist': 'error', 'fake_uuid2': 'error', 'fake_uuid3': 'error', 'fake_uuid4': None, 'fake_uuid5': 'error', - 'fake_uuid6': 'error'} + 'fake_uuid6': None, + 'fake_uuid7': None} migrations = [] for i, instance in enumerate(instances, start=1): fake_mig = test_migration.fake_db_migration() From c2ec61425f0cd244de3741e22f01ec641183718a Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Sun, 20 Jul 2014 13:08:09 -0400 Subject: [PATCH 097/486] Correct use of nova.image.glance in compute API The nova.api.openstack.compute.images modules was using the nova.image.glance module 
directly, instead of using the new, standardized nova.image.API module. This patch corrects that usage and updates the associated unit tests to use mock instead of the nova.tests.api.fake.stub_out_glance() method. Copies the nova.tests.api.fake._make_image_fixtures() call to a new nova.tests.image_fixtures module, and corrects a number of errors in the original function, including not returning datetime objects for timestamp fields (as the nova.image.API returns) and ensuring that the ID field is a string in the returned fixture dicts. The _make_image_fixtures() function and the stub_out_glance() function will be removed in the next patch, which fixes up the image_metadata module, which is the last unit test that uses them. Change-Id: Icd22ff3f438b8ba476f2637c7d4129ea6d0f45e8 --- nova/api/openstack/compute/images.py | 24 ++-- .../compute/plugins/v3/test_server_actions.py | 5 - .../api/openstack/compute/test_images.py | 117 +++++++----------- .../openstack/compute/test_server_actions.py | 5 - nova/tests/api/openstack/fakes.py | 3 + nova/tests/image_fixtures.py | 78 ++++++++++++ 6 files changed, 138 insertions(+), 94 deletions(-) create mode 100644 nova/tests/image_fixtures.py diff --git a/nova/api/openstack/compute/images.py b/nova/api/openstack/compute/images.py index 216e0858f7..0763bc7354 100644 --- a/nova/api/openstack/compute/images.py +++ b/nova/api/openstack/compute/images.py @@ -21,7 +21,7 @@ from nova.api.openstack import xmlutil from nova import exception from nova.i18n import _ -import nova.image.glance +import nova.image import nova.utils @@ -89,15 +89,9 @@ class Controller(wsgi.Controller): _view_builder_class = views_images.ViewBuilder - def __init__(self, image_service=None, **kwargs): - """Initialize new `ImageController`. 
- - :param image_service: `nova.image.glance:GlanceImageService` - - """ + def __init__(self, **kwargs): super(Controller, self).__init__(**kwargs) - self._image_service = (image_service or - nova.image.glance.get_default_image_service()) + self._image_api = nova.image.API() def _get_filters(self, req): """Return a dictionary of query param filters from the request. @@ -136,7 +130,7 @@ def show(self, req, id): context = req.environ['nova.context'] try: - image = self._image_service.show(context, id) + image = self._image_api.get(context, id) except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") raise webob.exc.HTTPNotFound(explanation=explanation) @@ -152,7 +146,7 @@ def delete(self, req, id): """ context = req.environ['nova.context'] try: - self._image_service.delete(context, id) + self._image_api.delete(context, id) except exception.ImageNotFound: explanation = _("Image not found.") raise webob.exc.HTTPNotFound(explanation=explanation) @@ -178,8 +172,8 @@ def index(self, req): params[key] = val try: - images = self._image_service.detail(context, filters=filters, - **page_params) + images = self._image_api.get_all(context, filters=filters, + **page_params) except exception.Invalid as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) return self._view_builder.index(req, images) @@ -198,8 +192,8 @@ def detail(self, req): for key, val in page_params.iteritems(): params[key] = val try: - images = self._image_service.detail(context, filters=filters, - **page_params) + images = self._image_api.get_all(context, filters=filters, + **page_params) except exception.Invalid as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py index c6d80774d4..4addad38a5 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py +++ 
b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py @@ -29,7 +29,6 @@ from nova import exception from nova.image import glance from nova import objects -from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import uuidutils from nova import test @@ -86,13 +85,9 @@ def setUp(self): self.stubs.Set(db, 'instance_update_and_get_original', instance_update_and_get_original) - fakes.stub_out_glance(self.stubs) fakes.stub_out_nw_api(self.stubs) fakes.stub_out_compute_api_snapshot(self.stubs) fake.stub_out_image_service(self.stubs) - service_class = 'nova.image.glance.GlanceImageService' - self.service = importutils.import_object(service_class) - self.sent_to_glance = {} self.flags(allow_instance_snapshots=True, enable_instance_password=True) self.uuid = FAKE_UUID diff --git a/nova/tests/api/openstack/compute/test_images.py b/nova/tests/api/openstack/compute/test_images.py index f432b18268..b0df591111 100644 --- a/nova/tests/api/openstack/compute/test_images.py +++ b/nova/tests/api/openstack/compute/test_images.py @@ -21,7 +21,7 @@ import copy from lxml import etree -import six.moves.urllib.parse as urlparse +import mock import webob from nova.api.openstack.compute import images @@ -31,11 +31,13 @@ from nova.image import glance from nova import test from nova.tests.api.openstack import fakes +from nova.tests import image_fixtures from nova.tests import matchers NS = "{http://docs.openstack.org/compute/api/v1.1}" ATOMNS = "{http://www.w3.org/2005/Atom}" NOW_API_FORMAT = "2010-10-11T10:30:22Z" +IMAGE_FIXTURES = image_fixtures.get_image_fixtures() class ImagesControllerTest(test.NoDBTestCase): @@ -50,7 +52,6 @@ def setUp(self): fakes.stub_out_key_pair_funcs(self.stubs) fakes.stub_out_compute_api_snapshot(self.stubs) fakes.stub_out_compute_api_backup(self.stubs) - fakes.stub_out_glance(self.stubs) self.controller = images.Controller() self.uuid = 'fa95aaf5-ab3b-4cd8-88c0-2be7dd051aaf' @@ -61,8 +62,6 
@@ def setUp(self): self.server_bookmark = ( "http://localhost/fake/servers/" + self.server_uuid) self.alternate = "%s/fake/images/%s" - self.fake_req = fakes.HTTPRequest.blank('/v2/fake/images/123') - self.actual_image = self.controller.show(self.fake_req, '124') self.expected_image_123 = { "image": {'id': '123', @@ -139,16 +138,19 @@ def setUp(self): }, } - self.image_service = self.mox.CreateMockAnything() + @mock.patch('nova.image.api.API.get', return_value=IMAGE_FIXTURES[0]) + def test_get_image(self, get_mocked): + request = fakes.HTTPRequest.blank('/v2/fake/images/123') + actual_image = self.controller.show(request, '123') + self.assertThat(actual_image, + matchers.DictMatches(self.expected_image_123)) + get_mocked.assert_called_once_with(mock.ANY, '123') - def test_get_image(self): - self.assertThat(self.actual_image, - matchers.DictMatches(self.expected_image_124)) - - def test_get_image_with_custom_prefix(self): + @mock.patch('nova.image.api.API.get', return_value=IMAGE_FIXTURES[1]) + def test_get_image_with_custom_prefix(self, _get_mocked): self.flags(osapi_compute_link_prefix='https://zoo.com:42', osapi_glance_link_prefix='http://circus.com:34') - fake_req = fakes.HTTPRequest.blank('/v2/fake/images/123') + fake_req = fakes.HTTPRequest.blank('/v2/fake/images/124') actual_image = self.controller.show(fake_req, '124') expected_image = self.expected_image_124 @@ -165,14 +167,18 @@ def test_get_image_with_custom_prefix(self): self.assertThat(actual_image, matchers.DictMatches(expected_image)) - def test_get_image_404(self): + @mock.patch('nova.image.api.API.get', side_effect=exception.NotFound) + def test_get_image_404(self, _get_mocked): fake_req = fakes.HTTPRequest.blank('/v2/fake/images/unknown') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, fake_req, 'unknown') - def test_get_image_details(self): + @mock.patch('nova.image.api.API.get_all', return_value=IMAGE_FIXTURES) + def test_get_image_details(self, get_all_mocked): request = 
fakes.HTTPRequest.blank('/v2/fake/images/detail') response = self.controller.detail(request) + + get_all_mocked.assert_called_once_with(mock.ANY, filters={}) response_list = response["images"] image_125 = copy.deepcopy(self.expected_image_124["image"]) @@ -254,49 +260,24 @@ def test_get_image_details(self): self.assertThat(expected, matchers.DictListMatches(response_list)) - def test_get_image_details_with_limit(self): + @mock.patch('nova.image.api.API.get_all') + def test_get_image_details_with_limit(self, get_all_mocked): request = fakes.HTTPRequest.blank('/v2/fake/images/detail?limit=2') - response = self.controller.detail(request) - response_list = response["images"] - response_links = response["images_links"] - - expected = [self.expected_image_123["image"], - self.expected_image_124["image"]] + self.controller.detail(request) + get_all_mocked.assert_called_once_with(mock.ANY, limit=2, filters={}) - self.assertThat(expected, matchers.DictListMatches(response_list)) - - href_parts = urlparse.urlparse(response_links[0]['href']) - self.assertEqual('/v2/fake/images', href_parts.path) - params = urlparse.parse_qs(href_parts.query) - - self.assertThat({'limit': ['2'], 'marker': ['124']}, - matchers.DictMatches(params)) - - def test_get_image_details_with_limit_and_page_size(self): + @mock.patch('nova.image.api.API.get_all') + def test_get_image_details_with_limit_and_page_size(self, get_all_mocked): request = fakes.HTTPRequest.blank( '/v2/fake/images/detail?limit=2&page_size=1') - response = self.controller.detail(request) - response_list = response["images"] - response_links = response["images_links"] - - expected = [self.expected_image_123["image"], - self.expected_image_124["image"]] - - self.assertThat(expected, matchers.DictListMatches(response_list)) - - href_parts = urlparse.urlparse(response_links[0]['href']) - self.assertEqual('/v2/fake/images', href_parts.path) - params = urlparse.parse_qs(href_parts.query) + self.controller.detail(request) + 
get_all_mocked.assert_called_once_with(mock.ANY, limit=2, filters={}, + page_size=1) - self.assertThat({'limit': ['2'], 'page_size': ['1'], - 'marker': ['124']}, matchers.DictMatches(params)) - - def _detail_request(self, filters, request): - context = request.environ['nova.context'] - self.image_service.detail(context, filters=filters).AndReturn([]) - self.mox.ReplayAll() - controller = images.Controller(image_service=self.image_service) - controller.detail(request) + @mock.patch('nova.image.api.API.get_all') + def _detail_request(self, filters, request, get_all_mocked): + self.controller.detail(request) + get_all_mocked.assert_called_once_with(mock.ANY, filters=filters) def test_image_detail_filter_with_name(self): filters = {'name': 'testname'} @@ -348,14 +329,11 @@ def test_image_detail_no_filters(self): request = fakes.HTTPRequest.blank('/v2/fake/images/detail') self._detail_request(filters, request) - def test_image_detail_invalid_marker(self): - class InvalidImageService(object): - def detail(self, *args, **kwargs): - raise exception.Invalid('meow') - + @mock.patch('nova.image.api.API.get_all', side_effect=exception.Invalid) + def test_image_detail_invalid_marker(self, _get_all_mocked): request = fakes.HTTPRequest.blank('/v2/images?marker=invalid') - controller = images.Controller(image_service=InvalidImageService()) - self.assertRaises(webob.exc.HTTPBadRequest, controller.detail, request) + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, + request) def test_generate_alternate_link(self): view = images_view.ViewBuilder() @@ -364,25 +342,26 @@ def test_generate_alternate_link(self): actual_url = "%s/fake/images/1" % glance.generate_glance_url() self.assertEqual(generated_url, actual_url) - def test_delete_image(self): + @mock.patch('nova.image.api.API.delete') + def test_delete_image(self, delete_mocked): request = fakes.HTTPRequest.blank('/v2/fake/images/124') request.method = 'DELETE' response = self.controller.delete(request, '124') 
self.assertEqual(response.status_int, 204) + delete_mocked.assert_called_once_with(mock.ANY, '124') - def test_delete_deleted_image(self): - """If you try to delete a deleted image, you get back 403 Forbidden.""" - - deleted_image_id = 128 - # see nova.tests.api.openstack.fakes:_make_image_fixtures - - request = fakes.HTTPRequest.blank( - '/v2/fake/images/%s' % deleted_image_id) + @mock.patch('nova.image.api.API.delete', + side_effect=exception.ImageNotAuthorized(image_id='123')) + def test_delete_deleted_image(self, _delete_mocked): + # If you try to delete a deleted image, you get back 403 Forbidden. + request = fakes.HTTPRequest.blank('/v2/fake/images/123') request.method = 'DELETE' self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, - request, '%s' % deleted_image_id) + request, '123') - def test_delete_image_not_found(self): + @mock.patch('nova.image.api.API.delete', + side_effect=exception.ImageNotFound(image_id='123')) + def test_delete_image_not_found(self, _delete_mocked): request = fakes.HTTPRequest.blank('/v2/fake/images/300') request.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py index 950cadef21..04c29d0bdb 100644 --- a/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/tests/api/openstack/compute/test_server_actions.py @@ -30,7 +30,6 @@ from nova import exception from nova.image import glance from nova import objects -from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import uuidutils from nova import test @@ -88,13 +87,9 @@ def setUp(self): self.stubs.Set(db, 'instance_update_and_get_original', instance_update_and_get_original) - fakes.stub_out_glance(self.stubs) fakes.stub_out_nw_api(self.stubs) fakes.stub_out_compute_api_snapshot(self.stubs) fake.stub_out_image_service(self.stubs) - service_class = 
'nova.image.glance.GlanceImageService' - self.service = importutils.import_object(service_class) - self.sent_to_glance = {} self.flags(allow_instance_snapshots=True, enable_instance_password=True) self.uuid = FAKE_UUID diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index f0afb8daea..72df1c3d5d 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -248,6 +248,9 @@ def validate_networks(self, context, networks, max_count): fake_network.stub_out_nw_api_get_instance_nw_info(stubs) +# TODO(jaypipes): Remove this when stub_out_glance() is removed after +# image metadata pieces are fixed to call nova.image.API instead of the +# nova.image.glance module directly. def _make_image_fixtures(): NOW_GLANCE_FORMAT = "2010-10-11T10:30:22" diff --git a/nova/tests/image_fixtures.py b/nova/tests/image_fixtures.py new file mode 100644 index 0000000000..771b5e1a3f --- /dev/null +++ b/nova/tests/image_fixtures.py @@ -0,0 +1,78 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +# nova.image.glance._translate_from_glance() returns datetime +# objects, not strings. +NOW_DATE = datetime.datetime(2010, 10, 11, 10, 30, 22) + + +def get_image_fixtures(): + """Returns a set of image fixture dicts for use in unit tests. + + Returns a set of dicts representing images/snapshots of varying statuses + that would be returned from a call to + `glanceclient.client.Client.images.list`. 
The IDs of the images returned + start at 123 and go to 131, with the following brief summary of image + attributes: + + ID Type Status Notes + ----------------------------------------------------------------- + 123 Public image active + 124 Snapshot queued + 125 Snapshot saving + 126 Snapshot active + 127 Snapshot killed + 128 Snapshot deleted + 129 Snapshot pending_delete + 130 Public image active Has no name + """ + + image_id = 123 + + fixtures = [] + + def add_fixture(**kwargs): + kwargs.update(created_at=NOW_DATE, + updated_at=NOW_DATE) + fixtures.append(kwargs) + + # Public image + add_fixture(id=str(image_id), name='public image', is_public=True, + status='active', properties={'key1': 'value1'}, + min_ram="128", min_disk="10", size='25165824') + image_id += 1 + + # Snapshot for User 1 + uuid = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74' + snapshot_properties = {'instance_uuid': uuid, 'user_id': 'fake'} + for status in ('queued', 'saving', 'active', 'killed', + 'deleted', 'pending_delete'): + deleted = False if status != 'deleted' else True + deleted_at = NOW_DATE if deleted else None + + add_fixture(id=str(image_id), name='%s snapshot' % status, + is_public=False, status=status, + properties=snapshot_properties, size='25165824', + deleted=deleted, deleted_at=deleted_at) + image_id += 1 + + # Image without a name + add_fixture(id=str(image_id), is_public=True, status='active', + properties={}) + # Image for permission tests + image_id += 1 + add_fixture(id=str(image_id), is_public=True, status='active', + properties={}, owner='authorized_fake') + + return fixtures From e8be54105b9e6ca1f8d44ed80136897cd21784e5 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Mon, 21 Jul 2014 07:20:40 +0800 Subject: [PATCH 098/486] Downgrade the warn log in network to debug Downgrade the log from warn to debug for overquota. Didn't remove it because it's called internally and it might be helpful for operater to take a look. 
Change-Id: I1d3c27a8d97085d9919180a28018b234392f28e4 --- nova/network/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index e47d2adc25..7587aa350c 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -873,8 +873,8 @@ def allocate_fixed_ip(self, context, instance_id, network, **kwargs): user_id=quota_user) cleanup.append(functools.partial(quotas.rollback, context)) except exception.OverQuota: - LOG.warn(_("Quota exceeded for %s, tried to allocate " - "fixed IP"), context.project_id) + LOG.debug("Quota exceeded for %s, tried to allocate " + "fixed IP", context.project_id) raise exception.FixedIpLimitExceeded() try: From 4dcfa79058a407eb7a6ce83f4d804df19ec95026 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Mon, 21 Jul 2014 07:53:07 +0800 Subject: [PATCH 099/486] Add context as param to cleanup function In nova-network, when we start to cleanup the stuffs it did when handle exception but the function is following in objects/fixed_ips.py def disassociate(self, context): so the context param is lost and it can lead to problem. 
Change-Id: I66a1681f47fc69bf0fd92137ee27481659833bef Closes-Bug: #1350268 --- nova/network/manager.py | 2 +- nova/tests/network/test_manager.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index e47d2adc25..c4f3e21bf2 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -893,7 +893,7 @@ def allocate_fixed_ip(self, context, instance_id, network, **kwargs): fip.allocated = True fip.virtual_interface_id = vif.id fip.save() - cleanup.append(fip.disassociate) + cleanup.append(functools.partial(fip.disassociate, context)) self._do_trigger_security_group_members_refresh_for_instance( instance_id) diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index e2f62e2617..f36a4ebb28 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -730,7 +730,7 @@ def test_allocate_fixed_ip_cleanup(self, mock.call(instance.uuid, '') ]) - mock_fixedip_disassociate.assert_called_once() + mock_fixedip_disassociate.assert_called_once_with(self.context) class FlatDHCPNetworkTestCase(test.TestCase): From 1ae738371142d47c5d036a79bfb038108c0a7c17 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 21 Jul 2014 06:04:31 +0000 Subject: [PATCH 100/486] Imported Translations from Transifex Change-Id: I7686f34f321ed13c8806f680763f1876d615c88d --- nova/locale/de/LC_MESSAGES/nova-log-info.po | 89 +- .../locale/en_AU/LC_MESSAGES/nova-log-info.po | 89 +- .../en_GB/LC_MESSAGES/nova-log-error.po | 97 +- .../locale/en_GB/LC_MESSAGES/nova-log-info.po | 91 +- nova/locale/en_US/LC_MESSAGES/nova.po | 2547 ++++++++-------- .../es/LC_MESSAGES/nova-log-critical.po | 33 + nova/locale/es/LC_MESSAGES/nova-log-error.po | 101 +- nova/locale/es/LC_MESSAGES/nova-log-info.po | 93 +- .../locale/es/LC_MESSAGES/nova-log-warning.po | 110 +- nova/locale/es/LC_MESSAGES/nova.po | 2571 +++++++++-------- .../fr/LC_MESSAGES/nova-log-critical.po | 13 +- 
nova/locale/fr/LC_MESSAGES/nova-log-error.po | 97 +- nova/locale/fr/LC_MESSAGES/nova-log-info.po | 93 +- nova/locale/it/LC_MESSAGES/nova-log-info.po | 143 +- nova/locale/ja/LC_MESSAGES/nova-log-error.po | 97 +- nova/locale/ja/LC_MESSAGES/nova-log-info.po | 89 +- .../ko_KR/LC_MESSAGES/nova-log-error.po | 97 +- .../locale/ko_KR/LC_MESSAGES/nova-log-info.po | 89 +- nova/locale/nova-log-critical.pot | 16 +- nova/locale/nova-log-error.pot | 97 +- nova/locale/nova-log-info.pot | 91 +- nova/locale/nova-log-warning.pot | 103 +- nova/locale/nova.pot | 2539 ++++++++-------- .../pt_BR/LC_MESSAGES/nova-log-error.po | 97 +- .../locale/pt_BR/LC_MESSAGES/nova-log-info.po | 91 +- .../zh_CN/LC_MESSAGES/nova-log-error.po | 97 +- .../locale/zh_CN/LC_MESSAGES/nova-log-info.po | 91 +- .../locale/zh_TW/LC_MESSAGES/nova-log-info.po | 89 +- 28 files changed, 5130 insertions(+), 4720 deletions(-) create mode 100644 nova/locale/es/LC_MESSAGES/nova-log-critical.po diff --git a/nova/locale/de/LC_MESSAGES/nova-log-info.po b/nova/locale/de/LC_MESSAGES/nova-log-info.po index 41f4e94e77..6101301ce3 100644 --- a/nova/locale/de/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/de/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: German (http://www.transifex.com/projects/p/nova/language/" @@ -19,28 +19,34 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" +#: nova/compute/manager.py:5422 +#, python-format +msgid "" +"During sync_power_state the instance has a pending task (%(task)s). Skip." 
+msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "Eventlet backdoor hört auf %(port)s für Prozess %(pid)d" -#: nova/openstack/common/lockutils.py:83 +#: nova/openstack/common/lockutils.py:82 #, python-format msgid "Created lock path: %s" msgstr "" -#: nova/openstack/common/lockutils.py:250 +#: nova/openstack/common/lockutils.py:251 #, python-format msgid "Failed to remove file %(file)s" msgstr "" -#: nova/openstack/common/periodic_task.py:125 +#: nova/openstack/common/periodic_task.py:126 #, python-format msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" "Überspringe periodische Aufgabe %(task)s weil der Intervall negativ ist" -#: nova/openstack/common/periodic_task.py:130 +#: nova/openstack/common/periodic_task.py:131 #, python-format msgid "Skipping periodic task %(task)s because it is disabled" msgstr "Überspringe periodische Aufgabe %(task)s weil sie deaktiviert ist" @@ -103,121 +109,126 @@ msgstr "Lösche doppelte Zeile mit der ID %(id)s aus der Tabelle %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/libvirt/driver.py:894 +#: nova/virt/firewall.py:446 +#, python-format +msgid "instance chain %s disappeared during refresh, skipping" +msgstr "" + +#: nova/virt/libvirt/driver.py:839 +#, python-format +msgid "" +"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:958 msgid "Instance destroyed successfully." msgstr "" -#: nova/virt/libvirt/driver.py:904 +#: nova/virt/libvirt/driver.py:968 msgid "Instance may be started again." msgstr "" -#: nova/virt/libvirt/driver.py:914 +#: nova/virt/libvirt/driver.py:978 msgid "Going to destroy instance again." 
msgstr "" -#: nova/virt/libvirt/driver.py:1518 +#: nova/virt/libvirt/driver.py:1574 msgid "Beginning live snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1521 +#: nova/virt/libvirt/driver.py:1577 msgid "Beginning cold snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1550 +#: nova/virt/libvirt/driver.py:1606 msgid "Snapshot extracted, beginning image upload" msgstr "" -#: nova/virt/libvirt/driver.py:1562 +#: nova/virt/libvirt/driver.py:1618 msgid "Snapshot image upload complete" msgstr "" -#: nova/virt/libvirt/driver.py:1972 +#: nova/virt/libvirt/driver.py:2049 msgid "Instance soft rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2015 +#: nova/virt/libvirt/driver.py:2092 msgid "Instance shutdown successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2023 +#: nova/virt/libvirt/driver.py:2100 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" -#: nova/virt/libvirt/driver.py:2091 +#: nova/virt/libvirt/driver.py:2168 msgid "Instance rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2259 +#: nova/virt/libvirt/driver.py:2336 msgid "Instance spawned successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:2275 +#: nova/virt/libvirt/driver.py:2352 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341 +#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" -#: nova/virt/libvirt/driver.py:2568 +#: nova/virt/libvirt/driver.py:2645 msgid "Creating image" msgstr "" -#: nova/virt/libvirt/driver.py:2677 +#: nova/virt/libvirt/driver.py:2754 msgid "Using config drive" msgstr "" -#: nova/virt/libvirt/driver.py:2686 +#: nova/virt/libvirt/driver.py:2763 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/libvirt/driver.py:3223 +#: nova/virt/libvirt/driver.py:3315 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821 -#: nova/virt/libvirt/driver.py:3849 -#, python-format -msgid "libvirt can't find a domain with id: %s" -msgstr "" - -#: nova/virt/libvirt/driver.py:4109 +#: nova/virt/libvirt/driver.py:4193 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4115 +#: nova/virt/libvirt/driver.py:4199 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" msgstr "" -#: nova/virt/libvirt/driver.py:4330 +#: nova/virt/libvirt/driver.py:4441 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:4986 +#: nova/virt/libvirt/driver.py:5174 msgid "Instance running successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:5226 +#: nova/virt/libvirt/driver.py:5414 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5238 +#: nova/virt/libvirt/driver.py:5426 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5241 +#: nova/virt/libvirt/driver.py:5429 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -230,7 +241,7 @@ msgstr "" msgid "Ensuring static filters" msgstr "" -#: nova/virt/libvirt/firewall.py:306 +#: nova/virt/libvirt/firewall.py:304 msgid "Attempted to unfilter instance which is not filtered" msgstr "" @@ -287,11 +298,11 @@ msgstr "" msgid "Removable base files: %s" msgstr "" -#: nova/virt/libvirt/utils.py:530 +#: nova/virt/libvirt/utils.py:536 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1352 +#: nova/virt/xenapi/vm_utils.py:1353 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po b/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po index 196c1ec0d5..9f8d2df148 100644 --- a/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: English (Australia) (http://www.transifex.com/projects/p/nova/" @@ -19,27 +19,33 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" +#: nova/compute/manager.py:5422 +#, python-format +msgid "" +"During sync_power_state the instance has a pending task (%(task)s). Skip." 
+msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "Eventlet backdoor listening on %(port)s for process %(pid)d" -#: nova/openstack/common/lockutils.py:83 +#: nova/openstack/common/lockutils.py:82 #, python-format msgid "Created lock path: %s" msgstr "" -#: nova/openstack/common/lockutils.py:250 +#: nova/openstack/common/lockutils.py:251 #, python-format msgid "Failed to remove file %(file)s" msgstr "" -#: nova/openstack/common/periodic_task.py:125 +#: nova/openstack/common/periodic_task.py:126 #, python-format msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "Skipping periodic task %(task)s because its interval is negative" -#: nova/openstack/common/periodic_task.py:130 +#: nova/openstack/common/periodic_task.py:131 #, python-format msgid "Skipping periodic task %(task)s because it is disabled" msgstr "Skipping periodic task %(task)s because it is disabled" @@ -101,121 +107,126 @@ msgstr "Deleting duplicated row with id: %(id)s from table: %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/libvirt/driver.py:894 +#: nova/virt/firewall.py:446 +#, python-format +msgid "instance chain %s disappeared during refresh, skipping" +msgstr "" + +#: nova/virt/libvirt/driver.py:839 +#, python-format +msgid "" +"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:958 msgid "Instance destroyed successfully." msgstr "" -#: nova/virt/libvirt/driver.py:904 +#: nova/virt/libvirt/driver.py:968 msgid "Instance may be started again." msgstr "" -#: nova/virt/libvirt/driver.py:914 +#: nova/virt/libvirt/driver.py:978 msgid "Going to destroy instance again." 
msgstr "" -#: nova/virt/libvirt/driver.py:1518 +#: nova/virt/libvirt/driver.py:1574 msgid "Beginning live snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1521 +#: nova/virt/libvirt/driver.py:1577 msgid "Beginning cold snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1550 +#: nova/virt/libvirt/driver.py:1606 msgid "Snapshot extracted, beginning image upload" msgstr "" -#: nova/virt/libvirt/driver.py:1562 +#: nova/virt/libvirt/driver.py:1618 msgid "Snapshot image upload complete" msgstr "" -#: nova/virt/libvirt/driver.py:1972 +#: nova/virt/libvirt/driver.py:2049 msgid "Instance soft rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2015 +#: nova/virt/libvirt/driver.py:2092 msgid "Instance shutdown successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2023 +#: nova/virt/libvirt/driver.py:2100 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" -#: nova/virt/libvirt/driver.py:2091 +#: nova/virt/libvirt/driver.py:2168 msgid "Instance rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2259 +#: nova/virt/libvirt/driver.py:2336 msgid "Instance spawned successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:2275 +#: nova/virt/libvirt/driver.py:2352 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341 +#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" -#: nova/virt/libvirt/driver.py:2568 +#: nova/virt/libvirt/driver.py:2645 msgid "Creating image" msgstr "" -#: nova/virt/libvirt/driver.py:2677 +#: nova/virt/libvirt/driver.py:2754 msgid "Using config drive" msgstr "" -#: nova/virt/libvirt/driver.py:2686 +#: nova/virt/libvirt/driver.py:2763 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/libvirt/driver.py:3223 +#: nova/virt/libvirt/driver.py:3315 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821 -#: nova/virt/libvirt/driver.py:3849 -#, python-format -msgid "libvirt can't find a domain with id: %s" -msgstr "" - -#: nova/virt/libvirt/driver.py:4109 +#: nova/virt/libvirt/driver.py:4193 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4115 +#: nova/virt/libvirt/driver.py:4199 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" msgstr "" -#: nova/virt/libvirt/driver.py:4330 +#: nova/virt/libvirt/driver.py:4441 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:4986 +#: nova/virt/libvirt/driver.py:5174 msgid "Instance running successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:5226 +#: nova/virt/libvirt/driver.py:5414 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5238 +#: nova/virt/libvirt/driver.py:5426 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5241 +#: nova/virt/libvirt/driver.py:5429 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -228,7 +239,7 @@ msgstr "" msgid "Ensuring static filters" msgstr "" -#: nova/virt/libvirt/firewall.py:306 +#: nova/virt/libvirt/firewall.py:304 msgid "Attempted to unfilter instance which is not filtered" msgstr "" @@ -285,11 +296,11 @@ msgstr "" msgid "Removable base files: %s" msgstr "" -#: nova/virt/libvirt/utils.py:530 +#: nova/virt/libvirt/utils.py:536 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1352 +#: nova/virt/xenapi/vm_utils.py:1353 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po b/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po index 8bd8939bcb..0219e8f891 100644 --- a/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:08+0000\n" +"POT-Creation-Date: 2014-07-21 06:04+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" @@ -44,6 +44,11 @@ msgstr "" msgid "Keystone failure: %s" msgstr "" +#: nova/compute/manager.py:5416 +msgid "" +"Periodic sync_power_state task had an error while processing an instance." 
+msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "Failed to notify cells of instance fault" @@ -58,7 +63,7 @@ msgstr "Original exception being dropped: %s" msgid "Unexpected exception occurred %d time(s)... retrying." msgstr "Unexpected exception occurred %d time(s)... retrying." -#: nova/openstack/common/lockutils.py:120 +#: nova/openstack/common/lockutils.py:119 #, python-format msgid "Could not release the acquired lock `%s`" msgstr "" @@ -71,22 +76,22 @@ msgstr "in fixed duration looping call" msgid "in dynamic looping call" msgstr "in dynamic looping call" -#: nova/openstack/common/periodic_task.py:179 +#: nova/openstack/common/periodic_task.py:202 #, python-format msgid "Error during %(full_task_name)s: %(e)s" msgstr "Error during %(full_task_name)s: %(e)s" -#: nova/openstack/common/policy.py:511 +#: nova/openstack/common/policy.py:507 #, python-format msgid "Failed to understand rule %s" msgstr "Failed to understand rule %s" -#: nova/openstack/common/policy.py:521 +#: nova/openstack/common/policy.py:517 #, python-format msgid "No handler for matches of kind %s" msgstr "No handler for matches of kind %s" -#: nova/openstack/common/policy.py:791 +#: nova/openstack/common/policy.py:787 #, python-format msgid "Failed to understand rule %r" msgstr "Failed to understand rule %r" @@ -116,137 +121,133 @@ msgstr "DB exception wrapped." msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:625 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:749 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:873 +#: nova/virt/libvirt/driver.py:937 #, python-format msgid "Error from libvirt during destroy. 
Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:889 -msgid "During wait destroy, instance disappeared." -msgstr "" - -#: nova/virt/libvirt/driver.py:951 +#: nova/virt/libvirt/driver.py:1015 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:977 +#: nova/virt/libvirt/driver.py:1041 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1389 +#: nova/virt/libvirt/driver.py:1445 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1414 +#: nova/virt/libvirt/driver.py:1470 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1663 +#: nova/virt/libvirt/driver.py:1719 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1749 +#: nova/virt/libvirt/driver.py:1827 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1755 +#: nova/virt/libvirt/driver.py:1833 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1804 +#: nova/virt/libvirt/driver.py:1882 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:1951 +#: nova/virt/libvirt/driver.py:2028 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421 +#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2542 +#: nova/virt/libvirt/driver.py:2619 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2693 +#: nova/virt/libvirt/driver.py:2770 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2786 +#: nova/virt/libvirt/driver.py:2863 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3553 +#: nova/virt/libvirt/driver.py:3645 #, python-format msgid "An error occurred while trying to define a domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3562 +#: nova/virt/libvirt/driver.py:3654 #, python-format msgid "An error occurred while trying to launch a defined domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3571 +#: nova/virt/libvirt/driver.py:3663 #, python-format msgid "An error occurred while enabling hairpin mode on domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3589 +#: nova/virt/libvirt/driver.py:3681 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3904 +#: nova/virt/libvirt/driver.py:3988 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4481 +#: nova/virt/libvirt/driver.py:4667 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5231 +#: nova/virt/libvirt/driver.py:5419 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:202 +#: nova/virt/libvirt/imagebackend.py:201 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:230 +#: nova/virt/libvirt/imagebackend.py:229 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:501 +#: nova/virt/libvirt/imagebackend.py:505 #, python-format msgid "error opening rbd image %s" msgstr "" @@ -266,20 +267,20 @@ msgstr "" msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "" -#: nova/virt/libvirt/lvm.py:201 +#: nova/virt/libvirt/lvm.py:200 #, python-format msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572 -#: nova/virt/libvirt/vif.py:596 +#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 +#: nova/virt/libvirt/vif.py:562 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676 -#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717 -#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762 -#: nova/virt/libvirt/vif.py:784 +#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 +#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 +#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 +#: nova/virt/libvirt/vif.py:737 msgid "Failed while unplugging vif" msgstr "" @@ -288,12 +289,18 @@ msgstr "" msgid "Unknown content in connection_info/access_mode: %s" msgstr "" -#: nova/virt/libvirt/volume.py:666 +#: nova/virt/libvirt/volume.py:669 #, python-format msgid "Couldn't 
unmount the NFS share %s" msgstr "" -#: nova/virt/libvirt/volume.py:815 +#: nova/virt/libvirt/volume.py:818 #, python-format msgid "Couldn't unmount the GlusterFS share %s" msgstr "" + +#: nova/virt/vmwareapi/vmops.py:500 +#, python-format +msgid "" +"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s" +msgstr "" diff --git a/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po b/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po index ef67439723..dfc10157d2 100644 --- a/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" -"PO-Revision-Date: 2014-06-30 05:01+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"PO-Revision-Date: 2014-07-16 14:42+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" "nova/language/en_GB/)\n" @@ -19,27 +19,33 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" +#: nova/compute/manager.py:5422 +#, python-format +msgid "" +"During sync_power_state the instance has a pending task (%(task)s). Skip." 
+msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "Eventlet backdoor listening on %(port)s for process %(pid)d" -#: nova/openstack/common/lockutils.py:83 +#: nova/openstack/common/lockutils.py:82 #, python-format msgid "Created lock path: %s" msgstr "Created lock path: %s" -#: nova/openstack/common/lockutils.py:250 +#: nova/openstack/common/lockutils.py:251 #, python-format msgid "Failed to remove file %(file)s" msgstr "" -#: nova/openstack/common/periodic_task.py:125 +#: nova/openstack/common/periodic_task.py:126 #, python-format msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "Skipping periodic task %(task)s because its interval is negative" -#: nova/openstack/common/periodic_task.py:130 +#: nova/openstack/common/periodic_task.py:131 #, python-format msgid "Skipping periodic task %(task)s because it is disabled" msgstr "Skipping periodic task %(task)s because it is disabled" @@ -101,95 +107,100 @@ msgstr "Deleting duplicated row with id: %(id)s from table: %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/libvirt/driver.py:894 +#: nova/virt/firewall.py:446 +#, python-format +msgid "instance chain %s disappeared during refresh, skipping" +msgstr "" + +#: nova/virt/libvirt/driver.py:839 +#, python-format +msgid "" +"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:958 msgid "Instance destroyed successfully." msgstr "Instance destroyed successfully." -#: nova/virt/libvirt/driver.py:904 +#: nova/virt/libvirt/driver.py:968 msgid "Instance may be started again." msgstr "Instance may be started again." -#: nova/virt/libvirt/driver.py:914 +#: nova/virt/libvirt/driver.py:978 msgid "Going to destroy instance again." msgstr "Going to destroy instance again." 
-#: nova/virt/libvirt/driver.py:1518 +#: nova/virt/libvirt/driver.py:1574 msgid "Beginning live snapshot process" msgstr "Beginning live snapshot process" -#: nova/virt/libvirt/driver.py:1521 +#: nova/virt/libvirt/driver.py:1577 msgid "Beginning cold snapshot process" msgstr "Beginning cold snapshot process" -#: nova/virt/libvirt/driver.py:1550 +#: nova/virt/libvirt/driver.py:1606 msgid "Snapshot extracted, beginning image upload" msgstr "Snapshot extracted, beginning image upload" -#: nova/virt/libvirt/driver.py:1562 +#: nova/virt/libvirt/driver.py:1618 msgid "Snapshot image upload complete" msgstr "Snapshot image upload complete" -#: nova/virt/libvirt/driver.py:1972 +#: nova/virt/libvirt/driver.py:2049 msgid "Instance soft rebooted successfully." msgstr "Instance soft rebooted successfully." -#: nova/virt/libvirt/driver.py:2015 +#: nova/virt/libvirt/driver.py:2092 msgid "Instance shutdown successfully." msgstr "Instance shutdown successfully." -#: nova/virt/libvirt/driver.py:2023 +#: nova/virt/libvirt/driver.py:2100 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "Instance may have been rebooted during soft reboot, so return now." -#: nova/virt/libvirt/driver.py:2091 +#: nova/virt/libvirt/driver.py:2168 msgid "Instance rebooted successfully." msgstr "Instance rebooted successfully." -#: nova/virt/libvirt/driver.py:2259 +#: nova/virt/libvirt/driver.py:2336 msgid "Instance spawned successfully." msgstr "Instance spawned successfully." 
-#: nova/virt/libvirt/driver.py:2275 +#: nova/virt/libvirt/driver.py:2352 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "data: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341 +#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Truncated console log returned, %d bytes ignored" -#: nova/virt/libvirt/driver.py:2568 +#: nova/virt/libvirt/driver.py:2645 msgid "Creating image" msgstr "Creating image" -#: nova/virt/libvirt/driver.py:2677 +#: nova/virt/libvirt/driver.py:2754 msgid "Using config drive" msgstr "Using config drive" -#: nova/virt/libvirt/driver.py:2686 +#: nova/virt/libvirt/driver.py:2763 #, python-format msgid "Creating config drive at %(path)s" msgstr "Creating config drive at %(path)s" -#: nova/virt/libvirt/driver.py:3223 +#: nova/virt/libvirt/driver.py:3315 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821 -#: nova/virt/libvirt/driver.py:3849 -#, python-format -msgid "libvirt can't find a domain with id: %s" -msgstr "" - -#: nova/virt/libvirt/driver.py:4109 +#: nova/virt/libvirt/driver.py:4193 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4115 +#: nova/virt/libvirt/driver.py:4199 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -198,26 +209,26 @@ msgstr "" "Could not find domain in libvirt for instance %s. 
Cannot get block stats for " "device" -#: nova/virt/libvirt/driver.py:4330 +#: nova/virt/libvirt/driver.py:4441 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:4986 +#: nova/virt/libvirt/driver.py:5174 msgid "Instance running successfully." msgstr "Instance running successfully." -#: nova/virt/libvirt/driver.py:5226 +#: nova/virt/libvirt/driver.py:5414 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5238 +#: nova/virt/libvirt/driver.py:5426 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5241 +#: nova/virt/libvirt/driver.py:5429 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -230,7 +241,7 @@ msgstr "Called setup_basic_filtering in nwfilter" msgid "Ensuring static filters" msgstr "Ensuring static filters" -#: nova/virt/libvirt/firewall.py:306 +#: nova/virt/libvirt/firewall.py:304 msgid "Attempted to unfilter instance which is not filtered" msgstr "Attempted to unfilter instance which is not filtered" @@ -290,11 +301,11 @@ msgstr "Corrupt base files: %s" msgid "Removable base files: %s" msgstr "Removable base files: %s" -#: nova/virt/libvirt/utils.py:530 +#: nova/virt/libvirt/utils.py:536 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1352 +#: nova/virt/xenapi/vm_utils.py:1353 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/en_US/LC_MESSAGES/nova.po b/nova/locale/en_US/LC_MESSAGES/nova.po index e94fd6ea3f..394e7283b2 100644 --- a/nova/locale/en_US/LC_MESSAGES/nova.po +++ b/nova/locale/en_US/LC_MESSAGES/nova.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Nova\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/nova\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" "PO-Revision-Date: 2013-01-21 18:28+0000\n" "Last-Translator: Jeremy Stanley \n" 
"Language-Team: en_US \n" @@ -17,39 +17,39 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: nova/block_device.py:99 +#: nova/block_device.py:100 msgid "Some fields are invalid." msgstr "" -#: nova/block_device.py:109 +#: nova/block_device.py:110 msgid "Some required fields are missing" msgstr "" -#: nova/block_device.py:125 +#: nova/block_device.py:126 msgid "Boot index is invalid." msgstr "" -#: nova/block_device.py:168 +#: nova/block_device.py:169 msgid "Unrecognized legacy format." msgstr "" -#: nova/block_device.py:185 +#: nova/block_device.py:186 msgid "Invalid source_type field." msgstr "" -#: nova/block_device.py:189 +#: nova/block_device.py:190 msgid "Missing device UUID." msgstr "" -#: nova/block_device.py:368 +#: nova/block_device.py:369 msgid "Device name empty or too long." msgstr "" -#: nova/block_device.py:372 +#: nova/block_device.py:373 msgid "Device name contains spaces." msgstr "" -#: nova/block_device.py:382 +#: nova/block_device.py:383 msgid "Invalid volume_size." msgstr "" @@ -406,49 +406,49 @@ msgstr "" msgid "Failed to deploy instance: %(reason)s" msgstr "" -#: nova/exception.py:402 +#: nova/exception.py:402 nova/exception.py:406 #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "" -#: nova/exception.py:406 +#: nova/exception.py:410 msgid "Service is unavailable at this time." msgstr "Service is unavailable at this time." -#: nova/exception.py:410 +#: nova/exception.py:414 #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "" -#: nova/exception.py:414 +#: nova/exception.py:418 #, python-format msgid "Connection to the hypervisor is broken on host: %(host)s" msgstr "" -#: nova/exception.py:418 +#: nova/exception.py:422 #, fuzzy, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "Compute service is unavailable at this time." 
-#: nova/exception.py:422 +#: nova/exception.py:426 #, python-format msgid "Compute service of %(host)s is still in use." msgstr "" -#: nova/exception.py:426 +#: nova/exception.py:430 #, python-format msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." -#: nova/exception.py:431 +#: nova/exception.py:435 msgid "The supplied hypervisor type of is invalid." msgstr "The supplied hypervisor type of is invalid." -#: nova/exception.py:435 +#: nova/exception.py:439 msgid "The instance requires a newer hypervisor version than has been provided." msgstr "The instance requires a newer hypervisor version than has been provided." -#: nova/exception.py:440 +#: nova/exception.py:444 #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " @@ -457,32 +457,32 @@ msgstr "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." -#: nova/exception.py:445 +#: nova/exception.py:449 #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "The supplied device path (%(path)s) is invalid." -#: nova/exception.py:449 +#: nova/exception.py:453 #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "The supplied device path (%(path)s) is in use." -#: nova/exception.py:454 +#: nova/exception.py:458 #, python-format msgid "The supplied device (%(device)s) is busy." msgstr "The supplied device (%(device)s) is busy." -#: nova/exception.py:458 +#: nova/exception.py:462 #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "" -#: nova/exception.py:462 +#: nova/exception.py:466 #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s is not a valid IP v4/6 address." -#: nova/exception.py:466 +#: nova/exception.py:470 #, python-format msgid "" "VLAN tag is not appropriate for the port group %(bridge)s. 
Expected VLAN " @@ -491,7 +491,7 @@ msgstr "" "VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " "tag is %(tag)s, but the one associated with the port group is %(pgroup)s." -#: nova/exception.py:472 +#: nova/exception.py:476 #, python-format msgid "" "vSwitch which contains the port group %(bridge)s is not associated with " @@ -502,111 +502,111 @@ msgstr "" "the desired physical adapter. Expected vSwitch is %(expected)s, but the " "one associated is %(actual)s." -#: nova/exception.py:479 +#: nova/exception.py:483 #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "Disk format %(disk_format)s is not acceptable" -#: nova/exception.py:483 +#: nova/exception.py:487 #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "" -#: nova/exception.py:487 +#: nova/exception.py:491 #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "" -#: nova/exception.py:491 +#: nova/exception.py:495 #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "Image %(image_id)s is unacceptable: %(reason)s" -#: nova/exception.py:495 +#: nova/exception.py:499 #, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "Instance %(instance_id)s is unacceptable: %(reason)s" -#: nova/exception.py:499 +#: nova/exception.py:503 #, python-format msgid "Ec2 id %(ec2_id)s is unacceptable." msgstr "Ec2 id %(ec2_id)s is unacceptable." -#: nova/exception.py:503 +#: nova/exception.py:507 #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "Expected a uuid but received %(uuid)s." -#: nova/exception.py:507 +#: nova/exception.py:511 #, fuzzy, python-format msgid "Invalid ID received %(id)s." msgstr "Invalid cidr %(cidr)s." -#: nova/exception.py:511 +#: nova/exception.py:515 msgid "Constraint not met." msgstr "Constraint not met." -#: nova/exception.py:516 +#: nova/exception.py:520 msgid "Resource could not be found." 
msgstr "Resource could not be found." -#: nova/exception.py:521 +#: nova/exception.py:525 #, fuzzy, python-format msgid "No agent-build associated with id %(id)s." msgstr "No fixed IP associated with id %(id)s." -#: nova/exception.py:525 +#: nova/exception.py:529 #, python-format msgid "" "Agent-build with hypervisor %(hypervisor)s os %(os)s architecture " "%(architecture)s exists." msgstr "" -#: nova/exception.py:531 +#: nova/exception.py:535 #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Volume %(volume_id)s could not be found." -#: nova/exception.py:535 +#: nova/exception.py:539 #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "" -#: nova/exception.py:540 +#: nova/exception.py:544 #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "Snapshot %(snapshot_id)s could not be found." -#: nova/exception.py:544 +#: nova/exception.py:548 #, python-format msgid "No disk at %(location)s" msgstr "No disk at %(location)s" -#: nova/exception.py:548 +#: nova/exception.py:552 #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "Could not find a handler for %(driver_type)s volume." -#: nova/exception.py:552 +#: nova/exception.py:556 #, python-format msgid "Invalid image href %(image_href)s." msgstr "Invalid image href %(image_href)s." -#: nova/exception.py:556 +#: nova/exception.py:560 #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "" -#: nova/exception.py:561 +#: nova/exception.py:565 #, python-format msgid "Image %(image_id)s could not be found." msgstr "Image %(image_id)s could not be found." -#: nova/exception.py:565 +#: nova/exception.py:569 msgid "The current driver does not support preserving ephemeral partitions." msgstr "" -#: nova/exception.py:571 +#: nova/exception.py:575 #, python-format msgid "" "Image %(image_id)s could not be found. 
The nova EC2 API assigns image ids" @@ -617,148 +617,148 @@ msgstr "" " dynamically when they are listed for the first time. Have you listed " "image ids since adding this image?" -#: nova/exception.py:578 +#: nova/exception.py:582 #, python-format msgid "Project %(project_id)s could not be found." msgstr "Project %(project_id)s could not be found." -#: nova/exception.py:582 +#: nova/exception.py:586 msgid "Cannot find SR to read/write VDI." msgstr "Cannot find SR to read/write VDI." -#: nova/exception.py:586 +#: nova/exception.py:590 #, fuzzy, python-format msgid "Network %(network_id)s is duplicated." msgstr "Network %(network_id)s is still in use." -#: nova/exception.py:590 +#: nova/exception.py:594 #, python-format msgid "Network %(network_id)s is still in use." msgstr "Network %(network_id)s is still in use." -#: nova/exception.py:594 +#: nova/exception.py:598 #, python-format msgid "%(req)s is required to create a network." msgstr "%(req)s is required to create a network." -#: nova/exception.py:598 +#: nova/exception.py:602 #, python-format msgid "Network %(network_id)s could not be found." msgstr "Network %(network_id)s could not be found." -#: nova/exception.py:602 +#: nova/exception.py:606 #, fuzzy, python-format msgid "Port id %(port_id)s could not be found." msgstr "Port %(port_id)s could not be found." -#: nova/exception.py:606 +#: nova/exception.py:610 #, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "Network could not be found for bridge %(bridge)s" -#: nova/exception.py:610 +#: nova/exception.py:614 #, python-format msgid "Network could not be found for uuid %(uuid)s" msgstr "Network could not be found for uuid %(uuid)s" -#: nova/exception.py:614 +#: nova/exception.py:618 #, python-format msgid "Network could not be found with cidr %(cidr)s." msgstr "Network could not be found with cidr %(cidr)s." 
-#: nova/exception.py:618 +#: nova/exception.py:622 #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "Network could not be found for instance %(instance_id)s." -#: nova/exception.py:622 +#: nova/exception.py:626 msgid "No networks defined." msgstr "No networks defined." -#: nova/exception.py:626 +#: nova/exception.py:630 msgid "No more available networks." msgstr "" -#: nova/exception.py:630 +#: nova/exception.py:634 #, python-format msgid "" "Either network uuid %(network_uuid)s is not present or is not assigned to" " the project %(project_id)s." msgstr "" -#: nova/exception.py:635 +#: nova/exception.py:639 msgid "" "More than one possible network found. Specify network ID(s) to select " "which one(s) to connect to," msgstr "" -#: nova/exception.py:640 +#: nova/exception.py:644 #, python-format msgid "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "" -#: nova/exception.py:645 +#: nova/exception.py:649 #, python-format msgid "" "It is not allowed to create an interface on external network " "%(network_uuid)s" msgstr "" -#: nova/exception.py:650 +#: nova/exception.py:654 msgid "Could not find the datastore reference(s) which the VM uses." msgstr "Could not find the datastore reference(s) which the VM uses." -#: nova/exception.py:654 +#: nova/exception.py:658 #, python-format msgid "Port %(port_id)s is still in use." msgstr "Port %(port_id)s is still in use." -#: nova/exception.py:658 +#: nova/exception.py:662 #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "" -#: nova/exception.py:662 +#: nova/exception.py:666 #, fuzzy, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "Network could not be found for instance %(instance_id)s." -#: nova/exception.py:666 +#: nova/exception.py:670 #, fuzzy, python-format msgid "No free port available for instance %(instance)s." 
msgstr "Network could not be found for instance %(instance_id)s." -#: nova/exception.py:670 +#: nova/exception.py:674 #, python-format msgid "Fixed ip %(address)s already exists." msgstr "" -#: nova/exception.py:674 +#: nova/exception.py:678 #, python-format msgid "No fixed IP associated with id %(id)s." msgstr "No fixed IP associated with id %(id)s." -#: nova/exception.py:678 +#: nova/exception.py:682 #, python-format msgid "Fixed ip not found for address %(address)s." msgstr "Fixed ip not found for address %(address)s." -#: nova/exception.py:682 +#: nova/exception.py:686 #, python-format msgid "Instance %(instance_uuid)s has zero fixed ips." msgstr "Instance %(instance_uuid)s has zero fixed ips." -#: nova/exception.py:686 +#: nova/exception.py:690 #, python-format msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." msgstr "Network host %(host)s has zero fixed ips in network %(network_id)s." -#: nova/exception.py:691 +#: nova/exception.py:695 #, python-format msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'." msgstr "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'." -#: nova/exception.py:695 +#: nova/exception.py:699 #, python-format msgid "" "Fixed IP address (%(address)s) does not exist in network " @@ -767,7 +767,7 @@ msgstr "" "Fixed IP address (%(address)s) does not exist in network " "(%(network_uuid)s)." -#: nova/exception.py:700 +#: nova/exception.py:704 #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance " @@ -776,126 +776,126 @@ msgstr "" "Fixed IP address %(address)s is already in use on instance " "%(instance_uuid)s." -#: nova/exception.py:705 +#: nova/exception.py:709 #, python-format msgid "More than one instance is associated with fixed ip address '%(address)s'." msgstr "More than one instance is associated with fixed ip address '%(address)s'." -#: nova/exception.py:710 +#: nova/exception.py:714 #, python-format msgid "Fixed IP address %(address)s is invalid." 
msgstr "Fixed IP address %(address)s is invalid." -#: nova/exception.py:715 +#: nova/exception.py:719 msgid "Zero fixed ips available." msgstr "Zero fixed ips available." -#: nova/exception.py:719 +#: nova/exception.py:723 msgid "Zero fixed ips could be found." msgstr "Zero fixed ips could be found." -#: nova/exception.py:723 +#: nova/exception.py:727 #, python-format msgid "Floating ip %(address)s already exists." msgstr "Floating ip %(address)s already exists." -#: nova/exception.py:728 +#: nova/exception.py:732 #, python-format msgid "Floating ip not found for id %(id)s." msgstr "Floating ip not found for id %(id)s." -#: nova/exception.py:732 +#: nova/exception.py:736 #, python-format msgid "The DNS entry %(name)s already exists in domain %(domain)s." msgstr "The DNS entry %(name)s already exists in domain %(domain)s." -#: nova/exception.py:736 +#: nova/exception.py:740 #, python-format msgid "Floating ip not found for address %(address)s." msgstr "Floating ip not found for address %(address)s." -#: nova/exception.py:740 +#: nova/exception.py:744 #, python-format msgid "Floating ip not found for host %(host)s." msgstr "Floating ip not found for host %(host)s." -#: nova/exception.py:744 +#: nova/exception.py:748 #, python-format msgid "Multiple floating ips are found for address %(address)s." msgstr "Multiple floating ips are found for address %(address)s." -#: nova/exception.py:748 +#: nova/exception.py:752 msgid "Floating ip pool not found." msgstr "Floating ip pool not found." -#: nova/exception.py:753 +#: nova/exception.py:757 msgid "Zero floating ips available." msgstr "Zero floating ips available." -#: nova/exception.py:759 +#: nova/exception.py:763 #, python-format msgid "Floating ip %(address)s is associated." msgstr "Floating ip %(address)s is associated." -#: nova/exception.py:763 +#: nova/exception.py:767 #, python-format msgid "Floating ip %(address)s is not associated." msgstr "Floating ip %(address)s is not associated." 
-#: nova/exception.py:767 +#: nova/exception.py:771 msgid "Zero floating ips exist." msgstr "Zero floating ips exist." -#: nova/exception.py:772 +#: nova/exception.py:776 #, python-format msgid "Interface %(interface)s not found." msgstr "Interface %(interface)s not found." -#: nova/exception.py:777 nova/api/openstack/compute/contrib/floating_ips.py:97 +#: nova/exception.py:781 nova/api/openstack/compute/contrib/floating_ips.py:97 msgid "Cannot disassociate auto assigned floating ip" msgstr "Cannot disassociate auto assigned floating ip" -#: nova/exception.py:782 +#: nova/exception.py:786 #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "Keypair %(name)s not found for user %(user_id)s" -#: nova/exception.py:786 +#: nova/exception.py:790 #, python-format msgid "Service %(service_id)s could not be found." msgstr "Service %(service_id)s could not be found." -#: nova/exception.py:790 +#: nova/exception.py:794 #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "" -#: nova/exception.py:794 +#: nova/exception.py:798 #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "" -#: nova/exception.py:798 +#: nova/exception.py:802 #, python-format msgid "Host %(host)s could not be found." msgstr "Host %(host)s could not be found." -#: nova/exception.py:802 +#: nova/exception.py:806 #, python-format msgid "Compute host %(host)s could not be found." msgstr "Compute host %(host)s could not be found." -#: nova/exception.py:806 +#: nova/exception.py:810 #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "Could not find binary %(binary)s on host %(host)s." -#: nova/exception.py:810 +#: nova/exception.py:814 #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "Invalid reservation expiration %(expire)s." 
-#: nova/exception.py:814 +#: nova/exception.py:818 #, python-format msgid "" "Change would make usage less than 0 for the following resources: " @@ -904,73 +904,73 @@ msgstr "" "Change would make usage less than 0 for the following resources: " "%(unders)s" -#: nova/exception.py:819 +#: nova/exception.py:823 msgid "Quota could not be found" msgstr "Quota could not be found" -#: nova/exception.py:823 +#: nova/exception.py:827 #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "" -#: nova/exception.py:828 +#: nova/exception.py:832 #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Unknown quota resources %(unknown)s." -#: nova/exception.py:832 +#: nova/exception.py:836 #, python-format msgid "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" -#: nova/exception.py:837 +#: nova/exception.py:841 #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "Quota for project %(project_id)s could not be found." -#: nova/exception.py:841 +#: nova/exception.py:845 #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "Quota class %(class_name)s could not be found." -#: nova/exception.py:845 +#: nova/exception.py:849 #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "Quota usage for project %(project_id)s could not be found." -#: nova/exception.py:849 +#: nova/exception.py:853 #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "Quota reservation %(uuid)s could not be found." -#: nova/exception.py:853 +#: nova/exception.py:857 #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Quota exceeded for resources: %(overs)s" -#: nova/exception.py:857 +#: nova/exception.py:861 #, python-format msgid "Security group %(security_group_id)s not found." msgstr "Security group %(security_group_id)s not found." 
-#: nova/exception.py:861 +#: nova/exception.py:865 #, python-format msgid "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "Security group %(security_group_id)s not found for project %(project_id)s." -#: nova/exception.py:866 +#: nova/exception.py:870 #, python-format msgid "Security group with rule %(rule_id)s not found." msgstr "Security group with rule %(rule_id)s not found." -#: nova/exception.py:871 +#: nova/exception.py:875 #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "" -#: nova/exception.py:876 +#: nova/exception.py:880 #, python-format msgid "" "Security group %(security_group_id)s is already associated with the " @@ -979,7 +979,7 @@ msgstr "" "Security group %(security_group_id)s is already associated with the " "instance %(instance_id)s" -#: nova/exception.py:881 +#: nova/exception.py:885 #, python-format msgid "" "Security group %(security_group_id)s is not associated with the instance " @@ -988,49 +988,49 @@ msgstr "" "Security group %(security_group_id)s is not associated with the instance " "%(instance_id)s" -#: nova/exception.py:886 +#: nova/exception.py:890 #, fuzzy, python-format msgid "Security group default rule (%rule_id)s not found." msgstr "Security group with rule %(rule_id)s not found." -#: nova/exception.py:890 +#: nova/exception.py:894 msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "" -#: nova/exception.py:896 +#: nova/exception.py:900 #, python-format msgid "Rule already exists in group: %(rule)s" msgstr "" -#: nova/exception.py:900 +#: nova/exception.py:904 msgid "No Unique Match Found." msgstr "" -#: nova/exception.py:905 +#: nova/exception.py:909 #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "Migration %(migration_id)s could not be found." 
-#: nova/exception.py:909 +#: nova/exception.py:913 #, python-format msgid "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "Migration not found for instance %(instance_id)s with status %(status)s." -#: nova/exception.py:914 +#: nova/exception.py:918 #, python-format msgid "Console pool %(pool_id)s could not be found." msgstr "Console pool %(pool_id)s could not be found." -#: nova/exception.py:918 +#: nova/exception.py:922 #, python-format msgid "" "Console pool with host %(host)s, console_type %(console_type)s and " "compute_host %(compute_host)s already exists." msgstr "" -#: nova/exception.py:924 +#: nova/exception.py:928 #, python-format msgid "" "Console pool of type %(console_type)s for compute host %(compute_host)s " @@ -1039,17 +1039,17 @@ msgstr "" "Console pool of type %(console_type)s for compute host %(compute_host)s " "on proxy host %(host)s not found." -#: nova/exception.py:930 +#: nova/exception.py:934 #, python-format msgid "Console %(console_id)s could not be found." msgstr "Console %(console_id)s could not be found." -#: nova/exception.py:934 +#: nova/exception.py:938 #, python-format msgid "Console for instance %(instance_uuid)s could not be found." msgstr "Console for instance %(instance_uuid)s could not be found." -#: nova/exception.py:938 +#: nova/exception.py:942 #, python-format msgid "" "Console for instance %(instance_uuid)s in pool %(pool_id)s could not be " @@ -1058,237 +1058,244 @@ msgstr "" "Console for instance %(instance_uuid)s in pool %(pool_id)s could not be " "found." -#: nova/exception.py:943 +#: nova/exception.py:947 #, fuzzy, python-format msgid "Invalid console type %(console_type)s" msgstr "Invalid console type %(console_type)s " -#: nova/exception.py:947 +#: nova/exception.py:951 #, python-format msgid "Unavailable console type %(console_type)s." 
msgstr "" -#: nova/exception.py:951 +#: nova/exception.py:955 #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "" -#: nova/exception.py:956 +#: nova/exception.py:960 #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "Flavor %(flavor_id)s could not be found." -#: nova/exception.py:960 +#: nova/exception.py:964 #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "" -#: nova/exception.py:964 +#: nova/exception.py:968 #, fuzzy, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "Flavor access not found for %(flavor_id) / %(project_id) combination." -#: nova/exception.py:969 +#: nova/exception.py:973 +#, python-format +msgid "" +"Flavor %(id)d extra spec cannot be updated or created after %(retries)d " +"retries." +msgstr "" + +#: nova/exception.py:978 #, fuzzy, python-format msgid "Cell %(cell_name)s doesn't exist." msgstr "pool %s doesn't exist" -#: nova/exception.py:973 +#: nova/exception.py:982 #, python-format msgid "Cell with name %(name)s already exists." msgstr "" -#: nova/exception.py:977 +#: nova/exception.py:986 #, python-format msgid "Inconsistency in cell routing: %(reason)s" msgstr "" -#: nova/exception.py:981 +#: nova/exception.py:990 #, python-format msgid "Service API method not found: %(detail)s" msgstr "" -#: nova/exception.py:985 +#: nova/exception.py:994 #, fuzzy msgid "Timeout waiting for response from cell" msgstr "Timed out waiting for RPC response: %s" -#: nova/exception.py:989 +#: nova/exception.py:998 #, python-format msgid "Cell message has reached maximum hop count: %(hop_count)s" msgstr "" -#: nova/exception.py:993 +#: nova/exception.py:1002 msgid "No cells available matching scheduling criteria." msgstr "" -#: nova/exception.py:997 +#: nova/exception.py:1006 msgid "Cannot update cells configuration file." 
msgstr "" -#: nova/exception.py:1001 +#: nova/exception.py:1010 #, fuzzy, python-format msgid "Cell is not known for instance %(instance_uuid)s" msgstr "Destroying VDIs for Instance %(instance_uuid)s" -#: nova/exception.py:1005 +#: nova/exception.py:1014 #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "Scheduler Host Filter %(filter_name)s could not be found." -#: nova/exception.py:1009 +#: nova/exception.py:1018 #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" -#: nova/exception.py:1014 +#: nova/exception.py:1023 #, python-format msgid "" "Metric %(name)s could not be found on the compute host node " "%(host)s.%(node)s." msgstr "" -#: nova/exception.py:1019 +#: nova/exception.py:1028 #, python-format msgid "File %(file_path)s could not be found." msgstr "File %(file_path)s could not be found." -#: nova/exception.py:1023 +#: nova/exception.py:1032 msgid "Zero files could be found." msgstr "Zero files could be found." -#: nova/exception.py:1027 +#: nova/exception.py:1036 #, python-format msgid "Virtual switch associated with the network adapter %(adapter)s not found." msgstr "Virtual switch associated with the network adapter %(adapter)s not found." -#: nova/exception.py:1032 +#: nova/exception.py:1041 #, python-format msgid "Network adapter %(adapter)s could not be found." msgstr "Network adapter %(adapter)s could not be found." -#: nova/exception.py:1036 +#: nova/exception.py:1045 #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "Class %(class_name)s could not be found: %(exception)s" -#: nova/exception.py:1040 +#: nova/exception.py:1049 msgid "Action not allowed." msgstr "Action not allowed." 
-#: nova/exception.py:1044 +#: nova/exception.py:1053 msgid "Rotation is not allowed for snapshots" msgstr "Rotation is not allowed for snapshots" -#: nova/exception.py:1048 +#: nova/exception.py:1057 msgid "Rotation param is required for backup image_type" msgstr "Rotation param is required for backup image_type" -#: nova/exception.py:1053 nova/tests/compute/test_keypairs.py:144 +#: nova/exception.py:1062 nova/tests/compute/test_keypairs.py:144 #, fuzzy, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "Key pair %(key_name)s already exists." -#: nova/exception.py:1057 +#: nova/exception.py:1066 #, python-format msgid "Instance %(name)s already exists." msgstr "Instance %(name)s already exists." -#: nova/exception.py:1061 +#: nova/exception.py:1070 #, python-format msgid "Flavor with name %(name)s already exists." msgstr "" -#: nova/exception.py:1065 +#: nova/exception.py:1074 #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "" -#: nova/exception.py:1069 +#: nova/exception.py:1078 #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." 
msgstr "" -#: nova/exception.py:1074 +#: nova/exception.py:1083 #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s is not on shared storage: %(reason)s" -#: nova/exception.py:1078 +#: nova/exception.py:1087 #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s is not on local storage: %(reason)s" -#: nova/exception.py:1082 +#: nova/exception.py:1091 #, python-format msgid "Storage error: %(reason)s" msgstr "" -#: nova/exception.py:1086 +#: nova/exception.py:1095 #, python-format msgid "Migration error: %(reason)s" msgstr "" -#: nova/exception.py:1090 +#: nova/exception.py:1099 #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "" -#: nova/exception.py:1094 +#: nova/exception.py:1103 #, python-format msgid "Malformed message body: %(reason)s" msgstr "Malformed message body: %(reason)s" -#: nova/exception.py:1100 +#: nova/exception.py:1109 #, python-format msgid "Could not find config at %(path)s" msgstr "Could not find config at %(path)s" -#: nova/exception.py:1104 +#: nova/exception.py:1113 #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "Could not load paste app '%(name)s' from %(path)s" -#: nova/exception.py:1108 +#: nova/exception.py:1117 msgid "When resizing, instances must change flavor!" msgstr "When resizing, instances must change flavor!" -#: nova/exception.py:1112 +#: nova/exception.py:1121 #, python-format msgid "Resize error: %(reason)s" msgstr "" -#: nova/exception.py:1116 +#: nova/exception.py:1125 #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "" -#: nova/exception.py:1120 +#: nova/exception.py:1129 msgid "Flavor's memory is too small for requested image." msgstr "" -#: nova/exception.py:1124 +#: nova/exception.py:1133 msgid "Flavor's disk is too small for requested image." 
msgstr "" -#: nova/exception.py:1128 +#: nova/exception.py:1137 #, python-format msgid "Insufficient free memory on compute node to start %(uuid)s." msgstr "Insufficient free memory on compute node to start %(uuid)s." -#: nova/exception.py:1132 +#: nova/exception.py:1141 #, python-format msgid "No valid host was found. %(reason)s" msgstr "No valid host was found. %(reason)s" -#: nova/exception.py:1137 +#: nova/exception.py:1146 #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "" -#: nova/exception.py:1144 +#: nova/exception.py:1153 #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used " @@ -1297,45 +1304,45 @@ msgstr "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used " "%(used)d of %(allowed)d %(resource)s" -#: nova/exception.py:1149 +#: nova/exception.py:1158 msgid "Maximum number of floating ips exceeded" msgstr "Maximum number of floating ips exceeded" -#: nova/exception.py:1153 +#: nova/exception.py:1162 #, fuzzy msgid "Maximum number of fixed ips exceeded" msgstr "Maximum number of floating ips exceeded" -#: nova/exception.py:1157 +#: nova/exception.py:1166 #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "Maximum number of metadata items exceeds %(allowed)d" -#: nova/exception.py:1161 +#: nova/exception.py:1170 msgid "Personality file limit exceeded" msgstr "Personality file limit exceeded" -#: nova/exception.py:1165 +#: nova/exception.py:1174 msgid "Personality file path too long" msgstr "Personality file path too long" -#: nova/exception.py:1169 +#: nova/exception.py:1178 msgid "Personality file content too long" msgstr "Personality file content too long" -#: nova/exception.py:1173 nova/tests/compute/test_keypairs.py:155 +#: nova/exception.py:1182 nova/tests/compute/test_keypairs.py:155 msgid "Maximum number of key pairs exceeded" msgstr "Maximum number of key pairs exceeded" -#: nova/exception.py:1178 +#: nova/exception.py:1187 msgid "Maximum number 
of security groups or rules exceeded" msgstr "Maximum number of security groups or rules exceeded" -#: nova/exception.py:1182 +#: nova/exception.py:1191 msgid "Maximum number of ports exceeded" msgstr "" -#: nova/exception.py:1186 +#: nova/exception.py:1195 #, python-format msgid "" "Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " @@ -1344,130 +1351,130 @@ msgstr "" "Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " "%(reason)s." -#: nova/exception.py:1191 +#: nova/exception.py:1200 #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "Aggregate %(aggregate_id)s could not be found." -#: nova/exception.py:1195 +#: nova/exception.py:1204 #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "Aggregate %(aggregate_name)s already exists." -#: nova/exception.py:1199 +#: nova/exception.py:1208 #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "Aggregate %(aggregate_id)s has no host %(host)s." -#: nova/exception.py:1203 +#: nova/exception.py:1212 #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." -#: nova/exception.py:1208 +#: nova/exception.py:1217 #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "Aggregate %(aggregate_id)s already has host %(host)s." 
-#: nova/exception.py:1212 +#: nova/exception.py:1221 msgid "Unable to create flavor" msgstr "" -#: nova/exception.py:1216 +#: nova/exception.py:1225 #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "Failed to set admin password on %(instance)s because %(reason)s" -#: nova/exception.py:1222 +#: nova/exception.py:1231 #, python-format msgid "Detected existing vlan with id %(vlan)d" msgstr "Detected existing vlan with id %(vlan)d" -#: nova/exception.py:1226 +#: nova/exception.py:1235 msgid "There was a conflict when trying to complete your request." msgstr "" -#: nova/exception.py:1232 +#: nova/exception.py:1241 #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "Instance %(instance_id)s could not be found." -#: nova/exception.py:1236 +#: nova/exception.py:1245 #, fuzzy, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "Console for instance %(instance_uuid)s could not be found." -#: nova/exception.py:1241 +#: nova/exception.py:1250 #, fuzzy, python-format msgid "Node %(node_id)s could not be found." msgstr "Volume %(volume_id)s could not be found." -#: nova/exception.py:1245 +#: nova/exception.py:1254 #, fuzzy, python-format msgid "Node with UUID %(node_uuid)s could not be found." msgstr "Port %(port_id)s could not be found." -#: nova/exception.py:1249 +#: nova/exception.py:1258 #, python-format msgid "Marker %(marker)s could not be found." msgstr "Marker %(marker)s could not be found." -#: nova/exception.py:1254 +#: nova/exception.py:1263 #, python-format msgid "Invalid id: %(val)s (expecting \"i-...\")." msgstr "Invalid id: %(val)s (expecting \"i-...\")." 
-#: nova/exception.py:1258 +#: nova/exception.py:1267 #, python-format msgid "Could not fetch image %(image_id)s" msgstr "Could not fetch image %(image_id)s" -#: nova/exception.py:1262 +#: nova/exception.py:1271 #, fuzzy, python-format msgid "Could not upload image %(image_id)s" msgstr "Could not fetch image %(image_id)s" -#: nova/exception.py:1266 +#: nova/exception.py:1275 #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "Task %(task_name)s is already running on host %(host)s" -#: nova/exception.py:1270 +#: nova/exception.py:1279 #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "Task %(task_name)s is not running on host %(host)s" -#: nova/exception.py:1274 +#: nova/exception.py:1283 #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "Instance %(instance_uuid)s is locked" -#: nova/exception.py:1278 +#: nova/exception.py:1287 #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "" -#: nova/exception.py:1282 +#: nova/exception.py:1291 #, python-format msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" -#: nova/exception.py:1287 +#: nova/exception.py:1296 #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "Unknown config drive format %(format)s. Select one of iso9660 or vfat." -#: nova/exception.py:1292 +#: nova/exception.py:1301 #, fuzzy, python-format msgid "Failed to attach network adapter device to %(instance)s" msgstr "Failed to dealloc network for deleted instance" -#: nova/exception.py:1296 +#: nova/exception.py:1305 #, fuzzy, python-format msgid "Failed to detach network adapter device from %(instance)s" msgstr "Failed to dealloc network for deleted instance" -#: nova/exception.py:1300 +#: nova/exception.py:1309 #, python-format msgid "" "User data too large. 
User data must be no larger than %(maxsize)s bytes " @@ -1476,332 +1483,362 @@ msgstr "" "User data too large. User data must be no larger than %(maxsize)s bytes " "once base64 encoded. Your data is %(length)d bytes" -#: nova/exception.py:1306 +#: nova/exception.py:1315 msgid "User data needs to be valid base 64." msgstr "User data needs to be valid base 64." -#: nova/exception.py:1310 +#: nova/exception.py:1319 #, python-format msgid "" "Unexpected task state: expecting %(expected)s but the actual state is " "%(actual)s" msgstr "" -#: nova/exception.py:1319 +#: nova/exception.py:1328 #, fuzzy, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not " "found" msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" -#: nova/exception.py:1324 +#: nova/exception.py:1333 #, fuzzy, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "Keypair %(name)s not found for user %(user_id)s" -#: nova/exception.py:1328 +#: nova/exception.py:1337 #, python-format msgid "" "Unexpected VM state: expecting %(expected)s but the actual state is " "%(actual)s" msgstr "" -#: nova/exception.py:1333 +#: nova/exception.py:1342 #, python-format msgid "The CA file for %(project)s could not be found" msgstr "The CA file for %(project)s could not be found" -#: nova/exception.py:1337 +#: nova/exception.py:1346 #, python-format msgid "The CRL file for %(project)s could not be found" msgstr "The CRL file for %(project)s could not be found" -#: nova/exception.py:1341 +#: nova/exception.py:1350 msgid "Instance recreate is not supported." msgstr "" -#: nova/exception.py:1345 +#: nova/exception.py:1354 #, python-format msgid "" "The service from servicegroup driver %(driver)s is temporarily " "unavailable." 
msgstr "" -#: nova/exception.py:1350 +#: nova/exception.py:1359 #, python-format msgid "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" -#: nova/exception.py:1355 +#: nova/exception.py:1364 #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" -#: nova/exception.py:1360 +#: nova/exception.py:1369 #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt " "driver" msgstr "" -#: nova/exception.py:1365 +#: nova/exception.py:1374 #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "" -#: nova/exception.py:1369 +#: nova/exception.py:1378 #, fuzzy, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "Instance %(instance_id)s is not in rescue mode" -#: nova/exception.py:1373 +#: nova/exception.py:1382 #, fuzzy, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "Instance %(instance_id)s is not in rescue mode" -#: nova/exception.py:1378 +#: nova/exception.py:1387 #, fuzzy, python-format msgid "Shadow table with name %(name)s already exists." msgstr "Instance Type with name %(name)s already exists." 
-#: nova/exception.py:1383 +#: nova/exception.py:1392 #, python-format msgid "Instance rollback performed due to: %s" msgstr "" -#: nova/exception.py:1389 +#: nova/exception.py:1398 #, fuzzy, python-format msgid "Unsupported object type %(objtype)s" msgstr "Expected object of type: %s" -#: nova/exception.py:1393 +#: nova/exception.py:1402 #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "" -#: nova/exception.py:1397 +#: nova/exception.py:1406 #, python-format msgid "Version %(objver)s of %(objname)s is not supported" msgstr "" -#: nova/exception.py:1401 +#: nova/exception.py:1410 #, python-format msgid "Cannot modify readonly field %(field)s" msgstr "" -#: nova/exception.py:1405 +#: nova/exception.py:1414 #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "" -#: nova/exception.py:1409 +#: nova/exception.py:1418 #, python-format msgid "Field %(field)s of %(objname)s is not an instance of Field" msgstr "" -#: nova/exception.py:1413 +#: nova/exception.py:1422 #, python-format msgid "Core API extensions are missing: %(missing_apis)s" msgstr "" -#: nova/exception.py:1417 +#: nova/exception.py:1426 #, python-format msgid "Error during following call to agent: %(method)s" msgstr "" -#: nova/exception.py:1421 +#: nova/exception.py:1430 #, python-format msgid "Unable to contact guest agent. The following call timed out: %(method)s" msgstr "" -#: nova/exception.py:1426 +#: nova/exception.py:1435 #, python-format msgid "Agent does not support the call: %(method)s" msgstr "" -#: nova/exception.py:1430 +#: nova/exception.py:1439 #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "" -#: nova/exception.py:1434 +#: nova/exception.py:1443 #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "" -#: nova/exception.py:1438 +#: nova/exception.py:1447 #, python-format msgid "Instance group %(group_uuid)s has no metadata with key %(metadata_key)s." 
msgstr "" -#: nova/exception.py:1443 +#: nova/exception.py:1452 #, python-format msgid "Instance group %(group_uuid)s has no member with id %(instance_id)s." msgstr "" -#: nova/exception.py:1448 +#: nova/exception.py:1457 #, python-format msgid "Instance group %(group_uuid)s has no policy %(policy)s." msgstr "" -#: nova/exception.py:1452 +#: nova/exception.py:1461 #, python-format msgid "Number of retries to plugin (%(num_retries)d) exceeded." msgstr "" -#: nova/exception.py:1456 +#: nova/exception.py:1465 #, python-format msgid "There was an error with the download module %(module)s. %(reason)s" msgstr "" -#: nova/exception.py:1461 +#: nova/exception.py:1470 #, python-format msgid "" "The metadata for this location will not work with this module %(module)s." " %(reason)s." msgstr "" -#: nova/exception.py:1466 +#: nova/exception.py:1475 #, python-format msgid "The method %(method_name)s is not implemented." msgstr "" -#: nova/exception.py:1470 +#: nova/exception.py:1479 #, python-format msgid "The module %(module)s is misconfigured: %(reason)s." msgstr "" -#: nova/exception.py:1474 +#: nova/exception.py:1483 #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "" -#: nova/exception.py:1478 +#: nova/exception.py:1487 #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "" -#: nova/exception.py:1482 +#: nova/exception.py:1491 #, python-format msgid "PCI device %(id)s not found" msgstr "" -#: nova/exception.py:1486 +#: nova/exception.py:1495 #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." 
msgstr "" -#: nova/exception.py:1490 +#: nova/exception.py:1499 #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" -#: nova/exception.py:1496 +#: nova/exception.py:1505 #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead " "of %(hopeowner)s" msgstr "" -#: nova/exception.py:1502 +#: nova/exception.py:1511 #, python-format msgid "PCI device request (%requests)s failed" msgstr "" -#: nova/exception.py:1507 +#: nova/exception.py:1516 #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty " "pool" msgstr "" -#: nova/exception.py:1513 +#: nova/exception.py:1522 #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "" -#: nova/exception.py:1517 +#: nova/exception.py:1526 #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "" -#: nova/exception.py:1522 +#: nova/exception.py:1531 #, python-format msgid "Not enough parameters: %(reason)s" msgstr "" -#: nova/exception.py:1527 +#: nova/exception.py:1536 #, python-format msgid "Invalid PCI devices Whitelist config %(reason)s" msgstr "" -#: nova/exception.py:1531 +#: nova/exception.py:1540 #, python-format msgid "Cannot change %(node_id)s to %(new_node_id)s" msgstr "" -#: nova/exception.py:1541 +#: nova/exception.py:1550 #, python-format msgid "" "Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: " "%(reason)s" msgstr "" -#: nova/exception.py:1546 +#: nova/exception.py:1555 #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "" -#: nova/exception.py:1550 +#: nova/exception.py:1559 #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "" -#: nova/exception.py:1554 +#: nova/exception.py:1563 #, python-format msgid "Key manager error: %(reason)s" msgstr "" -#: nova/exception.py:1558 +#: nova/exception.py:1567 #, python-format msgid "Failed to remove volume(s): 
(%(reason)s)" msgstr "" -#: nova/exception.py:1562 +#: nova/exception.py:1571 #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "" -#: nova/exception.py:1566 +#: nova/exception.py:1575 #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "" -#: nova/exception.py:1571 +#: nova/exception.py:1580 #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the " "maximum allowed by flavor %(max_vram)d." msgstr "" -#: nova/exception.py:1576 +#: nova/exception.py:1585 #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "" -#: nova/exception.py:1580 +#: nova/exception.py:1589 msgid "" -"Block migration of instances with config drives is not supported in " -"libvirt." +"Live migration of instances with config drives is not supported in " +"libvirt unless libvirt instance path and drive data is shared across " +"compute nodes." msgstr "" -#: nova/exception.py:1585 +#: nova/exception.py:1595 +#, python-format +msgid "" +"Host %(server)s is running an old version of Nova, live migrations " +"involving that version may cause data loss. Upgrade Nova on %(server)s " +"and try again." 
+msgstr "" + +#: nova/exception.py:1601 #, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "" +#: nova/exception.py:1605 +#, python-format +msgid "" +"Image vCPU limits %(sockets)d:%(cores)d:%(threads)d exceeds permitted " +"%(maxsockets)d:%(maxcores)d:%(maxthreads)d" +msgstr "" + +#: nova/exception.py:1610 +#, python-format +msgid "" +"Image vCPU topology %(sockets)d:%(cores)d:%(threads)d exceeds permitted " +"%(maxsockets)d:%(maxcores)d:%(maxthreads)d" +msgstr "" + +#: nova/exception.py:1615 +#, python-format +msgid "" +"Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to" +" satisfy for vcpus count %(vcpus)d" +msgstr "" + #: nova/filters.py:84 #, python-format msgid "Filter %s returned 0 hosts" @@ -1826,116 +1863,116 @@ msgstr "" msgid "Failed to roll back reservations %s" msgstr "" -#: nova/service.py:160 +#: nova/service.py:161 #, fuzzy, python-format msgid "Starting %(topic)s node (version %(version)s)" msgstr "Starting %(topic)s node (version %(vcs_string)s)" -#: nova/service.py:285 +#: nova/service.py:286 msgid "Service killed that has no database entry" msgstr "Service killed that has no database entry" -#: nova/service.py:297 +#: nova/service.py:298 msgid "Service error occurred during cleanup_host" msgstr "" -#: nova/service.py:314 +#: nova/service.py:315 #, python-format msgid "Temporary directory is invalid: %s" msgstr "" -#: nova/service.py:339 +#: nova/service.py:340 #, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "" -#: nova/service.py:424 +#: nova/service.py:433 msgid "serve() can only be called once" msgstr "serve() can only be called once" -#: nova/utils.py:148 +#: nova/utils.py:147 #, fuzzy, python-format msgid "Expected to receive %(exp)s bytes, but actually %(act)s" msgstr "" "unexpected task state: expecting %(expected)s but the actual state is " "%(actual)s" -#: nova/utils.py:354 +#: nova/utils.py:353 #, python-format msgid 
"Couldn't get IPv4 : %(ex)s" msgstr "" -#: nova/utils.py:370 +#: nova/utils.py:369 #, python-format msgid "IPv4 address is not found.: %s" msgstr "" -#: nova/utils.py:373 +#: nova/utils.py:372 #, python-format msgid "Couldn't get IPv4 of %(interface)s : %(ex)s" msgstr "" -#: nova/utils.py:388 +#: nova/utils.py:387 #, python-format msgid "Link Local address is not found.:%s" msgstr "Link Local address is not found.:%s" -#: nova/utils.py:391 +#: nova/utils.py:390 #, python-format msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" -#: nova/utils.py:412 +#: nova/utils.py:411 #, python-format msgid "Invalid backend: %s" msgstr "Invalid backend: %s" -#: nova/utils.py:457 +#: nova/utils.py:454 #, python-format msgid "Expected object of type: %s" msgstr "Expected object of type: %s" -#: nova/utils.py:485 +#: nova/utils.py:482 #, python-format msgid "Invalid server_string: %s" msgstr "Invalid server_string: %s" -#: nova/utils.py:776 nova/virt/configdrive.py:177 +#: nova/utils.py:773 #, python-format msgid "Could not remove tmpdir: %s" msgstr "Could not remove tmpdir: %s" -#: nova/utils.py:966 +#: nova/utils.py:963 #, fuzzy, python-format msgid "%s is not a string or unicode" msgstr "Server name is not a string or unicode" -#: nova/utils.py:970 +#: nova/utils.py:967 #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "" -#: nova/utils.py:975 +#: nova/utils.py:972 #, python-format msgid "%(name)s has more than %(max_length)s characters." 
msgstr "" -#: nova/utils.py:985 +#: nova/utils.py:982 #, python-format msgid "%(value_name)s must be an integer" msgstr "" -#: nova/utils.py:991 +#: nova/utils.py:988 #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "" -#: nova/utils.py:997 +#: nova/utils.py:994 #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "" -#: nova/utils.py:1031 +#: nova/utils.py:1028 #, python-format msgid "Hypervisor version %s is invalid." msgstr "" @@ -1945,51 +1982,51 @@ msgstr "" msgid "Failed to load %(cfgfile)s: %(ex)s" msgstr "" -#: nova/wsgi.py:132 +#: nova/wsgi.py:133 #, python-format msgid "Could not bind to %(host)s:%(port)s" msgstr "" -#: nova/wsgi.py:137 +#: nova/wsgi.py:138 #, python-format msgid "%(name)s listening on %(host)s:%(port)s" msgstr "%(name)s listening on %(host)s:%(port)s" -#: nova/wsgi.py:152 nova/openstack/common/sslutils.py:50 +#: nova/wsgi.py:159 nova/openstack/common/sslutils.py:50 #, fuzzy, python-format msgid "Unable to find cert_file : %s" msgstr "Unable to find address %r" -#: nova/wsgi.py:156 nova/openstack/common/sslutils.py:53 +#: nova/wsgi.py:163 nova/openstack/common/sslutils.py:53 #, fuzzy, python-format msgid "Unable to find ca_file : %s" msgstr "Unable to find address %r" -#: nova/wsgi.py:160 nova/openstack/common/sslutils.py:56 +#: nova/wsgi.py:167 nova/openstack/common/sslutils.py:56 #, fuzzy, python-format msgid "Unable to find key_file : %s" msgstr "Unable to find address %r" -#: nova/wsgi.py:164 nova/openstack/common/sslutils.py:59 +#: nova/wsgi.py:171 nova/openstack/common/sslutils.py:59 msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" -#: nova/wsgi.py:195 +#: nova/wsgi.py:202 #, python-format msgid "Failed to start %(name)s on %(host)s:%(port)s with SSL support" msgstr "" -#: nova/wsgi.py:223 +#: nova/wsgi.py:238 msgid "Stopping WSGI server." msgstr "Stopping WSGI server." 
-#: nova/wsgi.py:242 +#: nova/wsgi.py:258 msgid "WSGI server has stopped." msgstr "WSGI server has stopped." -#: nova/wsgi.py:311 +#: nova/wsgi.py:327 msgid "You must implement __call__" msgstr "You must implement __call__" @@ -2069,153 +2106,153 @@ msgstr "Environment: %s" msgid "Unknown error occurred." msgstr "" -#: nova/api/ec2/cloud.py:395 +#: nova/api/ec2/cloud.py:391 #, python-format msgid "Create snapshot of volume %s" msgstr "Create snapshot of volume %s" -#: nova/api/ec2/cloud.py:420 +#: nova/api/ec2/cloud.py:416 #, python-format msgid "Could not find key pair(s): %s" msgstr "Could not find key pair(s): %s" -#: nova/api/ec2/cloud.py:436 +#: nova/api/ec2/cloud.py:432 #, python-format msgid "Create key pair %s" msgstr "Create key pair %s" -#: nova/api/ec2/cloud.py:448 +#: nova/api/ec2/cloud.py:444 #, python-format msgid "Import key %s" msgstr "Import key %s" -#: nova/api/ec2/cloud.py:461 +#: nova/api/ec2/cloud.py:457 #, python-format msgid "Delete key pair %s" msgstr "Delete key pair %s" -#: nova/api/ec2/cloud.py:603 nova/api/ec2/cloud.py:733 +#: nova/api/ec2/cloud.py:599 nova/api/ec2/cloud.py:729 msgid "need group_name or group_id" msgstr "" -#: nova/api/ec2/cloud.py:608 +#: nova/api/ec2/cloud.py:604 msgid "can't build a valid rule" msgstr "" -#: nova/api/ec2/cloud.py:616 +#: nova/api/ec2/cloud.py:612 #, python-format msgid "Invalid IP protocol %(protocol)s" msgstr "" -#: nova/api/ec2/cloud.py:650 nova/api/ec2/cloud.py:686 +#: nova/api/ec2/cloud.py:646 nova/api/ec2/cloud.py:682 msgid "No rule for the specified parameters." msgstr "No rule for the specified parameters." 
-#: nova/api/ec2/cloud.py:764 +#: nova/api/ec2/cloud.py:760 #, python-format msgid "Get console output for instance %s" msgstr "Get console output for instance %s" -#: nova/api/ec2/cloud.py:836 +#: nova/api/ec2/cloud.py:832 #, python-format msgid "Create volume from snapshot %s" msgstr "Create volume from snapshot %s" -#: nova/api/ec2/cloud.py:840 nova/api/openstack/compute/contrib/volumes.py:243 +#: nova/api/ec2/cloud.py:836 nova/api/openstack/compute/contrib/volumes.py:243 #, python-format msgid "Create volume of %s GB" msgstr "Create volume of %s GB" -#: nova/api/ec2/cloud.py:880 +#: nova/api/ec2/cloud.py:876 #, python-format msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" -#: nova/api/ec2/cloud.py:910 nova/api/openstack/compute/contrib/volumes.py:506 +#: nova/api/ec2/cloud.py:906 nova/api/openstack/compute/contrib/volumes.py:506 #, python-format msgid "Detach volume %s" msgstr "Detach volume %s" -#: nova/api/ec2/cloud.py:1242 +#: nova/api/ec2/cloud.py:1238 msgid "Allocate address" msgstr "Allocate address" -#: nova/api/ec2/cloud.py:1247 +#: nova/api/ec2/cloud.py:1243 #, python-format msgid "Release address %s" msgstr "Release address %s" -#: nova/api/ec2/cloud.py:1252 +#: nova/api/ec2/cloud.py:1248 #, python-format msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "Associate address %(public_ip)s to instance %(instance_id)s" -#: nova/api/ec2/cloud.py:1262 +#: nova/api/ec2/cloud.py:1258 msgid "Unable to associate IP Address, no fixed_ips." msgstr "Unable to associate IP Address, no fixed_ips." 
-#: nova/api/ec2/cloud.py:1270 -#: nova/api/openstack/compute/contrib/floating_ips.py:249 +#: nova/api/ec2/cloud.py:1266 +#: nova/api/openstack/compute/contrib/floating_ips.py:251 #, python-format msgid "multiple fixed_ips exist, using the first: %s" msgstr "multiple fixed_ips exist, using the first: %s" -#: nova/api/ec2/cloud.py:1283 +#: nova/api/ec2/cloud.py:1279 #, python-format msgid "Disassociate address %s" msgstr "Disassociate address %s" -#: nova/api/ec2/cloud.py:1300 nova/api/openstack/compute/servers.py:918 +#: nova/api/ec2/cloud.py:1296 nova/api/openstack/compute/servers.py:918 #: nova/api/openstack/compute/plugins/v3/multiple_create.py:64 msgid "min_count must be <= max_count" msgstr "min_count must be <= max_count" -#: nova/api/ec2/cloud.py:1332 +#: nova/api/ec2/cloud.py:1328 msgid "Image must be available" msgstr "Image must be available" -#: nova/api/ec2/cloud.py:1429 +#: nova/api/ec2/cloud.py:1424 #, python-format msgid "Reboot instance %r" msgstr "Reboot instance %r" -#: nova/api/ec2/cloud.py:1542 +#: nova/api/ec2/cloud.py:1537 #, python-format msgid "De-registering image %s" msgstr "De-registering image %s" -#: nova/api/ec2/cloud.py:1558 +#: nova/api/ec2/cloud.py:1553 msgid "imageLocation is required" msgstr "imageLocation is required" -#: nova/api/ec2/cloud.py:1578 +#: nova/api/ec2/cloud.py:1573 #, python-format msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "Registered image %(image_location)s with id %(image_id)s" -#: nova/api/ec2/cloud.py:1639 +#: nova/api/ec2/cloud.py:1634 msgid "user or group not specified" msgstr "user or group not specified" -#: nova/api/ec2/cloud.py:1642 +#: nova/api/ec2/cloud.py:1637 msgid "only group \"all\" is supported" msgstr "only group \"all\" is supported" -#: nova/api/ec2/cloud.py:1645 +#: nova/api/ec2/cloud.py:1640 msgid "operation_type must be add or remove" msgstr "operation_type must be add or remove" -#: nova/api/ec2/cloud.py:1647 +#: nova/api/ec2/cloud.py:1642 #, python-format msgid 
"Updating image %s publicity" msgstr "Updating image %s publicity" -#: nova/api/ec2/cloud.py:1660 +#: nova/api/ec2/cloud.py:1655 #, python-format msgid "Not allowed to modify attributes for image %s" msgstr "Not allowed to modify attributes for image %s" -#: nova/api/ec2/cloud.py:1686 +#: nova/api/ec2/cloud.py:1685 #, python-format msgid "" "Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not " @@ -2224,49 +2261,51 @@ msgstr "" "Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not " "have a volume attached at root (%(root)s)" -#: nova/api/ec2/cloud.py:1717 +#: nova/api/ec2/cloud.py:1718 #, python-format -msgid "Couldn't stop instance within %d sec" +msgid "" +"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: " +"%(vm_state)s, current task_state: %(task_state)s" msgstr "" -#: nova/api/ec2/cloud.py:1736 +#: nova/api/ec2/cloud.py:1742 #, python-format msgid "image of %(instance)s at %(now)s" msgstr "image of %(instance)s at %(now)s" -#: nova/api/ec2/cloud.py:1761 nova/api/ec2/cloud.py:1811 +#: nova/api/ec2/cloud.py:1767 nova/api/ec2/cloud.py:1817 msgid "resource_id and tag are required" msgstr "" -#: nova/api/ec2/cloud.py:1765 nova/api/ec2/cloud.py:1815 +#: nova/api/ec2/cloud.py:1771 nova/api/ec2/cloud.py:1821 #, fuzzy msgid "Expecting a list of resources" msgstr "Getting list of instances" -#: nova/api/ec2/cloud.py:1770 nova/api/ec2/cloud.py:1820 -#: nova/api/ec2/cloud.py:1878 +#: nova/api/ec2/cloud.py:1776 nova/api/ec2/cloud.py:1826 +#: nova/api/ec2/cloud.py:1884 #, fuzzy msgid "Only instances implemented" msgstr "instance not present" -#: nova/api/ec2/cloud.py:1774 nova/api/ec2/cloud.py:1824 +#: nova/api/ec2/cloud.py:1780 nova/api/ec2/cloud.py:1830 #, fuzzy msgid "Expecting a list of tagSets" msgstr "Getting list of instances" -#: nova/api/ec2/cloud.py:1780 nova/api/ec2/cloud.py:1833 +#: nova/api/ec2/cloud.py:1786 nova/api/ec2/cloud.py:1839 msgid "Expecting tagSet to be key/value pairs" msgstr "" -#: 
nova/api/ec2/cloud.py:1787 +#: nova/api/ec2/cloud.py:1793 msgid "Expecting both key and value to be set" msgstr "" -#: nova/api/ec2/cloud.py:1838 +#: nova/api/ec2/cloud.py:1844 msgid "Expecting key to be set" msgstr "" -#: nova/api/ec2/cloud.py:1912 +#: nova/api/ec2/cloud.py:1918 msgid "Invalid CIDR" msgstr "Invalid CIDR" @@ -2284,39 +2323,39 @@ msgstr "" msgid "Timestamp is invalid." msgstr "The request is invalid." -#: nova/api/metadata/handler.py:111 +#: nova/api/metadata/handler.py:112 msgid "" "X-Instance-ID present in request headers. The " "'service_neutron_metadata_proxy' option must be enabled to process this " "header." msgstr "" -#: nova/api/metadata/handler.py:140 nova/api/metadata/handler.py:147 +#: nova/api/metadata/handler.py:141 nova/api/metadata/handler.py:148 #, python-format msgid "Failed to get metadata for ip: %s" msgstr "Failed to get metadata for ip: %s" -#: nova/api/metadata/handler.py:142 nova/api/metadata/handler.py:198 +#: nova/api/metadata/handler.py:143 nova/api/metadata/handler.py:199 msgid "An unknown error has occurred. Please try your request again." msgstr "An unknown error has occurred. Please try your request again." -#: nova/api/metadata/handler.py:160 +#: nova/api/metadata/handler.py:161 msgid "X-Instance-ID header is missing from request." msgstr "" -#: nova/api/metadata/handler.py:162 +#: nova/api/metadata/handler.py:163 msgid "X-Tenant-ID header is missing from request." msgstr "" -#: nova/api/metadata/handler.py:164 +#: nova/api/metadata/handler.py:165 msgid "Multiple X-Instance-ID headers found within request." msgstr "" -#: nova/api/metadata/handler.py:166 +#: nova/api/metadata/handler.py:167 msgid "Multiple X-Tenant-ID headers found within request." 
msgstr "" -#: nova/api/metadata/handler.py:180 +#: nova/api/metadata/handler.py:181 #, python-format msgid "" "X-Instance-ID-Signature: %(signature)s does not match the expected value:" @@ -2324,17 +2363,17 @@ msgid "" "%(remote_address)s" msgstr "" -#: nova/api/metadata/handler.py:189 +#: nova/api/metadata/handler.py:190 #, fuzzy msgid "Invalid proxy request signature." msgstr "Invalid request: %s" -#: nova/api/metadata/handler.py:196 nova/api/metadata/handler.py:203 +#: nova/api/metadata/handler.py:197 nova/api/metadata/handler.py:204 #, fuzzy, python-format msgid "Failed to get metadata for instance id: %s" msgstr "Failed to get metadata for ip: %s" -#: nova/api/metadata/handler.py:207 +#: nova/api/metadata/handler.py:208 #, python-format msgid "" "Tenant_id %(tenant_id)s does not match tenant_id of instance " @@ -2363,11 +2402,11 @@ msgstr "Caught error: %s" msgid "%(url)s returned with HTTP %(status)d" msgstr "%(url)s returned with HTTP %(status)d" -#: nova/api/openstack/__init__.py:190 +#: nova/api/openstack/__init__.py:186 msgid "Must specify an ExtensionManager class" msgstr "Must specify an ExtensionManager class" -#: nova/api/openstack/__init__.py:236 nova/api/openstack/__init__.py:410 +#: nova/api/openstack/__init__.py:232 nova/api/openstack/__init__.py:406 #, python-format msgid "" "Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " @@ -2376,28 +2415,28 @@ msgstr "" "Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " "resource" -#: nova/api/openstack/__init__.py:283 +#: nova/api/openstack/__init__.py:279 #: nova/api/openstack/compute/plugins/v3/servers.py:99 #, python-format msgid "Not loading %s because it is in the blacklist" msgstr "" -#: nova/api/openstack/__init__.py:288 +#: nova/api/openstack/__init__.py:284 #: nova/api/openstack/compute/plugins/v3/servers.py:104 #, python-format msgid "Not loading %s because it is not in the whitelist" msgstr "" -#: nova/api/openstack/__init__.py:295 +#: 
nova/api/openstack/__init__.py:291 msgid "V3 API has been disabled by configuration" msgstr "" -#: nova/api/openstack/__init__.py:308 +#: nova/api/openstack/__init__.py:304 #, python-format msgid "Extensions in both blacklist and whitelist: %s" msgstr "" -#: nova/api/openstack/__init__.py:332 +#: nova/api/openstack/__init__.py:328 #, fuzzy, python-format msgid "Missing core API extensions: %s" msgstr "Loading extension %s" @@ -2437,59 +2476,51 @@ msgstr "limit param must be positive" msgid "offset param must be positive" msgstr "offset param must be positive" -#: nova/api/openstack/common.py:259 nova/api/openstack/compute/flavors.py:146 -#: nova/api/openstack/compute/servers.py:603 -#: nova/api/openstack/compute/plugins/v3/flavors.py:110 -#: nova/api/openstack/compute/plugins/v3/servers.py:280 -#, python-format -msgid "marker [%s] not found" -msgstr "marker [%s] not found" - -#: nova/api/openstack/common.py:299 +#: nova/api/openstack/common.py:276 #, python-format msgid "href %s does not contain version" msgstr "href %s does not contain version" -#: nova/api/openstack/common.py:314 +#: nova/api/openstack/common.py:291 msgid "Image metadata limit exceeded" msgstr "Image metadata limit exceeded" -#: nova/api/openstack/common.py:322 +#: nova/api/openstack/common.py:299 msgid "Image metadata key cannot be blank" msgstr "Image metadata key cannot be blank" -#: nova/api/openstack/common.py:325 +#: nova/api/openstack/common.py:302 msgid "Image metadata key too long" msgstr "Image metadata key too long" -#: nova/api/openstack/common.py:328 +#: nova/api/openstack/common.py:305 msgid "Invalid image metadata" msgstr "Invalid image metadata" -#: nova/api/openstack/common.py:391 +#: nova/api/openstack/common.py:368 #, python-format msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" msgstr "Cannot '%(action)s' while instance is in %(attr)s %(state)s" -#: nova/api/openstack/common.py:394 +#: nova/api/openstack/common.py:371 #, python-format msgid "Cannot '%s' an 
instance which has never been active" msgstr "" -#: nova/api/openstack/common.py:397 +#: nova/api/openstack/common.py:374 #, fuzzy, python-format msgid "Instance is in an invalid state for '%s'" msgstr "Instance is in an invalid state for '%(action)s'" -#: nova/api/openstack/common.py:477 +#: nova/api/openstack/common.py:454 msgid "Rejecting snapshot request, snapshots currently disabled" msgstr "Rejecting snapshot request, snapshots currently disabled" -#: nova/api/openstack/common.py:479 +#: nova/api/openstack/common.py:456 msgid "Instance snapshots are not permitted at this time." msgstr "Instance snapshots are not permitted at this time." -#: nova/api/openstack/common.py:600 +#: nova/api/openstack/common.py:577 msgid "Cells is not enabled." msgstr "" @@ -2625,6 +2656,14 @@ msgstr "Invalid minRam filter [%s]" msgid "Invalid minDisk filter [%s]" msgstr "Invalid minDisk filter [%s]" +#: nova/api/openstack/compute/flavors.py:146 +#: nova/api/openstack/compute/servers.py:603 +#: nova/api/openstack/compute/plugins/v3/flavors.py:110 +#: nova/api/openstack/compute/plugins/v3/servers.py:280 +#, python-format +msgid "marker [%s] not found" +msgstr "marker [%s] not found" + #: nova/api/openstack/compute/image_metadata.py:35 #: nova/api/openstack/compute/images.py:141 #: nova/api/openstack/compute/images.py:157 @@ -2638,7 +2677,7 @@ msgstr "Incorrect request body format" #: nova/api/openstack/compute/image_metadata.py:82 #: nova/api/openstack/compute/server_metadata.py:79 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:108 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:85 +#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:72 #: nova/api/openstack/compute/plugins/v3/server_metadata.py:77 msgid "Request body and URI mismatch" msgstr "Request body and URI mismatch" @@ -2646,7 +2685,6 @@ msgstr "Request body and URI mismatch" #: nova/api/openstack/compute/image_metadata.py:85 #: nova/api/openstack/compute/server_metadata.py:83 #: 
nova/api/openstack/compute/contrib/flavorextraspecs.py:111 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:88 #: nova/api/openstack/compute/plugins/v3/server_metadata.py:81 msgid "Request body contains too many items" msgstr "Request body contains too many items" @@ -2721,12 +2759,12 @@ msgstr "Host '%s' could not be found." #: nova/api/openstack/compute/servers.py:625 #: nova/api/openstack/compute/servers.py:772 -#: nova/api/openstack/compute/servers.py:1079 -#: nova/api/openstack/compute/servers.py:1199 -#: nova/api/openstack/compute/servers.py:1384 -#: nova/api/openstack/compute/plugins/v3/servers.py:615 -#: nova/api/openstack/compute/plugins/v3/servers.py:727 -#: nova/api/openstack/compute/plugins/v3/servers.py:846 +#: nova/api/openstack/compute/servers.py:1081 +#: nova/api/openstack/compute/servers.py:1203 +#: nova/api/openstack/compute/servers.py:1388 +#: nova/api/openstack/compute/plugins/v3/servers.py:617 +#: nova/api/openstack/compute/plugins/v3/servers.py:729 +#: nova/api/openstack/compute/plugins/v3/servers.py:848 msgid "Instance could not be found" msgstr "Instance could not be found" @@ -2827,126 +2865,125 @@ msgstr "Invalid key_name provided." msgid "Invalid config_drive provided." msgstr "" -#: nova/api/openstack/compute/servers.py:1064 +#: nova/api/openstack/compute/servers.py:1066 msgid "HostId cannot be updated." msgstr "HostId cannot be updated." -#: nova/api/openstack/compute/servers.py:1068 +#: nova/api/openstack/compute/servers.py:1070 #, fuzzy msgid "Personality cannot be updated." msgstr "HostId cannot be updated." 
-#: nova/api/openstack/compute/servers.py:1094 -#: nova/api/openstack/compute/servers.py:1113 -#: nova/api/openstack/compute/plugins/v3/servers.py:626 -#: nova/api/openstack/compute/plugins/v3/servers.py:642 +#: nova/api/openstack/compute/servers.py:1096 +#: nova/api/openstack/compute/servers.py:1115 +#: nova/api/openstack/compute/plugins/v3/servers.py:628 +#: nova/api/openstack/compute/plugins/v3/servers.py:644 msgid "Instance has not been resized." msgstr "Instance has not been resized." -#: nova/api/openstack/compute/servers.py:1116 -#: nova/api/openstack/compute/plugins/v3/servers.py:645 +#: nova/api/openstack/compute/servers.py:1118 +#: nova/api/openstack/compute/plugins/v3/servers.py:647 #, fuzzy msgid "Flavor used by the instance could not be found." msgstr "Instance %(instance_id)s could not be found." -#: nova/api/openstack/compute/servers.py:1132 -#: nova/api/openstack/compute/plugins/v3/servers.py:659 +#: nova/api/openstack/compute/servers.py:1134 +#: nova/api/openstack/compute/plugins/v3/servers.py:661 msgid "Argument 'type' for reboot must be a string" msgstr "" -#: nova/api/openstack/compute/servers.py:1138 -#: nova/api/openstack/compute/plugins/v3/servers.py:665 +#: nova/api/openstack/compute/servers.py:1140 +#: nova/api/openstack/compute/plugins/v3/servers.py:667 msgid "Argument 'type' for reboot is not HARD or SOFT" msgstr "Argument 'type' for reboot is not HARD or SOFT" -#: nova/api/openstack/compute/servers.py:1142 -#: nova/api/openstack/compute/plugins/v3/servers.py:669 +#: nova/api/openstack/compute/servers.py:1144 +#: nova/api/openstack/compute/plugins/v3/servers.py:671 msgid "Missing argument 'type' for reboot" msgstr "Missing argument 'type' for reboot" -#: nova/api/openstack/compute/servers.py:1169 -#: nova/api/openstack/compute/plugins/v3/servers.py:697 +#: nova/api/openstack/compute/servers.py:1171 +#: nova/api/openstack/compute/plugins/v3/servers.py:699 msgid "Unable to locate requested flavor." 
msgstr "Unable to locate requested flavor." -#: nova/api/openstack/compute/servers.py:1172 -#: nova/api/openstack/compute/plugins/v3/servers.py:700 +#: nova/api/openstack/compute/servers.py:1174 +#: nova/api/openstack/compute/plugins/v3/servers.py:702 msgid "Resize requires a flavor change." msgstr "Resize requires a flavor change." -#: nova/api/openstack/compute/servers.py:1180 -#: nova/api/openstack/compute/plugins/v3/servers.py:708 +#: nova/api/openstack/compute/servers.py:1182 +#: nova/api/openstack/compute/plugins/v3/servers.py:710 msgid "You are not authorized to access the image the instance was started with." msgstr "" -#: nova/api/openstack/compute/servers.py:1184 -#: nova/api/openstack/compute/plugins/v3/servers.py:712 +#: nova/api/openstack/compute/servers.py:1186 +#: nova/api/openstack/compute/plugins/v3/servers.py:714 #, fuzzy msgid "Image that the instance was started with could not be found." msgstr "Instance %(instance_id)s could not be found." -#: nova/api/openstack/compute/servers.py:1188 -#: nova/api/openstack/compute/plugins/v3/servers.py:716 +#: nova/api/openstack/compute/servers.py:1190 +#: nova/api/openstack/compute/plugins/v3/servers.py:718 #, fuzzy msgid "Invalid instance image." msgstr "%s is a valid instance name" -#: nova/api/openstack/compute/servers.py:1211 +#: nova/api/openstack/compute/servers.py:1215 msgid "Missing imageRef attribute" msgstr "Missing imageRef attribute" -#: nova/api/openstack/compute/servers.py:1216 -#: nova/api/openstack/compute/servers.py:1224 +#: nova/api/openstack/compute/servers.py:1220 +#: nova/api/openstack/compute/servers.py:1228 msgid "Invalid imageRef provided." msgstr "Invalid imageRef provided." 
-#: nova/api/openstack/compute/servers.py:1254 +#: nova/api/openstack/compute/servers.py:1258 msgid "Missing flavorRef attribute" msgstr "Missing flavorRef attribute" -#: nova/api/openstack/compute/servers.py:1267 +#: nova/api/openstack/compute/servers.py:1271 msgid "No adminPass was specified" msgstr "No adminPass was specified" -#: nova/api/openstack/compute/servers.py:1275 +#: nova/api/openstack/compute/servers.py:1279 #: nova/api/openstack/compute/plugins/v3/admin_password.py:56 #, fuzzy msgid "Unable to set password on instance" msgstr "Failed to soft reboot instance." -#: nova/api/openstack/compute/servers.py:1284 +#: nova/api/openstack/compute/servers.py:1288 msgid "Unable to parse metadata key/value pairs." msgstr "Unable to parse metadata key/value pairs." -#: nova/api/openstack/compute/servers.py:1297 +#: nova/api/openstack/compute/servers.py:1301 msgid "Resize request has invalid 'flavorRef' attribute." msgstr "Resize request has invalid 'flavorRef' attribute." -#: nova/api/openstack/compute/servers.py:1300 +#: nova/api/openstack/compute/servers.py:1304 msgid "Resize requests require 'flavorRef' attribute." msgstr "Resize requests require 'flavorRef' attribute." -#: nova/api/openstack/compute/servers.py:1320 +#: nova/api/openstack/compute/servers.py:1324 msgid "Could not parse imageRef from request." msgstr "Could not parse imageRef from request." 
-#: nova/api/openstack/compute/servers.py:1390 -#: nova/api/openstack/compute/plugins/v3/servers.py:852 +#: nova/api/openstack/compute/servers.py:1394 +#: nova/api/openstack/compute/plugins/v3/servers.py:854 msgid "Cannot find image for rebuild" msgstr "Cannot find image for rebuild" -#: nova/api/openstack/compute/servers.py:1423 +#: nova/api/openstack/compute/servers.py:1427 msgid "createImage entity requires name attribute" msgstr "createImage entity requires name attribute" -#: nova/api/openstack/compute/servers.py:1432 -#: nova/api/openstack/compute/contrib/admin_actions.py:286 -#: nova/api/openstack/compute/plugins/v3/create_backup.py:85 -#: nova/api/openstack/compute/plugins/v3/servers.py:892 +#: nova/api/openstack/compute/servers.py:1436 +#: nova/api/openstack/compute/contrib/admin_actions.py:288 +#: nova/api/openstack/compute/plugins/v3/servers.py:894 msgid "Invalid metadata" msgstr "Invalid metadata" -#: nova/api/openstack/compute/servers.py:1490 +#: nova/api/openstack/compute/servers.py:1494 msgid "Invalid adminPass" msgstr "Invalid adminPass" @@ -2954,11 +2991,11 @@ msgstr "Invalid adminPass" #: nova/api/openstack/compute/contrib/admin_actions.py:88 #: nova/api/openstack/compute/contrib/admin_actions.py:113 #: nova/api/openstack/compute/contrib/admin_actions.py:135 -#: nova/api/openstack/compute/contrib/admin_actions.py:176 -#: nova/api/openstack/compute/contrib/admin_actions.py:195 -#: nova/api/openstack/compute/contrib/admin_actions.py:214 -#: nova/api/openstack/compute/contrib/admin_actions.py:233 -#: nova/api/openstack/compute/contrib/admin_actions.py:391 +#: nova/api/openstack/compute/contrib/admin_actions.py:178 +#: nova/api/openstack/compute/contrib/admin_actions.py:197 +#: nova/api/openstack/compute/contrib/admin_actions.py:216 +#: nova/api/openstack/compute/contrib/admin_actions.py:235 +#: nova/api/openstack/compute/contrib/admin_actions.py:393 #: nova/api/openstack/compute/contrib/multinic.py:43 #: 
nova/api/openstack/compute/contrib/rescue.py:45 #: nova/api/openstack/compute/contrib/shelve.py:43 @@ -2966,6 +3003,8 @@ msgid "Server not found" msgstr "Server not found" #: nova/api/openstack/compute/contrib/admin_actions.py:66 +#: nova/api/openstack/compute/plugins/v3/pause_server.py:59 +#: nova/api/openstack/compute/plugins/v3/pause_server.py:81 msgid "Virt driver does not implement pause function." msgstr "" @@ -2993,130 +3032,114 @@ msgstr "compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "compute.api::resume %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:163 +#: nova/api/openstack/compute/contrib/admin_actions.py:165 #, python-format msgid "Error in migrate %s" msgstr "Error in migrate %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:182 +#: nova/api/openstack/compute/contrib/admin_actions.py:184 #, python-format msgid "Compute.api::reset_network %s" msgstr "Compute.api::reset_network %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:201 +#: nova/api/openstack/compute/contrib/admin_actions.py:203 #, python-format msgid "Compute.api::inject_network_info %s" msgstr "Compute.api::inject_network_info %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:218 +#: nova/api/openstack/compute/contrib/admin_actions.py:220 #, python-format msgid "Compute.api::lock %s" msgstr "Compute.api::lock %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:237 +#: nova/api/openstack/compute/contrib/admin_actions.py:239 #, python-format msgid "Compute.api::unlock %s" msgstr "Compute.api::unlock %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:263 +#: nova/api/openstack/compute/contrib/admin_actions.py:265 #, python-format msgid "createBackup entity requires %s attribute" msgstr "createBackup entity requires %s attribute" -#: nova/api/openstack/compute/contrib/admin_actions.py:267 +#: nova/api/openstack/compute/contrib/admin_actions.py:269 msgid "Malformed createBackup entity" msgstr "Malformed createBackup 
entity" -#: nova/api/openstack/compute/contrib/admin_actions.py:273 +#: nova/api/openstack/compute/contrib/admin_actions.py:275 msgid "createBackup attribute 'rotation' must be an integer" msgstr "createBackup attribute 'rotation' must be an integer" -#: nova/api/openstack/compute/contrib/admin_actions.py:276 +#: nova/api/openstack/compute/contrib/admin_actions.py:278 #, fuzzy msgid "createBackup attribute 'rotation' must be greater than or equal to zero" msgstr "createBackup attribute 'rotation' must be an integer" -#: nova/api/openstack/compute/contrib/admin_actions.py:292 -#: nova/api/openstack/compute/contrib/console_output.py:45 +#: nova/api/openstack/compute/contrib/admin_actions.py:294 +#: nova/api/openstack/compute/contrib/console_output.py:46 #: nova/api/openstack/compute/contrib/server_start_stop.py:40 msgid "Instance not found" msgstr "Instance not found" -#: nova/api/openstack/compute/contrib/admin_actions.py:323 -#: nova/api/openstack/compute/plugins/v3/migrate_server.py:80 +#: nova/api/openstack/compute/contrib/admin_actions.py:325 msgid "" "host, block_migration and disk_over_commit must be specified for live " "migration." msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:360 +#: nova/api/openstack/compute/contrib/admin_actions.py:362 #, fuzzy, python-format msgid "Live migration of instance %s to another host failed" msgstr "Live migration of instance %(id)s to host %(host)s failed" -#: nova/api/openstack/compute/contrib/admin_actions.py:363 +#: nova/api/openstack/compute/contrib/admin_actions.py:365 #, python-format msgid "Live migration of instance %(id)s to host %(host)s failed" msgstr "Live migration of instance %(id)s to host %(host)s failed" -#: nova/api/openstack/compute/contrib/admin_actions.py:381 +#: nova/api/openstack/compute/contrib/admin_actions.py:383 #: nova/api/openstack/compute/plugins/v3/admin_actions.py:83 #, python-format msgid "Desired state must be specified. 
Valid states are: %s" msgstr "Desired state must be specified. Valid states are: %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:395 +#: nova/api/openstack/compute/contrib/admin_actions.py:397 #, python-format msgid "Compute.api::resetState %s" msgstr "Compute.api::resetState %s" -#: nova/api/openstack/compute/contrib/aggregates.py:99 -#, fuzzy, python-format -msgid "Cannot show aggregate: %s" -msgstr "Cannot show aggregate: %(id)s" - -#: nova/api/openstack/compute/contrib/aggregates.py:137 -#, fuzzy, python-format -msgid "Cannot update aggregate: %s" -msgstr "Cannot update aggregate: %(id)s" - -#: nova/api/openstack/compute/contrib/aggregates.py:151 -#, fuzzy, python-format -msgid "Cannot delete aggregate: %s" -msgstr "Cannot delete aggregate: %(id)s" - -#: nova/api/openstack/compute/contrib/aggregates.py:162 +#: nova/api/openstack/compute/contrib/aggregates.py:161 #, python-format msgid "Aggregates does not have %s action" msgstr "Aggregates does not have %s action" -#: nova/api/openstack/compute/contrib/aggregates.py:166 +#: nova/api/openstack/compute/contrib/aggregates.py:165 #: nova/api/openstack/compute/contrib/flavormanage.py:55 #: nova/api/openstack/compute/contrib/keypairs.py:86 -#: nova/api/openstack/compute/plugins/v3/aggregates.py:167 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:169 msgid "Invalid request body" msgstr "Invalid request body" -#: nova/api/openstack/compute/contrib/aggregates.py:176 -#: nova/api/openstack/compute/contrib/aggregates.py:181 +#: nova/api/openstack/compute/contrib/aggregates.py:175 +#: nova/api/openstack/compute/contrib/aggregates.py:180 #, python-format msgid "Cannot add host %(host)s in aggregate %(id)s" msgstr "Cannot add host %(host)s in aggregate %(id)s" -#: nova/api/openstack/compute/contrib/aggregates.py:195 -#: nova/api/openstack/compute/contrib/aggregates.py:199 -#: nova/api/openstack/compute/plugins/v3/aggregates.py:151 -#: nova/api/openstack/compute/plugins/v3/aggregates.py:155 +#: 
nova/api/openstack/compute/contrib/aggregates.py:194 +#: nova/api/openstack/compute/contrib/aggregates.py:198 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:153 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:157 #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "Cannot remove host %(host)s in aggregate %(id)s" -#: nova/api/openstack/compute/contrib/aggregates.py:218 -#: nova/api/openstack/compute/plugins/v3/aggregates.py:175 +#: nova/api/openstack/compute/contrib/aggregates.py:217 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:177 msgid "The value of metadata must be a dict" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:230 +#: nova/api/openstack/compute/contrib/aggregates.py:229 #, python-format msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" msgstr "Cannot set metadata %(metadata)s in aggregate %(id)s" @@ -3140,7 +3163,7 @@ msgstr "Failed to add interface: %s" #: nova/api/openstack/compute/contrib/attach_interfaces.py:119 #: nova/api/openstack/compute/contrib/attach_interfaces.py:154 #: nova/api/openstack/compute/contrib/attach_interfaces.py:177 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:166 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:169 #, fuzzy msgid "Network driver does not support this function." msgstr "Virt driver does not implement uptime function." 
@@ -3151,13 +3174,13 @@ msgid "Failed to attach interface" msgstr "Failed to add interface: %s" #: nova/api/openstack/compute/contrib/attach_interfaces.py:130 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:128 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:131 #, fuzzy msgid "Attachments update is not supported" msgstr "attribute not supported: %s" #: nova/api/openstack/compute/contrib/attach_interfaces.py:142 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:139 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:142 #, fuzzy, python-format msgid "Detach interface %s" msgstr "Starting VLAN interface %s" @@ -3238,19 +3261,19 @@ msgstr "" msgid "The requested console type details are not accessible" msgstr "" -#: nova/api/openstack/compute/contrib/console_output.py:51 +#: nova/api/openstack/compute/contrib/console_output.py:52 msgid "os-getConsoleOutput malformed or missing from request body" msgstr "os-getConsoleOutput malformed or missing from request body" -#: nova/api/openstack/compute/contrib/console_output.py:62 +#: nova/api/openstack/compute/contrib/console_output.py:63 msgid "Length in request body must be an integer value" msgstr "Length in request body must be an integer value" -#: nova/api/openstack/compute/contrib/console_output.py:70 +#: nova/api/openstack/compute/contrib/console_output.py:71 msgid "Unable to get console" msgstr "Unable to get console" -#: nova/api/openstack/compute/contrib/console_output.py:75 +#: nova/api/openstack/compute/contrib/console_output.py:76 #: nova/api/openstack/compute/plugins/v3/console_output.py:60 msgid "Unable to get console log, functionality not implemented" msgstr "" @@ -3261,17 +3284,17 @@ msgid "Instance not yet ready" msgstr "instance %s:not booted" #: nova/api/openstack/compute/contrib/consoles.py:52 -#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:62 +#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:60 msgid "Unable to get vnc 
console, functionality not implemented" msgstr "" #: nova/api/openstack/compute/contrib/consoles.py:76 -#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:93 +#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:89 msgid "Unable to get spice console, functionality not implemented" msgstr "" #: nova/api/openstack/compute/contrib/consoles.py:101 -#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:127 +#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:121 msgid "Unable to get rdp console, functionality not implemented" msgstr "" @@ -3324,8 +3347,12 @@ msgstr "Access list not available for public flavors." msgid "No request body" msgstr "No request body" +#: nova/api/openstack/compute/contrib/flavor_access.py:170 +#: nova/api/openstack/compute/contrib/flavor_access.py:194 +msgid "Missing tenant parameter" +msgstr "" + #: nova/api/openstack/compute/contrib/flavorextraspecs.py:56 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:42 msgid "No Request Body" msgstr "No Request Body" @@ -3335,8 +3362,8 @@ msgstr "" #: nova/api/openstack/compute/contrib/flavorextraspecs.py:134 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:150 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:113 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:132 +#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:96 +#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:115 #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "" @@ -3347,7 +3374,7 @@ msgid "DNS entries not found." msgstr "Instance not found" #: nova/api/openstack/compute/contrib/floating_ips.py:129 -#: nova/api/openstack/compute/contrib/floating_ips.py:177 +#: nova/api/openstack/compute/contrib/floating_ips.py:183 #, python-format msgid "Floating ip not found for id %s" msgstr "Floating ip not found for id %s" @@ -3361,52 +3388,61 @@ msgstr "No more floating ips in pool %s." 
msgid "No more floating ips available." msgstr "No more floating ips available." -#: nova/api/openstack/compute/contrib/floating_ips.py:218 -#: nova/api/openstack/compute/contrib/floating_ips.py:283 -#: nova/api/openstack/compute/contrib/security_groups.py:481 +#: nova/api/openstack/compute/contrib/floating_ips.py:168 +#, python-format +msgid "IP allocation over quota in pool %s." +msgstr "" + +#: nova/api/openstack/compute/contrib/floating_ips.py:170 +msgid "IP allocation over quota." +msgstr "" + +#: nova/api/openstack/compute/contrib/floating_ips.py:220 +#: nova/api/openstack/compute/contrib/floating_ips.py:285 +#: nova/api/openstack/compute/contrib/security_groups.py:482 msgid "Missing parameter dict" msgstr "Missing parameter dict" -#: nova/api/openstack/compute/contrib/floating_ips.py:221 -#: nova/api/openstack/compute/contrib/floating_ips.py:286 +#: nova/api/openstack/compute/contrib/floating_ips.py:223 +#: nova/api/openstack/compute/contrib/floating_ips.py:288 msgid "Address not specified" msgstr "Address not specified" -#: nova/api/openstack/compute/contrib/floating_ips.py:227 +#: nova/api/openstack/compute/contrib/floating_ips.py:229 msgid "No nw_info cache associated with instance" msgstr "No nw_info cache associated with instance" -#: nova/api/openstack/compute/contrib/floating_ips.py:232 +#: nova/api/openstack/compute/contrib/floating_ips.py:234 msgid "No fixed ips associated to instance" msgstr "No fixed ips associated to instance" -#: nova/api/openstack/compute/contrib/floating_ips.py:243 +#: nova/api/openstack/compute/contrib/floating_ips.py:245 #, fuzzy msgid "Specified fixed address not assigned to instance" msgstr "No fixed ips associated to instance" -#: nova/api/openstack/compute/contrib/floating_ips.py:257 +#: nova/api/openstack/compute/contrib/floating_ips.py:259 msgid "floating ip is already associated" msgstr "floating ip is already associated" -#: nova/api/openstack/compute/contrib/floating_ips.py:260 +#: 
nova/api/openstack/compute/contrib/floating_ips.py:262 msgid "l3driver call to add floating ip failed" msgstr "l3driver call to add floating ip failed" -#: nova/api/openstack/compute/contrib/floating_ips.py:263 -#: nova/api/openstack/compute/contrib/floating_ips.py:294 +#: nova/api/openstack/compute/contrib/floating_ips.py:265 +#: nova/api/openstack/compute/contrib/floating_ips.py:296 msgid "floating ip not found" msgstr "floating ip not found" -#: nova/api/openstack/compute/contrib/floating_ips.py:268 +#: nova/api/openstack/compute/contrib/floating_ips.py:270 msgid "Error. Unable to associate floating ip" msgstr "Error. Unable to associate floating ip" -#: nova/api/openstack/compute/contrib/floating_ips.py:309 +#: nova/api/openstack/compute/contrib/floating_ips.py:311 msgid "Floating ip is not associated" msgstr "Floating ip is not associated" -#: nova/api/openstack/compute/contrib/floating_ips.py:313 +#: nova/api/openstack/compute/contrib/floating_ips.py:315 #, fuzzy, python-format msgid "Floating ip %(address)s is not associated with instance %(id)s." msgstr "Floating ip %(address)s is not associated." @@ -3429,66 +3465,62 @@ msgid "fping utility is not found." 
msgstr "floating ip not found" #: nova/api/openstack/compute/contrib/hosts.py:183 -#: nova/api/openstack/compute/plugins/v3/hosts.py:128 #, python-format msgid "Invalid update setting: '%s'" msgstr "Invalid update setting: '%s'" #: nova/api/openstack/compute/contrib/hosts.py:186 -#: nova/api/openstack/compute/plugins/v3/hosts.py:131 #, python-format msgid "Invalid status: '%s'" msgstr "Invalid status: '%s'" #: nova/api/openstack/compute/contrib/hosts.py:188 -#: nova/api/openstack/compute/plugins/v3/hosts.py:133 #, python-format msgid "Invalid mode: '%s'" msgstr "Invalid mode: '%s'" #: nova/api/openstack/compute/contrib/hosts.py:190 -#: nova/api/openstack/compute/plugins/v3/hosts.py:135 msgid "'status' or 'maintenance_mode' needed for host update" msgstr "" #: nova/api/openstack/compute/contrib/hosts.py:206 -#: nova/api/openstack/compute/plugins/v3/hosts.py:152 +#: nova/api/openstack/compute/plugins/v3/hosts.py:134 #, fuzzy, python-format msgid "Putting host %(host_name)s in maintenance mode %(mode)s." msgstr "Putting host %(host)s in maintenance mode %(mode)s." #: nova/api/openstack/compute/contrib/hosts.py:212 -#: nova/api/openstack/compute/plugins/v3/hosts.py:158 +#: nova/api/openstack/compute/plugins/v3/hosts.py:140 #, fuzzy msgid "Virt driver does not implement host maintenance mode." msgstr "Virt driver does not implement uptime function." #: nova/api/openstack/compute/contrib/hosts.py:227 -#: nova/api/openstack/compute/plugins/v3/hosts.py:174 +#: nova/api/openstack/compute/plugins/v3/hosts.py:156 #, fuzzy, python-format msgid "Enabling host %s." msgstr "Calling setter %s" #: nova/api/openstack/compute/contrib/hosts.py:229 -#: nova/api/openstack/compute/plugins/v3/hosts.py:176 +#: nova/api/openstack/compute/plugins/v3/hosts.py:158 #, fuzzy, python-format msgid "Disabling host %s." 
msgstr "Updating host stats" #: nova/api/openstack/compute/contrib/hosts.py:234 -#: nova/api/openstack/compute/plugins/v3/hosts.py:181 +#: nova/api/openstack/compute/plugins/v3/hosts.py:163 #, fuzzy msgid "Virt driver does not implement host disabled status." msgstr "Virt driver does not implement uptime function." #: nova/api/openstack/compute/contrib/hosts.py:250 -#: nova/api/openstack/compute/plugins/v3/hosts.py:199 +#: nova/api/openstack/compute/plugins/v3/hosts.py:181 #, fuzzy msgid "Virt driver does not implement host power management." msgstr "Virt driver does not implement uptime function." #: nova/api/openstack/compute/contrib/hosts.py:336 -#: nova/api/openstack/compute/plugins/v3/hosts.py:292 +#: nova/api/openstack/compute/plugins/v3/hosts.py:274 msgid "Describe-resource is admin only functionality" msgstr "Describe-resource is admin only functionality" @@ -3681,7 +3713,7 @@ msgid "Malformed scheduler_hints attribute" msgstr "Malformed scheduler_hints attribute" #: nova/api/openstack/compute/contrib/security_group_default_rules.py:127 -#: nova/api/openstack/compute/contrib/security_groups.py:386 +#: nova/api/openstack/compute/contrib/security_groups.py:387 msgid "Not enough parameters to build a valid rule." msgstr "Not enough parameters to build a valid rule." @@ -3695,16 +3727,16 @@ msgstr "This rule already exists in group %s" msgid "security group default rule not found" msgstr "Security group with rule %(rule_id)s not found." 
-#: nova/api/openstack/compute/contrib/security_groups.py:394 +#: nova/api/openstack/compute/contrib/security_groups.py:395 #, fuzzy, python-format msgid "Bad prefix for network in cidr %s" msgstr "Bad prefix for to_global_ipv6: %s" -#: nova/api/openstack/compute/contrib/security_groups.py:484 +#: nova/api/openstack/compute/contrib/security_groups.py:485 msgid "Security group not specified" msgstr "Security group not specified" -#: nova/api/openstack/compute/contrib/security_groups.py:488 +#: nova/api/openstack/compute/contrib/security_groups.py:489 msgid "Security group name cannot be empty" msgstr "Security group name cannot be empty" @@ -3737,39 +3769,39 @@ msgstr "" msgid "No instances found for any event" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:162 +#: nova/api/openstack/compute/contrib/server_groups.py:161 msgid "Conflicting policies configured!" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:167 +#: nova/api/openstack/compute/contrib/server_groups.py:166 #, python-format msgid "Invalid policies: %s" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:172 +#: nova/api/openstack/compute/contrib/server_groups.py:171 msgid "Duplicate policies configured!" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:177 +#: nova/api/openstack/compute/contrib/server_groups.py:176 msgid "the body is invalid." msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:186 +#: nova/api/openstack/compute/contrib/server_groups.py:185 #, python-format msgid "'%s' is either missing or empty." 
msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:192 +#: nova/api/openstack/compute/contrib/server_groups.py:191 #, python-format msgid "Invalid format for name: '%s'" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:200 +#: nova/api/openstack/compute/contrib/server_groups.py:199 #, python-format msgid "'%s' is not a list" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:204 +#: nova/api/openstack/compute/contrib/server_groups.py:203 #, python-format msgid "unsupported fields: %s" msgstr "" @@ -3796,11 +3828,11 @@ msgstr "" msgid "Missing disabled reason field" msgstr "" -#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:231 +#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:230 msgid "Datetime is in invalid format" msgstr "" -#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:250 +#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:249 msgid "Invalid start time. The start time cannot occur after the end time." 
msgstr "" @@ -3871,11 +3903,11 @@ msgstr "" msgid "access_ip_v6 is not proper IPv6 format" msgstr "" -#: nova/api/openstack/compute/plugins/v3/aggregates.py:170 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:172 msgid "Invalid request format for metadata" msgstr "" -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:103 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:106 #, python-format msgid "Attach interface to %s" msgstr "" @@ -3889,23 +3921,6 @@ msgstr "" msgid "token not provided" msgstr "" -#: nova/api/openstack/compute/plugins/v3/create_backup.py:62 -#, python-format -msgid "create_backup entity requires %s attribute" -msgstr "" - -#: nova/api/openstack/compute/plugins/v3/create_backup.py:66 -msgid "Malformed create_backup entity" -msgstr "" - -#: nova/api/openstack/compute/plugins/v3/create_backup.py:72 -msgid "create_backup attribute 'rotation' must be an integer" -msgstr "" - -#: nova/api/openstack/compute/plugins/v3/create_backup.py:75 -msgid "create_backup attribute 'rotation' must be greater than or equal to zero" -msgstr "" - #: nova/api/openstack/compute/plugins/v3/extended_volumes.py:98 msgid "The volume was either invalid or not attached to the instance." 
msgstr "" @@ -3931,19 +3946,6 @@ msgstr "" msgid "Invalid min_disk filter [%s]" msgstr "" -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:66 -msgid "No or bad extra_specs provided" -msgstr "" - -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:73 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:95 -msgid "Concurrent transaction has been committed, try again" -msgstr "" - -#: nova/api/openstack/compute/plugins/v3/hosts.py:120 -msgid "The request body invalid" -msgstr "" - #: nova/api/openstack/compute/plugins/v3/hypervisors.py:125 msgid "Need parameter 'query' to specify which hypervisor to filter on" msgstr "" @@ -3969,7 +3971,7 @@ msgid "" msgstr "" #: nova/api/openstack/compute/plugins/v3/servers.py:412 -#: nova/api/openstack/compute/plugins/v3/servers.py:585 +#: nova/api/openstack/compute/plugins/v3/servers.py:587 msgid "The request body is invalid" msgstr "" @@ -3978,39 +3980,39 @@ msgstr "" msgid "Invalid flavor_ref provided." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:596 +#: nova/api/openstack/compute/plugins/v3/servers.py:598 msgid "host_id cannot be updated." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:741 +#: nova/api/openstack/compute/plugins/v3/servers.py:743 msgid "Invalid image_ref provided." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:760 +#: nova/api/openstack/compute/plugins/v3/servers.py:762 msgid "Missing image_ref attribute" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:767 +#: nova/api/openstack/compute/plugins/v3/servers.py:769 msgid "Missing flavor_ref attribute" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:780 +#: nova/api/openstack/compute/plugins/v3/servers.py:782 msgid "Resize request has invalid 'flavor_ref' attribute." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:783 +#: nova/api/openstack/compute/plugins/v3/servers.py:785 msgid "Resize requests require 'flavor_ref' attribute." 
msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:799 +#: nova/api/openstack/compute/plugins/v3/servers.py:801 msgid "Could not parse image_ref from request." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:883 +#: nova/api/openstack/compute/plugins/v3/servers.py:885 msgid "create_image entity requires name attribute" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:945 +#: nova/api/openstack/compute/plugins/v3/servers.py:947 msgid "Invalid admin_password" msgstr "" @@ -4022,12 +4024,12 @@ msgstr "" msgid "Instance has had its instance_type removed from the DB" msgstr "Instance has had its instance_type removed from the DB" -#: nova/api/validation/validators.py:61 +#: nova/api/validation/validators.py:62 #, python-format msgid "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" -#: nova/cells/manager.py:78 +#: nova/cells/manager.py:79 msgid "" "The cells feature of Nova is considered experimental by the OpenStack " "project because it receives much less testing than the rest of Nova. 
This" @@ -4080,42 +4082,42 @@ msgstr "" msgid "Unknown method '%(method)s' in compute API" msgstr "Casted '%(method)s' to compute '%(host)s'" -#: nova/cells/messaging.py:1096 +#: nova/cells/messaging.py:1103 #, python-format msgid "Got message to create instance fault: %(instance_fault)s" msgstr "" -#: nova/cells/messaging.py:1119 +#: nova/cells/messaging.py:1126 #, python-format msgid "" "Forcing a sync of instances, project_id=%(projid_str)s, " "updated_since=%(since_str)s" msgstr "" -#: nova/cells/messaging.py:1198 +#: nova/cells/messaging.py:1205 #, python-format msgid "No match when trying to update BDM: %(bdm)s" msgstr "" -#: nova/cells/messaging.py:1673 +#: nova/cells/messaging.py:1680 #, python-format msgid "No cell_name for %(method)s() from API" msgstr "" -#: nova/cells/messaging.py:1690 +#: nova/cells/messaging.py:1697 msgid "No cell_name for instance update from API" msgstr "" -#: nova/cells/messaging.py:1853 +#: nova/cells/messaging.py:1860 #, python-format msgid "Returning exception %s to caller" msgstr "Returning exception %s to caller" -#: nova/cells/rpcapi.py:369 +#: nova/cells/rpcapi.py:378 msgid "Failed to notify cells of BDM update/create." msgstr "" -#: nova/cells/rpcapi.py:385 +#: nova/cells/rpcapi.py:394 msgid "Failed to notify cells of BDM destroy." 
msgstr "" @@ -4185,71 +4187,71 @@ msgstr "Netmask to push into openvpn config" msgid "Failed to load %s" msgstr "Failed to create VM %s" -#: nova/cmd/baremetal_deploy_helper.py:211 +#: nova/cmd/baremetal_deploy_helper.py:210 #, python-format msgid "parent device '%s' not found" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:214 +#: nova/cmd/baremetal_deploy_helper.py:213 #, python-format msgid "root device '%s' not found" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:216 +#: nova/cmd/baremetal_deploy_helper.py:215 #, python-format msgid "swap device '%s' not found" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:218 +#: nova/cmd/baremetal_deploy_helper.py:217 #, python-format msgid "ephemeral device '%s' not found" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:228 +#: nova/cmd/baremetal_deploy_helper.py:227 msgid "Failed to detect root device UUID." msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:252 +#: nova/cmd/baremetal_deploy_helper.py:251 #, python-format msgid "Cmd : %s" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:253 +#: nova/cmd/baremetal_deploy_helper.py:252 #, python-format msgid "StdOut : %r" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:254 +#: nova/cmd/baremetal_deploy_helper.py:253 #, python-format msgid "StdErr : %r" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:282 +#: nova/cmd/baremetal_deploy_helper.py:281 #, python-format msgid "start deployment for node %(node_id)s, params %(params)s" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:291 +#: nova/cmd/baremetal_deploy_helper.py:290 #, fuzzy, python-format msgid "deployment to node %s failed" msgstr "element is not a child" -#: nova/cmd/baremetal_deploy_helper.py:295 +#: nova/cmd/baremetal_deploy_helper.py:294 #, python-format msgid "deployment to node %s done" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:317 +#: nova/cmd/baremetal_deploy_helper.py:316 #, python-format msgid "post: environ=%s" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:336 +#: 
nova/cmd/baremetal_deploy_helper.py:335 #, python-format msgid "Deploy agent error message: %s" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:360 +#: nova/cmd/baremetal_deploy_helper.py:359 #, python-format msgid "request is queued: node %(node_id)s, params %(params)s" msgstr "" @@ -4371,40 +4373,40 @@ msgid "" "Use python-neutronclient instead." msgstr "" -#: nova/cmd/manage.py:551 nova/tests/test_nova_manage.py:217 +#: nova/cmd/manage.py:551 nova/tests/test_nova_manage.py:218 msgid "id" msgstr "id" -#: nova/cmd/manage.py:552 nova/tests/test_nova_manage.py:218 +#: nova/cmd/manage.py:552 nova/tests/test_nova_manage.py:219 msgid "IPv4" msgstr "IPv4" -#: nova/cmd/manage.py:553 nova/tests/test_nova_manage.py:219 +#: nova/cmd/manage.py:553 nova/tests/test_nova_manage.py:220 msgid "IPv6" msgstr "IPv6" -#: nova/cmd/manage.py:554 nova/tests/test_nova_manage.py:220 +#: nova/cmd/manage.py:554 nova/tests/test_nova_manage.py:221 msgid "start address" msgstr "start address" -#: nova/cmd/manage.py:555 nova/tests/test_nova_manage.py:221 +#: nova/cmd/manage.py:555 nova/tests/test_nova_manage.py:222 msgid "DNS1" msgstr "DNS1" -#: nova/cmd/manage.py:556 nova/tests/test_nova_manage.py:222 +#: nova/cmd/manage.py:556 nova/tests/test_nova_manage.py:223 msgid "DNS2" msgstr "DNS2" -#: nova/cmd/manage.py:557 nova/tests/test_nova_manage.py:223 +#: nova/cmd/manage.py:557 nova/tests/test_nova_manage.py:224 msgid "VlanID" msgstr "VlanID" #: nova/cmd/manage.py:558 nova/cmd/manage.py:665 -#: nova/tests/test_nova_manage.py:224 +#: nova/tests/test_nova_manage.py:225 msgid "project" msgstr "project" -#: nova/cmd/manage.py:559 nova/tests/test_nova_manage.py:225 +#: nova/cmd/manage.py:559 nova/tests/test_nova_manage.py:226 msgid "uuid" msgstr "uuid" @@ -4620,34 +4622,34 @@ msgstr "" msgid "No db access allowed in nova-network: %s" msgstr "" -#: nova/compute/api.py:362 +#: nova/compute/api.py:353 msgid "Cannot run any more instances of this type." 
msgstr "Cannot run any more instances of this type." -#: nova/compute/api.py:369 +#: nova/compute/api.py:360 #, python-format msgid "Can only run %s more instances of this type." msgstr "Can only run %s more instances of this type." -#: nova/compute/api.py:381 +#: nova/compute/api.py:372 #, python-format msgid "" "%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)d " "instances. %(msg)s" msgstr "" -#: nova/compute/api.py:385 +#: nova/compute/api.py:376 #, python-format msgid "" "%(overs)s quota exceeded for %(pid)s, tried to run between %(min_count)d " "and %(max_count)d instances. %(msg)s" msgstr "" -#: nova/compute/api.py:406 +#: nova/compute/api.py:397 msgid "Metadata type should be dict." msgstr "" -#: nova/compute/api.py:412 +#: nova/compute/api.py:403 #, python-format msgid "" "Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " @@ -4656,256 +4658,264 @@ msgstr "" "Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " "properties" -#: nova/compute/api.py:424 +#: nova/compute/api.py:415 #, python-format msgid "Metadata property key '%s' is not a string." msgstr "" -#: nova/compute/api.py:427 +#: nova/compute/api.py:418 #, python-format msgid "Metadata property value '%(v)s' for key '%(k)s' is not a string." msgstr "" -#: nova/compute/api.py:431 +#: nova/compute/api.py:422 msgid "Metadata property key blank" msgstr "Metadata property key blank" -#: nova/compute/api.py:434 +#: nova/compute/api.py:425 msgid "Metadata property key greater than 255 characters" msgstr "Metadata property key greater than 255 characters" -#: nova/compute/api.py:437 +#: nova/compute/api.py:428 msgid "Metadata property value greater than 255 characters" msgstr "Metadata property value greater than 255 characters" -#: nova/compute/api.py:574 +#: nova/compute/api.py:565 msgid "Failed to set instance name using multi_instance_display_name_template." 
msgstr "" -#: nova/compute/api.py:676 +#: nova/compute/api.py:667 #, fuzzy msgid "Cannot attach one or more volumes to multiple instances" msgstr "Unable to attach boot volume to instance %s" -#: nova/compute/api.py:718 +#: nova/compute/api.py:709 msgid "The requested availability zone is not available" msgstr "" -#: nova/compute/api.py:1119 +#: nova/compute/api.py:1110 msgid "" "Images with destination_type 'volume' need to have a non-zero size " "specified" msgstr "" -#: nova/compute/api.py:1150 +#: nova/compute/api.py:1141 msgid "More than one swap drive requested." msgstr "" -#: nova/compute/api.py:1299 -#: nova/tests/api/openstack/compute/test_servers.py:3122 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2460 +#: nova/compute/api.py:1290 +#: nova/tests/api/openstack/compute/test_servers.py:3145 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2484 msgid "" "Unable to launch multiple instances with a single configured port ID. " "Please launch your instance one by one with different ports." msgstr "" -#: nova/compute/api.py:1401 +#: nova/compute/api.py:1311 +msgid "max_count cannot be greater than 1 if an fixed_ip is specified." +msgstr "" + +#: nova/compute/api.py:1415 #, fuzzy msgid "instance termination disabled" msgstr "Going to start terminating instances" -#: nova/compute/api.py:1416 +#: nova/compute/api.py:1430 #, python-format msgid "Working on deleting snapshot %s from shelved instance..." msgstr "" -#: nova/compute/api.py:1423 +#: nova/compute/api.py:1437 #, python-format msgid "Failed to delete snapshot from shelved instance (%s)." msgstr "" -#: nova/compute/api.py:1427 +#: nova/compute/api.py:1441 msgid "" "Something wrong happened when trying to delete snapshot from shelved " "instance." 
msgstr "" -#: nova/compute/api.py:1492 +#: nova/compute/api.py:1506 msgid "Instance is already in deleting state, ignoring this request" msgstr "" -#: nova/compute/api.py:1540 +#: nova/compute/api.py:1553 #, python-format msgid "" "Found an unconfirmed migration during delete, id: %(id)s, status: " "%(status)s" msgstr "" -#: nova/compute/api.py:1550 +#: nova/compute/api.py:1563 msgid "Instance may have been confirmed during delete" msgstr "" -#: nova/compute/api.py:1567 +#: nova/compute/api.py:1580 #, python-format msgid "Migration %s may have been confirmed during delete" msgstr "" -#: nova/compute/api.py:1603 +#: nova/compute/api.py:1615 #, python-format msgid "Flavor %d not found" msgstr "" -#: nova/compute/api.py:1621 +#: nova/compute/api.py:1633 #, fuzzy, python-format msgid "instance's host %s is down, deleting from database" msgstr "host for instance is down, deleting from database" -#: nova/compute/api.py:1648 nova/compute/manager.py:2279 +#: nova/compute/api.py:1660 #, python-format msgid "Ignoring volume cleanup failure due to %s" msgstr "Ignoring volume cleanup failure due to %s" -#: nova/compute/api.py:2043 +#: nova/compute/api.py:2061 #, python-format msgid "snapshot for %s" msgstr "snapshot for %s" -#: nova/compute/api.py:2415 +#: nova/compute/api.py:2399 +msgid "Resize to zero disk flavor is not allowed." +msgstr "" + +#: nova/compute/api.py:2438 #, fuzzy, python-format msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance." msgstr "%(overs)s quota exceeded for %(pid)s, tried to resize instance. %(msg)s" -#: nova/compute/api.py:2584 +#: nova/compute/api.py:2613 msgid "Cannot rescue a volume-backed instance" msgstr "" -#: nova/compute/api.py:2811 +#: nova/compute/api.py:2840 msgid "Volume must be attached in order to detach." msgstr "Volume must be attached in order to detach." -#: nova/compute/api.py:2831 +#: nova/compute/api.py:2860 msgid "Old volume is attached to a different instance." 
msgstr "" -#: nova/compute/api.py:2834 +#: nova/compute/api.py:2863 msgid "New volume must be detached in order to swap." msgstr "" -#: nova/compute/api.py:2837 +#: nova/compute/api.py:2866 msgid "New volume must be the same size or larger." msgstr "" -#: nova/compute/api.py:3032 +#: nova/compute/api.py:3067 #, python-format msgid "Instance compute service state on %s expected to be down, but it was up." msgstr "" -#: nova/compute/api.py:3335 +#: nova/compute/api.py:3369 msgid "Host aggregate is not empty" msgstr "" -#: nova/compute/api.py:3368 +#: nova/compute/api.py:3402 #, python-format msgid "More than 1 AZ for host %s" msgstr "" -#: nova/compute/api.py:3403 +#: nova/compute/api.py:3437 #, python-format msgid "Host already in availability zone %s" msgstr "" -#: nova/compute/api.py:3491 nova/tests/compute/test_keypairs.py:135 +#: nova/compute/api.py:3525 nova/tests/compute/test_keypairs.py:135 msgid "Keypair name contains unsafe characters" msgstr "Keypair name contains unsafe characters" -#: nova/compute/api.py:3495 nova/tests/compute/test_keypairs.py:127 +#: nova/compute/api.py:3529 nova/tests/compute/test_keypairs.py:127 #: nova/tests/compute/test_keypairs.py:131 msgid "Keypair name must be between 1 and 255 characters long" msgstr "Keypair name must be between 1 and 255 characters long" -#: nova/compute/api.py:3583 +#: nova/compute/api.py:3617 #, python-format msgid "Security group %s is not a string or unicode" msgstr "Security group %s is not a string or unicode" -#: nova/compute/api.py:3586 +#: nova/compute/api.py:3620 #, python-format msgid "Security group %s cannot be empty." msgstr "Security group %s cannot be empty." -#: nova/compute/api.py:3594 +#: nova/compute/api.py:3628 #, python-format msgid "" "Value (%(value)s) for parameter Group%(property)s is invalid. Content " "limited to '%(allowed)s'." msgstr "" -#: nova/compute/api.py:3600 +#: nova/compute/api.py:3634 #, python-format msgid "Security group %s should not be greater than 255 characters." 
msgstr "Security group %s should not be greater than 255 characters." -#: nova/compute/api.py:3618 +#: nova/compute/api.py:3652 msgid "Quota exceeded, too many security groups." msgstr "Quota exceeded, too many security groups." -#: nova/compute/api.py:3621 +#: nova/compute/api.py:3655 #, python-format msgid "Create Security Group %s" msgstr "Create Security Group %s" -#: nova/compute/api.py:3633 +#: nova/compute/api.py:3667 #, python-format msgid "Security group %s already exists" msgstr "Security group %s already exists" -#: nova/compute/api.py:3646 +#: nova/compute/api.py:3680 #, fuzzy, python-format msgid "Unable to update system group '%s'" msgstr "Unable to destroy vbd %s" -#: nova/compute/api.py:3708 +#: nova/compute/api.py:3742 #, fuzzy, python-format msgid "Unable to delete system group '%s'" msgstr "Unable to destroy vbd %s" -#: nova/compute/api.py:3713 +#: nova/compute/api.py:3747 msgid "Security group is still in use" msgstr "Security group is still in use" -#: nova/compute/api.py:3723 +#: nova/compute/api.py:3757 msgid "Failed to update usages deallocating security group" msgstr "Failed to update usages deallocating security group" -#: nova/compute/api.py:3726 +#: nova/compute/api.py:3760 #, python-format msgid "Delete security group %s" msgstr "Delete security group %s" -#: nova/compute/api.py:3802 nova/compute/api.py:3885 +#: nova/compute/api.py:3836 nova/compute/api.py:3919 #, python-format msgid "Rule (%s) not found" msgstr "Rule (%s) not found" -#: nova/compute/api.py:3818 +#: nova/compute/api.py:3852 msgid "Quota exceeded, too many security group rules." msgstr "Quota exceeded, too many security group rules." 
-#: nova/compute/api.py:3821 +#: nova/compute/api.py:3855 #, python-format msgid "" "Security group %(name)s added %(protocol)s ingress " "(%(from_port)s:%(to_port)s)" msgstr "" -#: nova/compute/api.py:3836 +#: nova/compute/api.py:3870 #, python-format msgid "" "Security group %(name)s removed %(protocol)s ingress " "(%(from_port)s:%(to_port)s)" msgstr "" -#: nova/compute/api.py:3892 +#: nova/compute/api.py:3926 msgid "Security group id should be integer" msgstr "Security group id should be integer" @@ -4999,286 +5009,286 @@ msgid "" "underscores, colons and spaces." msgstr "" -#: nova/compute/manager.py:278 +#: nova/compute/manager.py:283 #, python-format msgid "Task possibly preempted: %s" msgstr "" -#: nova/compute/manager.py:360 nova/compute/manager.py:2849 +#: nova/compute/manager.py:365 nova/compute/manager.py:2885 #, python-format msgid "Error while trying to clean up image %s" msgstr "" -#: nova/compute/manager.py:501 +#: nova/compute/manager.py:506 msgid "Instance event failed" msgstr "" -#: nova/compute/manager.py:600 +#: nova/compute/manager.py:605 #, python-format msgid "%s is not a valid node managed by this compute host." msgstr "" -#: nova/compute/manager.py:698 +#: nova/compute/manager.py:704 #, fuzzy, python-format msgid "" "Deleting instance as its host (%(instance_host)s) is not equal to our " "host (%(our_host)s)." msgstr "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." -#: nova/compute/manager.py:713 +#: nova/compute/manager.py:719 msgid "Instance has been marked deleted already, removing it from the hypervisor." 
msgstr "" -#: nova/compute/manager.py:733 +#: nova/compute/manager.py:739 msgid "" "Hypervisor driver does not support instance shared storage check, " "assuming it's not on shared storage" msgstr "" -#: nova/compute/manager.py:739 +#: nova/compute/manager.py:745 #, fuzzy msgid "Failed to check if instance shared" msgstr "Failed to terminate instance" -#: nova/compute/manager.py:805 nova/compute/manager.py:856 +#: nova/compute/manager.py:811 nova/compute/manager.py:862 msgid "Failed to complete a deletion" msgstr "" -#: nova/compute/manager.py:838 +#: nova/compute/manager.py:844 msgid "" "Service started deleting the instance during the previous run, but did " "not finish. Restarting the deletion now." msgstr "" -#: nova/compute/manager.py:879 +#: nova/compute/manager.py:885 #, python-format msgid "" "Instance in transitional state (%(task_state)s) at start-up and power " "state is (%(power_state)s), clearing task state" msgstr "" -#: nova/compute/manager.py:897 +#: nova/compute/manager.py:903 msgid "Failed to stop instance" msgstr "" -#: nova/compute/manager.py:909 +#: nova/compute/manager.py:915 msgid "Failed to start instance" msgstr "" -#: nova/compute/manager.py:934 +#: nova/compute/manager.py:940 msgid "Failed to revert crashed migration" msgstr "" -#: nova/compute/manager.py:937 +#: nova/compute/manager.py:943 msgid "Instance found in migrating state during startup. Resetting task_state" msgstr "" -#: nova/compute/manager.py:954 +#: nova/compute/manager.py:960 msgid "Rebooting instance after nova-compute restart." msgstr "Rebooting instance after nova-compute restart." 
-#: nova/compute/manager.py:964 +#: nova/compute/manager.py:970 msgid "Hypervisor driver does not support resume guests" msgstr "Hypervisor driver does not support resume guests" -#: nova/compute/manager.py:969 +#: nova/compute/manager.py:975 #, fuzzy msgid "Failed to resume instance" msgstr "Failed to suspend instance" -#: nova/compute/manager.py:978 +#: nova/compute/manager.py:984 msgid "Hypervisor driver does not support firewall rules" msgstr "Hypervisor driver does not support firewall rules" -#: nova/compute/manager.py:1003 +#: nova/compute/manager.py:1009 #, python-format -msgid "Lifecycle event %(state)d on VM %(uuid)s" +msgid "VM %(state)s (Lifecycle Event)" msgstr "" -#: nova/compute/manager.py:1019 +#: nova/compute/manager.py:1025 #, fuzzy, python-format msgid "Unexpected power state %d" msgstr "Unexpected status code" -#: nova/compute/manager.py:1124 +#: nova/compute/manager.py:1130 msgid "Hypervisor driver does not support security groups." msgstr "" -#: nova/compute/manager.py:1164 +#: nova/compute/manager.py:1168 #, python-format msgid "Volume id: %s finished being created but was not set as 'available'" msgstr "" -#: nova/compute/manager.py:1222 nova/compute/manager.py:1978 +#: nova/compute/manager.py:1225 nova/compute/manager.py:1982 msgid "Success" msgstr "" -#: nova/compute/manager.py:1246 +#: nova/compute/manager.py:1249 msgid "Instance disappeared before we could start it" msgstr "" -#: nova/compute/manager.py:1274 +#: nova/compute/manager.py:1276 msgid "Anti-affinity instance group policy was violated." 
msgstr "" -#: nova/compute/manager.py:1351 +#: nova/compute/manager.py:1353 msgid "Failed to dealloc network for deleted instance" msgstr "Failed to dealloc network for deleted instance" -#: nova/compute/manager.py:1356 +#: nova/compute/manager.py:1358 msgid "Instance disappeared during build" msgstr "" -#: nova/compute/manager.py:1372 +#: nova/compute/manager.py:1374 msgid "Failed to dealloc network for failed instance" msgstr "" -#: nova/compute/manager.py:1399 +#: nova/compute/manager.py:1401 #, fuzzy, python-format msgid "Error: %s" msgstr "DB error: %s" -#: nova/compute/manager.py:1445 nova/compute/manager.py:3473 +#: nova/compute/manager.py:1447 nova/compute/manager.py:3509 msgid "Error trying to reschedule" msgstr "Error trying to reschedule" -#: nova/compute/manager.py:1500 +#: nova/compute/manager.py:1503 msgid "Instance build timed out. Set to error state." msgstr "Instance build timed out. Set to error state." -#: nova/compute/manager.py:1510 nova/compute/manager.py:1870 +#: nova/compute/manager.py:1513 nova/compute/manager.py:1873 msgid "Starting instance..." msgstr "Starting instance..." -#: nova/compute/manager.py:1528 +#: nova/compute/manager.py:1531 #, python-format msgid "" "Treating negative config value (%(retries)s) for " "'network_allocate_retries' as 0." 
msgstr "" -#: nova/compute/manager.py:1553 +#: nova/compute/manager.py:1556 #, python-format msgid "Instance failed network setup after %(attempts)d attempt(s)" msgstr "" -#: nova/compute/manager.py:1557 +#: nova/compute/manager.py:1560 #, python-format msgid "Instance failed network setup (attempt %(attempt)d of %(attempts)d)" msgstr "" -#: nova/compute/manager.py:1738 +#: nova/compute/manager.py:1741 msgid "Instance failed block device setup" msgstr "Instance failed block device setup" -#: nova/compute/manager.py:1758 nova/compute/manager.py:2086 -#: nova/compute/manager.py:3985 +#: nova/compute/manager.py:1761 nova/compute/manager.py:2098 +#: nova/compute/manager.py:4041 msgid "Instance failed to spawn" msgstr "Instance failed to spawn" -#: nova/compute/manager.py:1937 +#: nova/compute/manager.py:1941 msgid "Unexpected build failure, not rescheduling build." msgstr "" -#: nova/compute/manager.py:2002 +#: nova/compute/manager.py:2006 #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" -#: nova/compute/manager.py:2008 nova/compute/manager.py:2048 +#: nova/compute/manager.py:2012 nova/compute/manager.py:2060 msgid "Failed to allocate network(s)" msgstr "" -#: nova/compute/manager.py:2012 nova/compute/manager.py:2050 +#: nova/compute/manager.py:2016 nova/compute/manager.py:2062 msgid "Failed to allocate the network(s), not rescheduling." msgstr "" -#: nova/compute/manager.py:2074 +#: nova/compute/manager.py:2086 msgid "Failure prepping block device" msgstr "" -#: nova/compute/manager.py:2076 +#: nova/compute/manager.py:2088 msgid "Failure prepping block device." 
msgstr "" -#: nova/compute/manager.py:2099 +#: nova/compute/manager.py:2111 msgid "Could not clean up failed build, not rescheduling" msgstr "" -#: nova/compute/manager.py:2109 +#: nova/compute/manager.py:2121 msgid "Failed to deallocate networks" msgstr "" -#: nova/compute/manager.py:2130 +#: nova/compute/manager.py:2142 msgid "Failed to cleanup volumes for failed build, not rescheduling" msgstr "" -#: nova/compute/manager.py:2169 +#: nova/compute/manager.py:2181 #, fuzzy msgid "Failed to deallocate network for instance." msgstr "Failed to dealloc network for deleted instance" -#: nova/compute/manager.py:2178 +#: nova/compute/manager.py:2202 #, python-format msgid "%(action_str)s instance" msgstr "%(action_str)s instance" -#: nova/compute/manager.py:2222 +#: nova/compute/manager.py:2246 #, python-format msgid "Ignoring DiskNotFound: %s" msgstr "Ignoring DiskNotFound: %s" -#: nova/compute/manager.py:2225 +#: nova/compute/manager.py:2249 #, python-format msgid "Ignoring VolumeNotFound: %s" msgstr "Ignoring VolumeNotFound: %s" -#: nova/compute/manager.py:2324 +#: nova/compute/manager.py:2353 msgid "Instance disappeared during terminate" msgstr "" -#: nova/compute/manager.py:2330 nova/compute/manager.py:3653 -#: nova/compute/manager.py:5671 +#: nova/compute/manager.py:2359 nova/compute/manager.py:3689 +#: nova/compute/manager.py:5769 msgid "Setting instance vm_state to ERROR" msgstr "" -#: nova/compute/manager.py:2503 +#: nova/compute/manager.py:2539 msgid "Rebuilding instance" msgstr "Rebuilding instance" -#: nova/compute/manager.py:2516 +#: nova/compute/manager.py:2552 msgid "Invalid state of instance files on shared storage" msgstr "" -#: nova/compute/manager.py:2520 +#: nova/compute/manager.py:2556 msgid "disk on shared storage, recreating using existing disk" msgstr "" -#: nova/compute/manager.py:2524 +#: nova/compute/manager.py:2560 #, python-format msgid "disk not on shared storage, rebuilding from: '%s'" msgstr "" -#: nova/compute/manager.py:2535 
nova/compute/manager.py:4790 +#: nova/compute/manager.py:2571 nova/compute/manager.py:4884 #, fuzzy, python-format msgid "Failed to get compute_info for %s" msgstr "Failed to get info for disk %s" -#: nova/compute/manager.py:2611 +#: nova/compute/manager.py:2647 #, python-format msgid "bringing vm to original state: '%s'" msgstr "" -#: nova/compute/manager.py:2642 +#: nova/compute/manager.py:2678 #, fuzzy, python-format msgid "Detaching from volume api: %s" msgstr "Attach boot from volume failed: %s" -#: nova/compute/manager.py:2669 +#: nova/compute/manager.py:2705 msgid "Rebooting instance" msgstr "Rebooting instance" -#: nova/compute/manager.py:2686 +#: nova/compute/manager.py:2722 #, python-format msgid "" "trying to reboot a non-running instance: (state: %(state)s expected: " @@ -5287,25 +5297,25 @@ msgstr "" "trying to reboot a non-running instance: (state: %(state)s expected: " "%(running)s)" -#: nova/compute/manager.py:2722 +#: nova/compute/manager.py:2758 msgid "Reboot failed but instance is running" msgstr "" -#: nova/compute/manager.py:2730 +#: nova/compute/manager.py:2766 #, python-format msgid "Cannot reboot instance: %s" msgstr "" -#: nova/compute/manager.py:2742 +#: nova/compute/manager.py:2778 #, fuzzy msgid "Instance disappeared during reboot" msgstr "instance %s: rebooted" -#: nova/compute/manager.py:2810 +#: nova/compute/manager.py:2846 msgid "instance snapshotting" msgstr "instance snapshotting" -#: nova/compute/manager.py:2816 +#: nova/compute/manager.py:2852 #, python-format msgid "" "trying to snapshot a non-running instance: (state: %(state)s expected: " @@ -5314,197 +5324,197 @@ msgstr "" "trying to snapshot a non-running instance: (state: %(state)s expected: " "%(running)s)" -#: nova/compute/manager.py:2854 +#: nova/compute/manager.py:2890 msgid "Image not found during snapshot" msgstr "" -#: nova/compute/manager.py:2936 +#: nova/compute/manager.py:2972 #, python-format msgid "Failed to set admin password. 
Instance %s is not running" msgstr "Failed to set admin password. Instance %s is not running" -#: nova/compute/manager.py:2943 +#: nova/compute/manager.py:2979 msgid "Root password set" msgstr "Root password set" -#: nova/compute/manager.py:2948 +#: nova/compute/manager.py:2984 #, fuzzy msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "set_admin_password is not implemented by this driver." -#: nova/compute/manager.py:2961 +#: nova/compute/manager.py:2997 #, python-format msgid "set_admin_password failed: %s" msgstr "set_admin_password failed: %s" -#: nova/compute/manager.py:2967 +#: nova/compute/manager.py:3003 msgid "error setting admin password" msgstr "error setting admin password" -#: nova/compute/manager.py:2983 +#: nova/compute/manager.py:3019 #, python-format msgid "" "trying to inject a file into a non-running (state: %(current_state)s " "expected: %(expected_state)s)" msgstr "" -#: nova/compute/manager.py:2988 +#: nova/compute/manager.py:3024 #, python-format msgid "injecting file to %s" msgstr "" -#: nova/compute/manager.py:3006 +#: nova/compute/manager.py:3042 msgid "" "Unable to find a different image to use for rescue VM, using instance's " "current image" msgstr "" -#: nova/compute/manager.py:3025 +#: nova/compute/manager.py:3061 msgid "Rescuing" msgstr "Rescuing" -#: nova/compute/manager.py:3046 +#: nova/compute/manager.py:3082 #, fuzzy msgid "Error trying to Rescue Instance" msgstr "Error trying to reschedule" -#: nova/compute/manager.py:3050 +#: nova/compute/manager.py:3086 #, fuzzy, python-format msgid "Driver Error: %s" msgstr "DB error: %s" -#: nova/compute/manager.py:3073 +#: nova/compute/manager.py:3109 msgid "Unrescuing" msgstr "Unrescuing" -#: nova/compute/manager.py:3144 +#: nova/compute/manager.py:3180 #, python-format msgid "Migration %s is not found during confirmation" msgstr "" -#: nova/compute/manager.py:3149 +#: nova/compute/manager.py:3185 #, python-format msgid "Migration %s is already 
confirmed" msgstr "" -#: nova/compute/manager.py:3153 +#: nova/compute/manager.py:3189 #, python-format msgid "" "Unexpected confirmation status '%(status)s' of migration %(id)s, exit " "confirmation process" msgstr "" -#: nova/compute/manager.py:3167 +#: nova/compute/manager.py:3203 msgid "Instance is not found during confirmation" msgstr "" -#: nova/compute/manager.py:3348 +#: nova/compute/manager.py:3384 #, fuzzy, python-format msgid "Updating instance to original state: '%s'" msgstr "Setting instance to %(state)s state." -#: nova/compute/manager.py:3371 +#: nova/compute/manager.py:3407 #, fuzzy msgid "Instance has no source host" msgstr "Instance has no volume." -#: nova/compute/manager.py:3377 +#: nova/compute/manager.py:3413 msgid "destination same as source!" msgstr "destination same as source!" -#: nova/compute/manager.py:3395 +#: nova/compute/manager.py:3431 msgid "Migrating" msgstr "Migrating" -#: nova/compute/manager.py:3659 +#: nova/compute/manager.py:3695 #, python-format msgid "Failed to rollback quota for failed finish_resize: %s" msgstr "" -#: nova/compute/manager.py:3719 +#: nova/compute/manager.py:3755 msgid "Pausing" msgstr "Pausing" -#: nova/compute/manager.py:3736 +#: nova/compute/manager.py:3772 msgid "Unpausing" msgstr "Unpausing" -#: nova/compute/manager.py:3777 +#: nova/compute/manager.py:3813 nova/compute/manager.py:3830 msgid "Retrieving diagnostics" msgstr "Retrieving diagnostics" -#: nova/compute/manager.py:3812 +#: nova/compute/manager.py:3866 msgid "Resuming" msgstr "Resuming" -#: nova/compute/manager.py:4028 +#: nova/compute/manager.py:4084 msgid "Get console output" msgstr "Get console output" -#: nova/compute/manager.py:4227 +#: nova/compute/manager.py:4283 #, python-format msgid "Attaching volume %(volume_id)s to %(mountpoint)s" msgstr "Attaching volume %(volume_id)s to %(mountpoint)s" -#: nova/compute/manager.py:4236 +#: nova/compute/manager.py:4292 #, python-format msgid "Failed to attach %(volume_id)s at %(mountpoint)s" msgstr 
"" -#: nova/compute/manager.py:4252 +#: nova/compute/manager.py:4308 #, python-format msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" msgstr "Detach volume %(volume_id)s from mountpoint %(mp)s" -#: nova/compute/manager.py:4263 +#: nova/compute/manager.py:4319 msgid "Detaching volume from unknown instance" msgstr "Detaching volume from unknown instance" -#: nova/compute/manager.py:4275 +#: nova/compute/manager.py:4331 #, fuzzy, python-format msgid "Failed to detach volume %(volume_id)s from %(mp)s" msgstr "Faild to detach volume %(volume_id)s from %(mp)s" -#: nova/compute/manager.py:4348 +#: nova/compute/manager.py:4404 #, python-format msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" msgstr "" -#: nova/compute/manager.py:4355 +#: nova/compute/manager.py:4411 #, python-format msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4442 +#: nova/compute/manager.py:4504 #, fuzzy, python-format msgid "allocate_port_for_instance returned %(ports)s ports" msgstr "allocate_for_instance() for %s" -#: nova/compute/manager.py:4462 +#: nova/compute/manager.py:4524 #, python-format msgid "Port %s is not attached" msgstr "" -#: nova/compute/manager.py:4474 nova/tests/compute/test_compute.py:10545 +#: nova/compute/manager.py:4536 nova/tests/compute/test_compute.py:10612 #, python-format msgid "Host %s not found" msgstr "" -#: nova/compute/manager.py:4628 +#: nova/compute/manager.py:4690 #, python-format msgid "Pre live migration failed at %s" msgstr "" -#: nova/compute/manager.py:4658 +#: nova/compute/manager.py:4753 msgid "_post_live_migration() is started.." msgstr "_post_live_migration() is started.." -#: nova/compute/manager.py:4731 +#: nova/compute/manager.py:4825 #, python-format msgid "Migrating instance to %s finished successfully." 
msgstr "" -#: nova/compute/manager.py:4733 +#: nova/compute/manager.py:4827 msgid "" "You may see the error \"libvirt: QEMU error: Domain not found: no domain " "with matching name.\" This error can be safely ignored." @@ -5512,15 +5522,15 @@ msgstr "" "You may see the error \"libvirt: QEMU error: Domain not found: no domain " "with matching name.\" This error can be safely ignored." -#: nova/compute/manager.py:4758 +#: nova/compute/manager.py:4852 msgid "Post operation of migration started" msgstr "Post operation of migration started" -#: nova/compute/manager.py:4967 +#: nova/compute/manager.py:5057 msgid "An error occurred while refreshing the network cache." msgstr "" -#: nova/compute/manager.py:5021 +#: nova/compute/manager.py:5110 #, python-format msgid "" "Found %(migration_count)d unconfirmed migrations older than " @@ -5529,12 +5539,12 @@ msgstr "" "Found %(migration_count)d unconfirmed migrations older than " "%(confirm_window)d seconds" -#: nova/compute/manager.py:5026 +#: nova/compute/manager.py:5115 #, python-format msgid "Setting migration %(migration_id)s to error: %(reason)s" msgstr "Setting migration %(migration_id)s to error: %(reason)s" -#: nova/compute/manager.py:5035 +#: nova/compute/manager.py:5124 #, python-format msgid "" "Automatically confirming migration %(migration_id)s for instance " @@ -5543,30 +5553,30 @@ msgstr "" "Automatically confirming migration %(migration_id)s for instance " "%(instance_uuid)s" -#: nova/compute/manager.py:5045 +#: nova/compute/manager.py:5134 #, python-format msgid "Instance %s not found" msgstr "" -#: nova/compute/manager.py:5050 +#: nova/compute/manager.py:5139 msgid "In ERROR state" msgstr "In ERROR state" -#: nova/compute/manager.py:5057 +#: nova/compute/manager.py:5146 #, fuzzy, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "In states %(vm_state)s/%(task_state)s, notRESIZED/None" -#: nova/compute/manager.py:5068 +#: nova/compute/manager.py:5157 #, python-format msgid 
"Error auto-confirming resize: %s. Will retry later." msgstr "" -#: nova/compute/manager.py:5097 +#: nova/compute/manager.py:5186 msgid "Periodic task failed to offload instance." msgstr "" -#: nova/compute/manager.py:5117 +#: nova/compute/manager.py:5206 #, python-format msgid "" "Running instance usage audit for host %(host)s from %(begin_time)s to " @@ -5575,20 +5585,20 @@ msgstr "" "Running instance usage audit for host %(host)s from %(begin_time)s to " "%(end_time)s. %(number_instances)s instances." -#: nova/compute/manager.py:5137 +#: nova/compute/manager.py:5226 #, python-format msgid "Failed to generate usage audit for instance on host %s" msgstr "Failed to generate usage audit for instance on host %s" -#: nova/compute/manager.py:5166 +#: nova/compute/manager.py:5255 msgid "Updating bandwidth usage cache" msgstr "Updating bandwidth usage cache" -#: nova/compute/manager.py:5188 +#: nova/compute/manager.py:5277 msgid "Bandwidth usage not supported by hypervisor." msgstr "" -#: nova/compute/manager.py:5311 +#: nova/compute/manager.py:5400 #, python-format msgid "" "Found %(num_db_instances)s in the database and %(num_vm_instances)s on " @@ -5597,16 +5607,7 @@ msgstr "" "Found %(num_db_instances)s in the database and %(num_vm_instances)s on " "the hypervisor." -#: nova/compute/manager.py:5318 nova/compute/manager.py:5381 -#, python-format -msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." -msgstr "" - -#: nova/compute/manager.py:5342 -msgid "Periodic sync_power_state task had an error while processing an instance." 
-msgstr "" - -#: nova/compute/manager.py:5368 +#: nova/compute/manager.py:5466 #, python-format msgid "" "During the sync_power process the instance has moved from host %(src)s to" @@ -5615,110 +5616,115 @@ msgstr "" "During the sync_power process the instance has moved from host %(src)s to" " host %(dst)s" -#: nova/compute/manager.py:5406 +#: nova/compute/manager.py:5479 +#, python-format +msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." +msgstr "" + +#: nova/compute/manager.py:5504 msgid "Instance shutdown by itself. Calling the stop API." msgstr "Instance shutdown by itself. Calling the stop API." -#: nova/compute/manager.py:5418 nova/compute/manager.py:5427 -#: nova/compute/manager.py:5458 nova/compute/manager.py:5469 +#: nova/compute/manager.py:5516 nova/compute/manager.py:5525 +#: nova/compute/manager.py:5556 nova/compute/manager.py:5567 msgid "error during stop() in sync_power_state." msgstr "error during stop() in sync_power_state." -#: nova/compute/manager.py:5422 +#: nova/compute/manager.py:5520 #, fuzzy msgid "Instance is suspended unexpectedly. Calling the stop API." msgstr "Instance is paused or suspended unexpectedly. Calling the stop API." -#: nova/compute/manager.py:5438 +#: nova/compute/manager.py:5536 #, fuzzy msgid "Instance is paused unexpectedly. Ignore." msgstr "Instance is paused or suspended unexpectedly. Calling the stop API." -#: nova/compute/manager.py:5444 +#: nova/compute/manager.py:5542 msgid "Instance is unexpectedly not found. Ignore." msgstr "" -#: nova/compute/manager.py:5450 +#: nova/compute/manager.py:5548 msgid "Instance is not stopped. Calling the stop API." msgstr "Instance is not stopped. Calling the stop API." -#: nova/compute/manager.py:5464 +#: nova/compute/manager.py:5562 msgid "Paused instance shutdown by itself. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5478 +#: nova/compute/manager.py:5576 msgid "Instance is not (soft-)deleted." 
msgstr "Instance is not (soft-)deleted." -#: nova/compute/manager.py:5507 +#: nova/compute/manager.py:5605 msgid "Reclaiming deleted instance" msgstr "Reclaiming deleted instance" -#: nova/compute/manager.py:5511 +#: nova/compute/manager.py:5609 #, python-format msgid "Periodic reclaim failed to delete instance: %s" msgstr "" -#: nova/compute/manager.py:5536 +#: nova/compute/manager.py:5634 #, fuzzy, python-format msgid "Deleting orphan compute node %s" msgstr "Loading compute driver '%s'" -#: nova/compute/manager.py:5544 nova/compute/resource_tracker.py:392 +#: nova/compute/manager.py:5642 nova/compute/resource_tracker.py:391 #, python-format msgid "No service record for host %s" msgstr "No service record for host %s" -#: nova/compute/manager.py:5585 +#: nova/compute/manager.py:5682 #, python-format msgid "" "Detected instance with name label '%s' which is marked as DELETED but " "still present on host." msgstr "" -#: nova/compute/manager.py:5591 +#: nova/compute/manager.py:5688 #, python-format msgid "" "Powering off instance with name label '%s' which is marked as DELETED but" " still present on host." msgstr "" -#: nova/compute/manager.py:5600 +#: nova/compute/manager.py:5697 msgid "set_bootable is not implemented for the current driver" msgstr "" -#: nova/compute/manager.py:5605 +#: nova/compute/manager.py:5702 msgid "Failed to power off instance" msgstr "" -#: nova/compute/manager.py:5609 +#: nova/compute/manager.py:5706 #, python-format msgid "" "Destroying instance with name label '%s' which is marked as DELETED but " "still present on host." 
msgstr "" -#: nova/compute/manager.py:5619 +#: nova/compute/manager.py:5716 #, python-format msgid "Periodic cleanup failed to delete instance: %s" msgstr "" -#: nova/compute/manager.py:5623 +#: nova/compute/manager.py:5720 #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "" -#: nova/compute/manager.py:5654 +#: nova/compute/manager.py:5752 #, python-format msgid "Setting instance back to %(state)s after: %(error)s" msgstr "" -#: nova/compute/manager.py:5664 +#: nova/compute/manager.py:5762 #, fuzzy, python-format msgid "Setting instance back to ACTIVE after: %s" msgstr "Setting instance to %(state)s state." -#: nova/compute/resource_tracker.py:106 +#: nova/compute/resource_tracker.py:105 #, fuzzy msgid "" "Host field should not be set on the instance until resources have been " @@ -5727,22 +5733,22 @@ msgstr "" "Host field should be not be set on the instance until resources have been" " claimed." -#: nova/compute/resource_tracker.py:111 +#: nova/compute/resource_tracker.py:110 msgid "" "Node field should not be set on the instance until resources have been " "claimed." msgstr "" -#: nova/compute/resource_tracker.py:273 +#: nova/compute/resource_tracker.py:272 #, python-format msgid "Cannot get the metrics from %s." msgstr "" -#: nova/compute/resource_tracker.py:292 +#: nova/compute/resource_tracker.py:291 msgid "Auditing locally available compute resources" msgstr "" -#: nova/compute/resource_tracker.py:297 +#: nova/compute/resource_tracker.py:296 msgid "" "Virt driver does not support 'get_available_resource' Compute tracking " "is disabled." @@ -5750,62 +5756,62 @@ msgstr "" "Virt driver does not support 'get_available_resource' Compute tracking " "is disabled." 
-#: nova/compute/resource_tracker.py:372 +#: nova/compute/resource_tracker.py:371 #, fuzzy, python-format msgid "Compute_service record created for %(host)s:%(node)s" msgstr "Compute_service record created for %s " -#: nova/compute/resource_tracker.py:378 +#: nova/compute/resource_tracker.py:377 #, fuzzy, python-format msgid "Compute_service record updated for %(host)s:%(node)s" msgstr "Compute_service record updated for %s " -#: nova/compute/resource_tracker.py:431 +#: nova/compute/resource_tracker.py:430 #, python-format msgid "Free ram (MB): %s" msgstr "Free ram (MB): %s" -#: nova/compute/resource_tracker.py:432 +#: nova/compute/resource_tracker.py:431 #, python-format msgid "Free disk (GB): %s" msgstr "Free disk (GB): %s" -#: nova/compute/resource_tracker.py:437 +#: nova/compute/resource_tracker.py:436 #, python-format msgid "Free VCPUS: %s" msgstr "Free VCPUS: %s" -#: nova/compute/resource_tracker.py:439 +#: nova/compute/resource_tracker.py:438 msgid "Free VCPU information unavailable" msgstr "Free VCPU information unavailable" -#: nova/compute/resource_tracker.py:442 +#: nova/compute/resource_tracker.py:441 #, python-format msgid "PCI stats: %s" msgstr "" -#: nova/compute/resource_tracker.py:478 +#: nova/compute/resource_tracker.py:486 #, fuzzy, python-format msgid "Updating from migration %s" msgstr "Starting finish_migration" -#: nova/compute/resource_tracker.py:545 +#: nova/compute/resource_tracker.py:553 #, fuzzy msgid "Instance not resizing, skipping migration." msgstr "VM is not present, skipping destroy..." -#: nova/compute/resource_tracker.py:560 +#: nova/compute/resource_tracker.py:568 msgid "Flavor could not be found, skipping migration." 
msgstr "" -#: nova/compute/resource_tracker.py:650 +#: nova/compute/resource_tracker.py:658 #, python-format msgid "" "Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB " "memory)" msgstr "" -#: nova/compute/resource_tracker.py:664 +#: nova/compute/resource_tracker.py:672 #, python-format msgid "Missing keys: %s" msgstr "Missing keys: %s" @@ -5819,19 +5825,19 @@ msgstr "No compute host specified" msgid "Unable to find host for Instance %s" msgstr "Unable to find host for Instance %s" -#: nova/compute/utils.py:209 +#: nova/compute/utils.py:204 #, python-format msgid "Can't access image %(image_id)s: %(error)s" msgstr "" -#: nova/compute/utils.py:333 +#: nova/compute/utils.py:328 #, python-format msgid "" "No host name specified for the notification of HostAPI.%s and it will be " "ignored" msgstr "" -#: nova/compute/utils.py:461 +#: nova/compute/utils.py:456 #, python-format msgid "" "Value of 0 or None specified for %s. This behaviour will change in " @@ -5839,19 +5845,19 @@ msgid "" "'do not call'. To keep the 'do not call' behaviour, use a negative value." 
msgstr "" -#: nova/compute/monitors/__init__.py:177 +#: nova/compute/monitors/__init__.py:176 #, python-format msgid "" "Excluding monitor %(monitor_name)s due to metric name overlap; " "overlapping metrics: %(overlap)s" msgstr "" -#: nova/compute/monitors/__init__.py:185 +#: nova/compute/monitors/__init__.py:184 #, python-format msgid "Monitor %(monitor_name)s cannot be used: %(ex)s" msgstr "" -#: nova/compute/monitors/__init__.py:191 +#: nova/compute/monitors/__init__.py:190 #, python-format msgid "The following monitors have been disabled: %s" msgstr "" @@ -5861,11 +5867,11 @@ msgstr "" msgid "Not all properties needed are implemented in the compute driver: %s" msgstr "" -#: nova/conductor/api.py:300 +#: nova/conductor/api.py:318 msgid "nova-conductor connection established successfully" msgstr "" -#: nova/conductor/api.py:305 +#: nova/conductor/api.py:323 msgid "" "Timed out waiting for nova-conductor. Is it running? Or did this service" " start before nova-conductor? Reattempting establishment of nova-" @@ -5877,7 +5883,7 @@ msgstr "" msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s" msgstr "" -#: nova/conductor/manager.py:522 +#: nova/conductor/manager.py:523 msgid "No valid host found for cold migrate" msgstr "" @@ -5901,6 +5907,10 @@ msgstr "" msgid "Unshelve attempted but vm_state not SHELVED or SHELVED_OFFLOADED" msgstr "" +#: nova/conductor/manager.py:737 +msgid "No valid host found for rebuild" +msgstr "" + #: nova/conductor/tasks/live_migrate.py:113 #, python-format msgid "" @@ -5994,65 +6004,65 @@ msgstr "" msgid "Unrecognized read_deleted value '%s'" msgstr "Unrecognized read_deleted value '%s'" -#: nova/db/sqlalchemy/api.py:745 +#: nova/db/sqlalchemy/api.py:750 #, fuzzy, python-format msgid "Invalid floating ip id %s in request" msgstr "instance %s: rescued" -#: nova/db/sqlalchemy/api.py:850 +#: nova/db/sqlalchemy/api.py:855 msgid "Failed to update usages bulk deallocating floating IP" msgstr "" -#: nova/db/sqlalchemy/api.py:1006 
+#: nova/db/sqlalchemy/api.py:1011 #, fuzzy, python-format msgid "Invalid floating IP %s in request" msgstr "instance %s: rescued" -#: nova/db/sqlalchemy/api.py:1308 nova/db/sqlalchemy/api.py:1347 +#: nova/db/sqlalchemy/api.py:1313 nova/db/sqlalchemy/api.py:1352 #, fuzzy, python-format msgid "Invalid fixed IP Address %s in request" msgstr "instance %s: rescued" -#: nova/db/sqlalchemy/api.py:1482 +#: nova/db/sqlalchemy/api.py:1487 #, fuzzy, python-format msgid "Invalid virtual interface address %s in request" msgstr "instance %s: rescued" -#: nova/db/sqlalchemy/api.py:1576 +#: nova/db/sqlalchemy/api.py:1581 #, python-format msgid "" "Unknown osapi_compute_unique_server_name_scope value: %s Flag must be " "empty, \"global\" or \"project\"" msgstr "" -#: nova/db/sqlalchemy/api.py:1735 +#: nova/db/sqlalchemy/api.py:1741 #, fuzzy, python-format msgid "Invalid instance id %s in request" msgstr "instance %s: rescued" -#: nova/db/sqlalchemy/api.py:2013 +#: nova/db/sqlalchemy/api.py:2019 #, python-format msgid "Invalid field name: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:3242 +#: nova/db/sqlalchemy/api.py:3248 #, python-format msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:4892 +#: nova/db/sqlalchemy/api.py:4899 #, python-format msgid "" "Volume(%s) has lower stats then what is in the database. Instance must " "have been rebooted or crashed. Updating totals." 
msgstr "" -#: nova/db/sqlalchemy/api.py:5249 +#: nova/db/sqlalchemy/api.py:5256 #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" -#: nova/db/sqlalchemy/api.py:5639 +#: nova/db/sqlalchemy/api.py:5646 #, python-format msgid "IntegrityError detected when archiving table %s" msgstr "" @@ -6103,7 +6113,7 @@ msgstr "" msgid "Exception while seeding instance_types table" msgstr "" -#: nova/image/glance.py:231 +#: nova/image/glance.py:236 #, python-format msgid "" "Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " @@ -6112,19 +6122,19 @@ msgstr "" "Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " "%(extra)s." -#: nova/image/glance.py:265 +#: nova/image/glance.py:268 #, python-format msgid "" "When loading the module %(module_str)s the following error occurred: " "%(ex)s" msgstr "" -#: nova/image/glance.py:303 +#: nova/image/glance.py:306 #, python-format msgid "Failed to instantiate the download handler for %(scheme)s" msgstr "" -#: nova/image/glance.py:319 +#: nova/image/glance.py:322 #, python-format msgid "Successfully transferred using %s" msgstr "" @@ -6270,17 +6280,17 @@ msgstr "" msgid "Not deleting key %s" msgstr "" -#: nova/network/api.py:198 nova/network/neutronv2/api.py:797 +#: nova/network/api.py:195 nova/network/neutronv2/api.py:797 #, python-format msgid "re-assign floating IP %(address)s from instance %(instance_id)s" msgstr "re-assign floating IP %(address)s from instance %(instance_id)s" -#: nova/network/base_api.py:49 +#: nova/network/base_api.py:48 #, fuzzy msgid "Failed storing info cache" msgstr "Failed to terminate instance" -#: nova/network/base_api.py:68 +#: nova/network/base_api.py:67 msgid "instance is a required argument to use @refresh_cache" msgstr "instance is a required argument to use @refresh_cache" @@ -6294,63 +6304,63 @@ msgstr "Compute driver option required, but not specified" msgid "Loading network driver '%s'" msgstr "Loading compute driver 
'%s'" -#: nova/network/floating_ips.py:90 +#: nova/network/floating_ips.py:85 #, python-format msgid "Fixed ip %s not found" msgstr "" -#: nova/network/floating_ips.py:180 +#: nova/network/floating_ips.py:175 #, python-format msgid "Floating IP %s is not associated. Ignore." msgstr "" -#: nova/network/floating_ips.py:199 +#: nova/network/floating_ips.py:194 #, python-format msgid "Address |%(address)s| is not allocated" msgstr "Address |%(address)s| is not allocated" -#: nova/network/floating_ips.py:203 +#: nova/network/floating_ips.py:198 #, python-format msgid "Address |%(address)s| is not allocated to your project |%(project)s|" msgstr "Address |%(address)s| is not allocated to your project |%(project)s|" -#: nova/network/floating_ips.py:223 +#: nova/network/floating_ips.py:218 #, python-format msgid "Quota exceeded for %s, tried to allocate floating IP" msgstr "" -#: nova/network/floating_ips.py:283 +#: nova/network/floating_ips.py:277 msgid "Failed to update usages deallocating floating IP" msgstr "Failed to update usages deallocating floating IP" -#: nova/network/floating_ips.py:385 +#: nova/network/floating_ips.py:375 #, python-format msgid "Failed to disassociated floating address: %s" msgstr "" -#: nova/network/floating_ips.py:390 +#: nova/network/floating_ips.py:380 #, python-format msgid "Interface %s not found" msgstr "" -#: nova/network/floating_ips.py:553 +#: nova/network/floating_ips.py:539 #, python-format msgid "Starting migration network for instance %s" msgstr "" -#: nova/network/floating_ips.py:560 +#: nova/network/floating_ips.py:545 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " "%(instance_uuid)s. 
Will not migrate it " msgstr "" -#: nova/network/floating_ips.py:593 +#: nova/network/floating_ips.py:574 #, python-format msgid "Finishing migration network for instance %s" msgstr "" -#: nova/network/floating_ips.py:601 +#: nova/network/floating_ips.py:581 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " @@ -6359,7 +6369,7 @@ msgstr "" "Floating ip address |%(address)s| no longer belongs to instance " "%(instance_uuid)s. Will notsetup it." -#: nova/network/floating_ips.py:644 +#: nova/network/floating_ips.py:624 #, python-format msgid "" "Database inconsistency: DNS domain |%s| is registered in the Nova db but " @@ -6370,12 +6380,12 @@ msgstr "" "not visible to either the floating or instance DNS driver. It will be " "ignored." -#: nova/network/floating_ips.py:684 +#: nova/network/floating_ips.py:664 #, python-format msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." msgstr "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." -#: nova/network/floating_ips.py:693 +#: nova/network/floating_ips.py:673 #, python-format msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." msgstr "Domain |%(domain)s| already exists, changing project to |%(project)s|." @@ -6404,17 +6414,17 @@ msgstr "This driver only supports type 'a' entries." msgid "This shouldn't be getting called except during testing." 
msgstr "" -#: nova/network/linux_net.py:227 +#: nova/network/linux_net.py:232 #, python-format msgid "Attempted to remove chain %s which does not exist" msgstr "Attempted to remove chain %s which does not exist" -#: nova/network/linux_net.py:263 +#: nova/network/linux_net.py:268 #, python-format msgid "Unknown chain: %r" msgstr "Unknown chain: %r" -#: nova/network/linux_net.py:294 +#: nova/network/linux_net.py:301 #, python-format msgid "" "Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " @@ -6423,52 +6433,52 @@ msgstr "" "Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " "%(top)r" -#: nova/network/linux_net.py:762 +#: nova/network/linux_net.py:769 #, python-format msgid "Removed %(num)d duplicate rules for floating ip %(float)s" msgstr "" -#: nova/network/linux_net.py:810 +#: nova/network/linux_net.py:817 #, python-format msgid "Error deleting conntrack entries for %s" msgstr "" -#: nova/network/linux_net.py:1068 +#: nova/network/linux_net.py:1072 #, python-format msgid "Hupping dnsmasq threw %s" msgstr "Hupping dnsmasq threw %s" -#: nova/network/linux_net.py:1150 +#: nova/network/linux_net.py:1154 #, python-format msgid "killing radvd threw %s" msgstr "killing radvd threw %s" -#: nova/network/linux_net.py:1302 +#: nova/network/linux_net.py:1308 #, python-format msgid "Unable to execute %(cmd)s. 
Exception: %(exception)s" msgstr "" -#: nova/network/linux_net.py:1360 +#: nova/network/linux_net.py:1366 #, python-format msgid "Failed removing net device: '%s'" msgstr "" -#: nova/network/linux_net.py:1532 +#: nova/network/linux_net.py:1543 #, fuzzy, python-format msgid "Adding interface %(interface)s to bridge %(bridge)s" msgstr "Ensuring vlan %(vlan)s and bridge %(bridge)s" -#: nova/network/linux_net.py:1538 +#: nova/network/linux_net.py:1549 #, python-format msgid "Failed to add interface: %s" msgstr "Failed to add interface: %s" -#: nova/network/manager.py:836 +#: nova/network/manager.py:828 #, python-format msgid "instance-dns-zone not found |%s|." msgstr "" -#: nova/network/manager.py:843 +#: nova/network/manager.py:835 #, python-format msgid "" "instance-dns-zone is |%(domain)s|, which is in availability zone " @@ -6479,55 +6489,55 @@ msgstr "" "|%(zone)s|. Instance is in zone |%(zone2)s|. No DNS record will be " "created." -#: nova/network/manager.py:882 +#: nova/network/manager.py:874 #, python-format msgid "Quota exceeded for %s, tried to allocate fixed IP" msgstr "" -#: nova/network/manager.py:942 +#: nova/network/manager.py:934 msgid "Error cleaning up fixed ip allocation. Manual cleanup may be required." msgstr "" -#: nova/network/manager.py:972 +#: nova/network/manager.py:964 #, fuzzy msgid "Failed to update usages deallocating fixed IP" msgstr "Failed to update usages deallocating floating IP" -#: nova/network/manager.py:996 +#: nova/network/manager.py:988 #, python-format msgid "Unable to release %s because vif doesn't exist." msgstr "Unable to release %s because vif doesn't exist." 
-#: nova/network/manager.py:1037 +#: nova/network/manager.py:1029 #, python-format msgid "IP %s leased that is not associated" msgstr "IP %s leased that is not associated" -#: nova/network/manager.py:1043 +#: nova/network/manager.py:1035 #, python-format msgid "IP |%s| leased that isn't allocated" msgstr "IP |%s| leased that isn't allocated" -#: nova/network/manager.py:1052 +#: nova/network/manager.py:1044 #, python-format msgid "IP %s released that is not associated" msgstr "IP %s released that is not associated" -#: nova/network/manager.py:1056 +#: nova/network/manager.py:1048 #, python-format msgid "IP %s released that was not leased" msgstr "IP %s released that was not leased" -#: nova/network/manager.py:1074 +#: nova/network/manager.py:1066 #, python-format msgid "%s must be an integer" msgstr "%s must be an integer" -#: nova/network/manager.py:1106 +#: nova/network/manager.py:1098 msgid "Maximum allowed length for 'label' is 255." msgstr "Maximum allowed length for 'label' is 255." -#: nova/network/manager.py:1126 +#: nova/network/manager.py:1118 #, python-format msgid "" "Subnet(s) too large, defaulting to /%s. To override, specify " @@ -6536,16 +6546,16 @@ msgstr "" "Subnet(s) too large, defaulting to /%s. To override, specify " "network_size flag." 
-#: nova/network/manager.py:1211 +#: nova/network/manager.py:1203 msgid "cidr already in use" msgstr "cidr already in use" -#: nova/network/manager.py:1214 +#: nova/network/manager.py:1206 #, python-format msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" msgstr "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" -#: nova/network/manager.py:1225 +#: nova/network/manager.py:1217 #, python-format msgid "" "requested cidr (%(cidr)s) conflicts with existing smaller cidr " @@ -6554,12 +6564,12 @@ msgstr "" "requested cidr (%(cidr)s) conflicts with existing smaller cidr " "(%(smaller)s)" -#: nova/network/manager.py:1320 +#: nova/network/manager.py:1311 #, python-format msgid "Network must be disassociated from project %s before delete" msgstr "Network must be disassociated from project %s before delete" -#: nova/network/manager.py:1949 +#: nova/network/manager.py:1937 msgid "" "The sum between the number of networks and the vlan start cannot be " "greater than 4094" @@ -6567,7 +6577,7 @@ msgstr "" "The sum between the number of networks and the vlan start cannot be " "greater than 4094" -#: nova/network/manager.py:1956 +#: nova/network/manager.py:1944 #, fuzzy, python-format msgid "" "The network range is not big enough to fit %(num_networks)s networks. 
" @@ -6796,22 +6806,22 @@ msgstr "This rule already exists in group %s" msgid "Error setting %(attr)s" msgstr "error setting admin password" -#: nova/objects/base.py:247 +#: nova/objects/base.py:256 #, python-format msgid "Unable to instantiate unregistered object type %(objtype)s" msgstr "" -#: nova/objects/base.py:366 +#: nova/objects/base.py:375 #, python-format msgid "Cannot load '%s' in the base class" msgstr "" -#: nova/objects/base.py:412 +#: nova/objects/base.py:421 #, python-format msgid "%(objname)s object has no attribute '%(attrname)s'" msgstr "" -#: nova/objects/block_device.py:136 +#: nova/objects/block_device.py:149 msgid "Volume does not belong to the requested instance." msgstr "" @@ -6825,44 +6835,44 @@ msgstr "" msgid "Element %(key)s:%(val)s must be of type %(expected)s not %(actual)s" msgstr "" -#: nova/objects/fields.py:157 +#: nova/objects/fields.py:165 #, python-format msgid "Field `%s' cannot be None" msgstr "" -#: nova/objects/fields.py:232 +#: nova/objects/fields.py:246 #, python-format msgid "A string is required here, not %s" msgstr "" -#: nova/objects/fields.py:268 +#: nova/objects/fields.py:286 msgid "A datetime.datetime is required here" msgstr "" -#: nova/objects/fields.py:306 nova/objects/fields.py:315 -#: nova/objects/fields.py:324 +#: nova/objects/fields.py:328 nova/objects/fields.py:337 +#: nova/objects/fields.py:346 #, python-format msgid "Network \"%s\" is not valid" msgstr "" -#: nova/objects/fields.py:363 +#: nova/objects/fields.py:385 msgid "A list is required here" msgstr "" -#: nova/objects/fields.py:379 +#: nova/objects/fields.py:405 msgid "A dict is required here" msgstr "" -#: nova/objects/fields.py:418 +#: nova/objects/fields.py:449 #, python-format msgid "An object of type %s is required here" msgstr "" -#: nova/objects/fields.py:445 +#: nova/objects/fields.py:488 msgid "A NetworkModel is required here" msgstr "" -#: nova/objects/instance.py:432 +#: nova/objects/instance.py:431 #, python-format msgid "No save handler 
for %s" msgstr "" @@ -6890,7 +6900,7 @@ msgstr "" msgid "Snapshot list encountered but no header found!" msgstr "" -#: nova/openstack/common/lockutils.py:102 +#: nova/openstack/common/lockutils.py:101 #, python-format msgid "Unable to acquire lock on `%(filename)s` due to %(exception)s" msgstr "" @@ -6915,7 +6925,7 @@ msgstr "syslog facility must be one of: %s" msgid "Fatal call to deprecated config: %(msg)s" msgstr "Fatal call to deprecated config %(msg)s" -#: nova/openstack/common/periodic_task.py:39 +#: nova/openstack/common/periodic_task.py:40 #, python-format msgid "Unexpected argument for periodic task creation: %(arg)s." msgstr "" @@ -6979,12 +6989,12 @@ msgstr "" msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: nova/openstack/common/strutils.py:202 +#: nova/openstack/common/strutils.py:197 #, python-format msgid "Invalid unit system: \"%s\"" msgstr "" -#: nova/openstack/common/strutils.py:211 +#: nova/openstack/common/strutils.py:206 #, python-format msgid "Invalid string format: %s" msgstr "" @@ -7099,54 +7109,54 @@ msgstr "Attempting to build %(num_instances)d instance(s)" msgid "Choosing host %(weighed_host)s for instance %(instance_uuid)s" msgstr "Destroying VDIs for Instance %(instance_uuid)s" -#: nova/scheduler/filter_scheduler.py:170 +#: nova/scheduler/filter_scheduler.py:169 msgid "Instance disappeared during scheduling" msgstr "" -#: nova/scheduler/host_manager.py:173 +#: nova/scheduler/host_manager.py:169 #, python-format msgid "Metric name unknown of %r" msgstr "" -#: nova/scheduler/host_manager.py:188 +#: nova/scheduler/host_manager.py:184 #, python-format msgid "" "Host has more disk space than database expected (%(physical)sgb > " "%(database)sgb)" msgstr "" -#: nova/scheduler/host_manager.py:365 +#: nova/scheduler/host_manager.py:311 #, fuzzy, python-format msgid "Host filter ignoring hosts: %s" msgstr "Host filter fails for ignored host %(host)s" -#: nova/scheduler/host_manager.py:377 +#: 
nova/scheduler/host_manager.py:323 #, fuzzy, python-format msgid "Host filter forcing available hosts to %s" msgstr "Host filter fails for non-forced host %(host)s" -#: nova/scheduler/host_manager.py:380 +#: nova/scheduler/host_manager.py:326 #, python-format msgid "No hosts matched due to not matching 'force_hosts' value of '%s'" msgstr "" -#: nova/scheduler/host_manager.py:393 +#: nova/scheduler/host_manager.py:339 #, fuzzy, python-format msgid "Host filter forcing available nodes to %s" msgstr "Host filter fails for non-forced host %(host)s" -#: nova/scheduler/host_manager.py:396 +#: nova/scheduler/host_manager.py:342 #, python-format msgid "No nodes matched due to not matching 'force_nodes' value of '%s'" msgstr "" -#: nova/scheduler/host_manager.py:444 +#: nova/scheduler/host_manager.py:390 #: nova/scheduler/filters/trusted_filter.py:208 #, python-format msgid "No service for compute ID %s" msgstr "No service for compute ID %s" -#: nova/scheduler/host_manager.py:462 +#: nova/scheduler/host_manager.py:408 #, python-format msgid "Removing dead compute node %(host)s:%(node)s from scheduler" msgstr "" @@ -7302,12 +7312,16 @@ msgstr "" msgid "already detached" msgstr "already detached" -#: nova/tests/api/test_auth.py:97 +#: nova/tests/api/test_auth.py:98 msgid "unexpected role header" msgstr "unexpected role header" -#: nova/tests/api/openstack/compute/test_servers.py:3202 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2425 +#: nova/tests/api/openstack/test_faults.py:46 +msgid "Should be translated." 
+msgstr "" + +#: nova/tests/api/openstack/compute/test_servers.py:3225 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2434 msgid "" "Quota exceeded for instances: Requested 1, but already used 10 of 10 " "instances" @@ -7315,13 +7329,13 @@ msgstr "" "Quota exceeded for instances: Requested 1, but already used 10 of 10 " "instances" -#: nova/tests/api/openstack/compute/test_servers.py:3207 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2430 +#: nova/tests/api/openstack/compute/test_servers.py:3230 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2439 msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram" msgstr "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram" -#: nova/tests/api/openstack/compute/test_servers.py:3212 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2435 +#: nova/tests/api/openstack/compute/test_servers.py:3235 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2444 msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores" msgstr "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores" @@ -7329,7 +7343,7 @@ msgstr "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores" #: nova/tests/compute/test_compute.py:1707 #: nova/tests/compute/test_compute.py:1785 #: nova/tests/compute/test_compute.py:1825 -#: nova/tests/compute/test_compute.py:5546 +#: nova/tests/compute/test_compute.py:5603 #, python-format msgid "Running instances: %s" msgstr "Running instances: %s" @@ -7341,16 +7355,16 @@ msgstr "Running instances: %s" msgid "After terminating instances: %s" msgstr "After terminating instances: %s" -#: nova/tests/compute/test_compute.py:5557 +#: nova/tests/compute/test_compute.py:5614 #, python-format msgid "After force-killing instances: %s" msgstr "After force-killing instances: %s" -#: nova/tests/compute/test_compute.py:6173 +#: nova/tests/compute/test_compute.py:6229 msgid 
"wrong host/node" msgstr "" -#: nova/tests/compute/test_compute.py:10753 +#: nova/tests/compute/test_compute.py:10820 #, fuzzy msgid "spawn error" msgstr "unknown guestmount error" @@ -7446,35 +7460,58 @@ msgstr "Body: %s" msgid "Unexpected status code" msgstr "Unexpected status code" -#: nova/tests/virt/hyperv/test_hypervapi.py:512 +#: nova/tests/virt/hyperv/test_hypervapi.py:517 #, fuzzy msgid "fake vswitch not found" msgstr "marker [%s] not found" -#: nova/tests/virt/hyperv/test_hypervapi.py:965 +#: nova/tests/virt/hyperv/test_hypervapi.py:970 msgid "Simulated failure" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1019 +#: nova/tests/virt/libvirt/fakelibvirt.py:1041 msgid "Expected a list for 'auth' parameter" msgstr "Expected a list for 'auth' parameter" -#: nova/tests/virt/libvirt/fakelibvirt.py:1023 +#: nova/tests/virt/libvirt/fakelibvirt.py:1045 msgid "Expected a function in 'auth[0]' parameter" msgstr "Expected a function in 'auth[0]' parameter" -#: nova/tests/virt/libvirt/fakelibvirt.py:1027 +#: nova/tests/virt/libvirt/fakelibvirt.py:1049 msgid "Expected a function in 'auth[1]' parameter" msgstr "Expected a function in 'auth[1]' parameter" -#: nova/tests/virt/libvirt/fakelibvirt.py:1038 +#: nova/tests/virt/libvirt/fakelibvirt.py:1060 msgid "" "virEventRegisterDefaultImpl() must be called before " "connection is used." 
msgstr "" -#: nova/tests/virt/vmwareapi/test_vm_util.py:196 -#: nova/virt/vmwareapi/vm_util.py:1087 +#: nova/tests/virt/vmwareapi/fake.py:244 +#, python-format +msgid "Property %(attr)s not set for the managed object %(name)s" +msgstr "Property %(attr)s not set for the managed object %(name)s" + +#: nova/tests/virt/vmwareapi/fake.py:969 +msgid "There is no VM registered" +msgstr "There is no VM registered" + +#: nova/tests/virt/vmwareapi/fake.py:971 nova/tests/virt/vmwareapi/fake.py:1307 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "Virtual Machine with ref %s is not there" + +#: nova/tests/virt/vmwareapi/fake.py:1096 +msgid "Session Invalid" +msgstr "Session Invalid" + +#: nova/tests/virt/vmwareapi/fake.py:1304 +#, fuzzy +msgid "No Virtual Machine has been registered yet" +msgstr " No Virtual Machine has been registered yet" + +#: nova/tests/virt/vmwareapi/test_ds_util.py:221 +#: nova/virt/vmwareapi/ds_util.py:265 #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" @@ -7486,15 +7523,15 @@ msgid "" "left to copy" msgstr "" -#: nova/tests/virt/xenapi/image/test_bittorrent.py:126 -#: nova/virt/xenapi/image/bittorrent.py:81 +#: nova/tests/virt/xenapi/image/test_bittorrent.py:125 +#: nova/virt/xenapi/image/bittorrent.py:80 msgid "" "Cannot create default bittorrent URL without torrent_base_url set or " "torrent URL fetcher extension" msgstr "" -#: nova/tests/virt/xenapi/image/test_bittorrent.py:160 -#: nova/virt/xenapi/image/bittorrent.py:85 +#: nova/tests/virt/xenapi/image/test_bittorrent.py:159 +#: nova/virt/xenapi/image/bittorrent.py:84 msgid "Multiple torrent URL fetcher extensions found. Failing." 
msgstr "" @@ -7508,80 +7545,100 @@ msgstr "" msgid "Booting with volume %(volume_id)s at %(mountpoint)s" msgstr "Booting with volume %(volume_id)s at %(mountpoint)s" -#: nova/virt/cpu.py:56 nova/virt/cpu.py:60 -#, python-format -msgid "Invalid range expression %r" -msgstr "" - -#: nova/virt/cpu.py:69 -#, fuzzy, python-format -msgid "Invalid exclusion expression %r" -msgstr "Invalid reservation expiration %(expire)s." - -#: nova/virt/cpu.py:76 -#, fuzzy, python-format -msgid "Invalid inclusion expression %r" -msgstr "Invalid reservation expiration %(expire)s." - -#: nova/virt/cpu.py:81 -#, python-format -msgid "No CPUs available after parsing %r" -msgstr "" - -#: nova/virt/driver.py:1207 +#: nova/virt/driver.py:1242 msgid "Event must be an instance of nova.virt.event.Event" msgstr "" -#: nova/virt/driver.py:1213 +#: nova/virt/driver.py:1248 #, python-format msgid "Exception dispatching event %(event)s: %(ex)s" msgstr "" -#: nova/virt/driver.py:1295 +#: nova/virt/driver.py:1330 msgid "Compute driver option required, but not specified" msgstr "Compute driver option required, but not specified" -#: nova/virt/driver.py:1298 +#: nova/virt/driver.py:1333 #, python-format msgid "Loading compute driver '%s'" msgstr "Loading compute driver '%s'" -#: nova/virt/driver.py:1305 +#: nova/virt/driver.py:1340 #, fuzzy msgid "Unable to load the virtualization driver" msgstr "Unable to load the virtualization driver: %s" -#: nova/virt/fake.py:216 +#: nova/virt/event.py:33 +msgid "Started" +msgstr "" + +#: nova/virt/event.py:34 +msgid "Stopped" +msgstr "" + +#: nova/virt/event.py:35 +msgid "Paused" +msgstr "" + +#: nova/virt/event.py:36 +msgid "Resumed" +msgstr "" + +#: nova/virt/event.py:108 +msgid "Unknown" +msgstr "" + +#: nova/virt/fake.py:217 #, python-format msgid "Key '%(key)s' not in instances '%(inst)s'" msgstr "" -#: nova/virt/firewall.py:178 +#: nova/virt/firewall.py:176 msgid "Attempted to unfilter instance which is not filtered" msgstr "Attempted to unfilter instance 
which is not filtered" -#: nova/virt/images.py:86 +#: nova/virt/hardware.py:45 +#, python-format +msgid "No CPUs available after parsing %r" +msgstr "" + +#: nova/virt/hardware.py:77 nova/virt/hardware.py:81 +#, python-format +msgid "Invalid range expression %r" +msgstr "" + +#: nova/virt/hardware.py:90 +#, fuzzy, python-format +msgid "Invalid exclusion expression %r" +msgstr "Invalid reservation expiration %(expire)s." + +#: nova/virt/hardware.py:97 +#, fuzzy, python-format +msgid "Invalid inclusion expression %r" +msgstr "Invalid reservation expiration %(expire)s." + +#: nova/virt/images.py:81 msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info' parsing failed." -#: nova/virt/images.py:92 +#: nova/virt/images.py:87 #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s backed by: %(backing_file)s" -#: nova/virt/images.py:105 +#: nova/virt/images.py:100 #, python-format msgid "" "%(base)s virtual size %(disk_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/images.py:122 +#: nova/virt/images.py:117 #, python-format msgid "Converted to raw, but format is now %s" msgstr "Converted to raw, but format is now %s" -#: nova/virt/storage_users.py:63 nova/virt/storage_users.py:101 +#: nova/virt/storage_users.py:64 nova/virt/storage_users.py:102 #, python-format msgid "Cannot decode JSON from %(id_path)s" msgstr "" @@ -7624,27 +7681,27 @@ msgstr "" msgid "Baremetal power manager failed to restart node for instance %r" msgstr "" -#: nova/virt/baremetal/driver.py:375 +#: nova/virt/baremetal/driver.py:376 #, fuzzy, python-format msgid "Destroy called on non-existing instance %s" msgstr "get_info called for instance" -#: nova/virt/baremetal/driver.py:393 +#: nova/virt/baremetal/driver.py:394 #, python-format msgid "Error from baremetal driver during destroy: %s" msgstr "" -#: nova/virt/baremetal/driver.py:398 +#: nova/virt/baremetal/driver.py:399 #, python-format msgid "Error while recording destroy failure in 
baremetal database: %s" msgstr "" -#: nova/virt/baremetal/driver.py:413 +#: nova/virt/baremetal/driver.py:414 #, python-format msgid "Baremetal power manager failed to stop node for instance %r" msgstr "" -#: nova/virt/baremetal/driver.py:426 +#: nova/virt/baremetal/driver.py:427 #, python-format msgid "Baremetal power manager failed to start node for instance %r" msgstr "" @@ -7888,16 +7945,16 @@ msgstr "" msgid "baremetal driver was unable to delete tid %s" msgstr "" -#: nova/virt/baremetal/volume_driver.py:195 nova/virt/hyperv/volumeops.py:189 +#: nova/virt/baremetal/volume_driver.py:195 nova/virt/hyperv/volumeops.py:196 msgid "Could not determine iscsi initiator name" msgstr "Could not determine iscsi initiator name" -#: nova/virt/baremetal/volume_driver.py:234 +#: nova/virt/baremetal/volume_driver.py:225 #, fuzzy, python-format msgid "No fixed PXE IP is associated to %s" msgstr "No fixed ips associated to instance" -#: nova/virt/baremetal/volume_driver.py:288 +#: nova/virt/baremetal/volume_driver.py:283 #, python-format msgid "detach volume could not find tid for %s" msgstr "" @@ -7927,16 +7984,16 @@ msgstr "Virtual Interface creation failed" msgid "Baremetal virtual interface %s not found" msgstr "partition %s not found" -#: nova/virt/disk/api.py:285 +#: nova/virt/disk/api.py:280 msgid "image already mounted" msgstr "image already mounted" -#: nova/virt/disk/api.py:359 +#: nova/virt/disk/api.py:354 #, fuzzy, python-format msgid "Ignoring error injecting data into image (%(e)s)" msgstr "Ignoring error injecting data into image %(img_id)s (%(e)s)" -#: nova/virt/disk/api.py:381 +#: nova/virt/disk/api.py:376 #, python-format msgid "" "Failed to mount container filesystem '%(image)s' on '%(target)s': " @@ -7945,31 +8002,31 @@ msgstr "" "Failed to mount container filesystem '%(image)s' on '%(target)s': " "%(errors)s" -#: nova/virt/disk/api.py:411 +#: nova/virt/disk/api.py:406 #, python-format msgid "Failed to teardown container filesystem: %s" msgstr "" -#: 
nova/virt/disk/api.py:424 +#: nova/virt/disk/api.py:419 #, fuzzy, python-format msgid "Failed to umount container filesystem: %s" msgstr "Failed to unmount container filesystem: %s" -#: nova/virt/disk/api.py:449 +#: nova/virt/disk/api.py:444 #, fuzzy, python-format msgid "Ignoring error injecting %(inject)s into image (%(e)s)" msgstr "Ignoring error injecting data into image %(img_id)s (%(e)s)" -#: nova/virt/disk/api.py:609 +#: nova/virt/disk/api.py:604 msgid "Not implemented on Windows" msgstr "Not implemented on Windows" -#: nova/virt/disk/api.py:636 +#: nova/virt/disk/api.py:631 #, python-format msgid "User %(username)s not found in password file." msgstr "User %(username)s not found in password file." -#: nova/virt/disk/api.py:652 +#: nova/virt/disk/api.py:647 #, python-format msgid "User %(username)s not found in shadow file." msgstr "User %(username)s not found in shadow file." @@ -8101,11 +8158,11 @@ msgstr "injected file path not valid" msgid "The ISCSI initiator name can't be found. Choosing the default one" msgstr "The ISCSI initiator name can't be found. Choosing the default one" -#: nova/virt/hyperv/driver.py:165 +#: nova/virt/hyperv/driver.py:169 msgid "VIF plugging is not supported by the Hyper-V driver." msgstr "" -#: nova/virt/hyperv/driver.py:170 +#: nova/virt/hyperv/driver.py:174 msgid "VIF unplugging is not supported by the Hyper-V driver." 
msgstr "" @@ -8187,7 +8244,7 @@ msgstr "Created switch port %(vm_name)s on switch %(ext_path)s" msgid "No external vswitch found" msgstr "" -#: nova/virt/hyperv/pathutils.py:71 +#: nova/virt/hyperv/pathutils.py:72 #, python-format msgid "The file copy from %(src)s to %(dest)s failed" msgstr "" @@ -8197,30 +8254,30 @@ msgstr "" msgid "Failed to remove snapshot for VM %s" msgstr "Failed to remove snapshot for VM %s" -#: nova/virt/hyperv/vhdutils.py:65 nova/virt/hyperv/vhdutilsv2.py:63 +#: nova/virt/hyperv/vhdutils.py:66 nova/virt/hyperv/vhdutilsv2.py:64 #, python-format msgid "Unsupported disk format: %s" msgstr "" -#: nova/virt/hyperv/vhdutils.py:150 +#: nova/virt/hyperv/vhdutils.py:151 #, python-format msgid "The %(vhd_type)s type VHD is not supported" msgstr "" -#: nova/virt/hyperv/vhdutils.py:161 +#: nova/virt/hyperv/vhdutils.py:162 #, python-format msgid "Unable to obtain block size from VHD %(vhd_path)s" msgstr "" -#: nova/virt/hyperv/vhdutils.py:208 +#: nova/virt/hyperv/vhdutils.py:209 msgid "Unsupported virtual disk format" msgstr "" -#: nova/virt/hyperv/vhdutilsv2.py:134 +#: nova/virt/hyperv/vhdutilsv2.py:135 msgid "Differencing VHDX images are not supported" msgstr "" -#: nova/virt/hyperv/vhdutilsv2.py:157 +#: nova/virt/hyperv/vhdutilsv2.py:158 #, python-format msgid "Unable to obtain internal size from VHDX: %(vhd_path)s. 
Exception: %(ex)s" msgstr "" @@ -8242,12 +8299,12 @@ msgstr "" msgid "Spawning new instance" msgstr "Starting instance" -#: nova/virt/hyperv/vmops.py:280 nova/virt/vmwareapi/vmops.py:520 +#: nova/virt/hyperv/vmops.py:280 nova/virt/vmwareapi/vmops.py:567 #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "" -#: nova/virt/hyperv/vmops.py:283 nova/virt/vmwareapi/vmops.py:524 +#: nova/virt/hyperv/vmops.py:283 nova/virt/vmwareapi/vmops.py:571 msgid "Using config drive for instance" msgstr "" @@ -8256,7 +8313,7 @@ msgstr "" msgid "Creating config drive at %(path)s" msgstr "Creating config drive at %(path)s" -#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:549 +#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:596 #, fuzzy, python-format msgid "Creating config drive failed with error: %s" msgstr "Creating config drive at %(path)s" @@ -8312,12 +8369,12 @@ msgstr "" msgid "Metrics collection is not supported on this version of Hyper-V" msgstr "" -#: nova/virt/hyperv/volumeops.py:146 +#: nova/virt/hyperv/volumeops.py:148 #, python-format msgid "Unable to attach volume to instance %s" msgstr "Unable to attach volume to instance %s" -#: nova/virt/hyperv/volumeops.py:215 nova/virt/hyperv/volumeops.py:229 +#: nova/virt/hyperv/volumeops.py:222 nova/virt/hyperv/volumeops.py:236 #, python-format msgid "Unable to find a mounted disk for target_iqn: %s" msgstr "Unable to find a mounted disk for target_iqn: %s" @@ -8347,78 +8404,78 @@ msgstr "" msgid "Unable to determine disk bus for '%s'" msgstr "Unable to find vbd for vdi %s" -#: nova/virt/libvirt/driver.py:542 +#: nova/virt/libvirt/driver.py:556 #, python-format msgid "Connection to libvirt lost: %s" msgstr "" -#: nova/virt/libvirt/driver.py:724 +#: nova/virt/libvirt/driver.py:739 #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "Can not handle authentication request for %d credentials" -#: nova/virt/libvirt/driver.py:868 +#: 
nova/virt/libvirt/driver.py:932 msgid "operation time out" msgstr "" -#: nova/virt/libvirt/driver.py:1187 +#: nova/virt/libvirt/driver.py:1257 #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" -#: nova/virt/libvirt/driver.py:1194 +#: nova/virt/libvirt/driver.py:1264 #, python-format msgid "Volume sets block size, but libvirt '%s' or later is required." msgstr "" -#: nova/virt/libvirt/driver.py:1292 +#: nova/virt/libvirt/driver.py:1352 msgid "Swap only supports host devices" msgstr "" -#: nova/virt/libvirt/driver.py:1579 +#: nova/virt/libvirt/driver.py:1635 msgid "libvirt error while requesting blockjob info." msgstr "" -#: nova/virt/libvirt/driver.py:1712 +#: nova/virt/libvirt/driver.py:1776 msgid "Found no disk to snapshot." msgstr "" -#: nova/virt/libvirt/driver.py:1790 +#: nova/virt/libvirt/driver.py:1868 #, python-format msgid "Unknown type: %s" msgstr "" -#: nova/virt/libvirt/driver.py:1795 +#: nova/virt/libvirt/driver.py:1873 msgid "snapshot_id required in create_info" msgstr "" -#: nova/virt/libvirt/driver.py:1853 +#: nova/virt/libvirt/driver.py:1931 #, python-format msgid "Libvirt '%s' or later is required for online deletion of volume snapshots." msgstr "" -#: nova/virt/libvirt/driver.py:1860 +#: nova/virt/libvirt/driver.py:1938 #, python-format msgid "Unknown delete_info type %s" msgstr "" -#: nova/virt/libvirt/driver.py:1890 +#: nova/virt/libvirt/driver.py:1966 #, python-format -msgid "Unable to locate disk matching id: %s" +msgid "Disk with id: %s not found attached to instance." 
msgstr "" -#: nova/virt/libvirt/driver.py:2330 nova/virt/xenapi/vmops.py:1552 +#: nova/virt/libvirt/driver.py:2407 nova/virt/xenapi/vmops.py:1552 msgid "Guest does not have a console available" msgstr "Guest does not have a console available" -#: nova/virt/libvirt/driver.py:2746 +#: nova/virt/libvirt/driver.py:2823 #, python-format msgid "Detaching PCI devices with libvirt < %(ver)s is not permitted" msgstr "" -#: nova/virt/libvirt/driver.py:2912 +#: nova/virt/libvirt/driver.py:2989 #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt " @@ -8427,15 +8484,15 @@ msgstr "" "Config requested an explicit CPU model, but the current libvirt " "hypervisor '%s' does not support selecting CPU models" -#: nova/virt/libvirt/driver.py:2918 +#: nova/virt/libvirt/driver.py:2995 msgid "Config requested a custom CPU model, but no model name was provided" msgstr "Config requested a custom CPU model, but no model name was provided" -#: nova/virt/libvirt/driver.py:2922 +#: nova/virt/libvirt/driver.py:2999 msgid "A CPU model name should not be set when a host CPU model is requested" msgstr "A CPU model name should not be set when a host CPU model is requested" -#: nova/virt/libvirt/driver.py:2942 +#: nova/virt/libvirt/driver.py:3019 msgid "" "Passthrough of the host CPU was requested but this libvirt version does " "not support this feature" @@ -8443,14 +8500,14 @@ msgstr "" "Passthrough of the host CPU was requested but this libvirt version does " "not support this feature" -#: nova/virt/libvirt/driver.py:3475 +#: nova/virt/libvirt/driver.py:3567 #, python-format msgid "" "Error from libvirt while looking up %(instance_id)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3496 +#: nova/virt/libvirt/driver.py:3588 #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " @@ -8459,23 +8516,23 @@ msgstr "" "Error from libvirt while looking up %(instance_name)s: [Error Code " 
"%(error_code)s] %(ex)s" -#: nova/virt/libvirt/driver.py:3760 +#: nova/virt/libvirt/driver.py:3851 msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range." msgstr "" -#: nova/virt/libvirt/driver.py:3890 +#: nova/virt/libvirt/driver.py:3974 msgid "libvirt version is too old (does not support getVersion)" msgstr "libvirt version is too old (does not support getVersion)" -#: nova/virt/libvirt/driver.py:4251 +#: nova/virt/libvirt/driver.py:4335 msgid "Block migration can not be used with shared storage." msgstr "Block migration can not be used with shared storage." -#: nova/virt/libvirt/driver.py:4259 +#: nova/virt/libvirt/driver.py:4344 msgid "Live migration can not be used without shared storage." msgstr "Live migration can not be used without shared storage." -#: nova/virt/libvirt/driver.py:4303 +#: nova/virt/libvirt/driver.py:4414 #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too " @@ -8484,7 +8541,7 @@ msgstr "" "Unable to migrate %(instance_uuid)s: Disk of instance is too " "large(available on destination host:%(available)s < need:%(necessary)s)" -#: nova/virt/libvirt/driver.py:4342 +#: nova/virt/libvirt/driver.py:4453 #, python-format msgid "" "CPU doesn't have compatibility.\n" @@ -8499,42 +8556,68 @@ msgstr "" "\n" "Refer to %(u)s" -#: nova/virt/libvirt/driver.py:4409 +#: nova/virt/libvirt/driver.py:4516 #, python-format msgid "The firewall filter for %s does not exist" msgstr "The firewall filter for %s does not exist" -#: nova/virt/libvirt/driver.py:4900 +#: nova/virt/libvirt/driver.py:4579 +msgid "" +"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag " +"or your destination node does not support retrieving listen addresses. " +"In order for live migration to work properly, you must configure the " +"graphics (VNC and/or SPICE) listen addresses to be either the catch-all " +"address (0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)." 
+msgstr "" + +#: nova/virt/libvirt/driver.py:4596 +msgid "" +"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag," +" and the graphics (VNC and/or SPICE) listen addresses on the destination" +" node do not match the addresses on the source node. Since the source " +"node has listen addresses set to either the catch-all address (0.0.0.0 or" +" ::) or the local address (127.0.0.1 or ::1), the live migration will " +"succeed, but the VM will continue to listen on the current addresses." +msgstr "" + +#: nova/virt/libvirt/driver.py:4964 +#, python-format +msgid "" +"Error from libvirt while getting description of %(instance_name)s: [Error" +" Code %(error_code)s] %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:5090 msgid "Unable to resize disk down." msgstr "" -#: nova/virt/libvirt/imagebackend.py:258 +#: nova/virt/libvirt/imagebackend.py:257 #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:273 +#: nova/virt/libvirt/imagebackend.py:272 msgid "Attempted overwrite of an existing value." msgstr "" -#: nova/virt/libvirt/imagebackend.py:429 +#: nova/virt/libvirt/imagebackend.py:433 msgid "You should specify images_volume_group flag to use LVM images." msgstr "" -#: nova/virt/libvirt/imagebackend.py:544 +#: nova/virt/libvirt/imagebackend.py:548 msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "" -#: nova/virt/libvirt/imagebackend.py:658 +#: nova/virt/libvirt/imagebackend.py:660 msgid "rbd python libraries not found" msgstr "" -#: nova/virt/libvirt/imagebackend.py:697 +#: nova/virt/libvirt/imagebackend.py:703 #, python-format msgid "Unknown image_type=%s" msgstr "Unknown image_type=%s" -#: nova/virt/libvirt/lvm.py:55 +#: nova/virt/libvirt/lvm.py:54 #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db " @@ -8543,17 +8626,17 @@ msgstr "" "Insufficient Space on Volume Group %(vg)s. 
Only %(free_space)db " "available, but %(size)db required by volume %(lv)s." -#: nova/virt/libvirt/lvm.py:103 +#: nova/virt/libvirt/lvm.py:102 #, fuzzy, python-format msgid "vg %s must be LVM volume group" msgstr "Path %s must be LVM logical volume" -#: nova/virt/libvirt/lvm.py:146 +#: nova/virt/libvirt/lvm.py:145 #, python-format msgid "Path %s must be LVM logical volume" msgstr "Path %s must be LVM logical volume" -#: nova/virt/libvirt/lvm.py:222 +#: nova/virt/libvirt/lvm.py:221 #, python-format msgid "volume_clear='%s' is not handled" msgstr "" @@ -8562,275 +8645,235 @@ msgstr "" msgid "Cannot find any Fibre Channel HBAs" msgstr "" -#: nova/virt/libvirt/utils.py:431 +#: nova/virt/libvirt/utils.py:437 msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "Can't retrieve root device path from instance libvirt configuration" -#: nova/virt/libvirt/vif.py:353 nova/virt/libvirt/vif.py:608 -#: nova/virt/libvirt/vif.py:797 +#: nova/virt/libvirt/vif.py:356 nova/virt/libvirt/vif.py:574 +#: nova/virt/libvirt/vif.py:750 msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" -#: nova/virt/libvirt/vif.py:397 nova/virt/libvirt/vif.py:628 -#: nova/virt/libvirt/vif.py:817 +#: nova/virt/libvirt/vif.py:362 nova/virt/libvirt/vif.py:580 +#: nova/virt/libvirt/vif.py:756 #, fuzzy, python-format msgid "Unexpected vif_type=%s" msgstr "Unexpected error: %s" -#: nova/virt/libvirt/volume.py:291 +#: nova/virt/libvirt/volume.py:294 #, python-format msgid "iSCSI device not found at %s" msgstr "iSCSI device not found at %s" -#: nova/virt/libvirt/volume.py:737 +#: nova/virt/libvirt/volume.py:740 #, fuzzy, python-format msgid "AoE device not found at %s" msgstr "iSCSI device not found at %s" -#: nova/virt/libvirt/volume.py:909 +#: nova/virt/libvirt/volume.py:912 msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: nova/virt/libvirt/volume.py:928 +#: nova/virt/libvirt/volume.py:931 #, fuzzy msgid "Fibre Channel 
device not found." msgstr "iSCSI device not found at %s" -#: nova/virt/vmwareapi/driver.py:103 +#: nova/virt/vmwareapi/driver.py:104 msgid "" "The VMware ESX driver is now deprecated and will be removed in the Juno " "release. The VC driver will remain and continue to be supported." msgstr "" -#: nova/virt/vmwareapi/driver.py:115 +#: nova/virt/vmwareapi/driver.py:116 msgid "" "Must specify host_ip, host_username and host_password to use " "compute_driver=vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver" msgstr "" -#: nova/virt/vmwareapi/driver.py:127 +#: nova/virt/vmwareapi/driver.py:128 #, python-format msgid "Invalid Regular Expression %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:242 +#: nova/virt/vmwareapi/driver.py:243 msgid "Instance cannot be found in host, or in an unknownstate." msgstr "" -#: nova/virt/vmwareapi/driver.py:398 +#: nova/virt/vmwareapi/driver.py:403 #, python-format msgid "All clusters specified %s were not found in the vCenter" msgstr "" -#: nova/virt/vmwareapi/driver.py:407 +#: nova/virt/vmwareapi/driver.py:412 #, python-format msgid "The following clusters could not be found in the vCenter %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:544 +#: nova/virt/vmwareapi/driver.py:551 #, python-format msgid "The resource %s does not exist" msgstr "" -#: nova/virt/vmwareapi/driver.py:590 +#: nova/virt/vmwareapi/driver.py:597 #, python-format msgid "Invalid cluster or resource pool name : %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:757 +#: nova/virt/vmwareapi/driver.py:771 msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we " "do not return uptime for just one host." msgstr "" -#: nova/virt/vmwareapi/driver.py:845 -#, python-format -msgid "" -"Unable to connect to server at %(server)s, sleeping for %(seconds)s " -"seconds" -msgstr "" - -#: nova/virt/vmwareapi/driver.py:865 +#: nova/virt/vmwareapi/driver.py:884 #, python-format msgid "Unable to validate session %s!" 
msgstr "" -#: nova/virt/vmwareapi/driver.py:906 +#: nova/virt/vmwareapi/driver.py:926 #, python-format msgid "Session %s is inactive!" msgstr "" -#: nova/virt/vmwareapi/driver.py:954 -#, python-format -msgid "In vmwareapi: _call_method (session=%s)" -msgstr "" - -#: nova/virt/vmwareapi/driver.py:998 +#: nova/virt/vmwareapi/driver.py:1017 #, python-format msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" msgstr "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" -#: nova/virt/vmwareapi/driver.py:1008 +#: nova/virt/vmwareapi/driver.py:1027 #, python-format msgid "In vmwareapi:_poll_task, Got this error %s" msgstr "In vmwareapi:_poll_task, Got this error %s" -#: nova/virt/vmwareapi/ds_util.py:38 +#: nova/virt/vmwareapi/ds_util.py:41 msgid "Datastore name cannot be None" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:40 +#: nova/virt/vmwareapi/ds_util.py:43 msgid "Datastore reference cannot be None" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:42 +#: nova/virt/vmwareapi/ds_util.py:45 msgid "Invalid capacity" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:45 +#: nova/virt/vmwareapi/ds_util.py:48 msgid "Capacity is smaller than free space" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:106 +#: nova/virt/vmwareapi/ds_util.py:109 msgid "datastore name empty" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:111 +#: nova/virt/vmwareapi/ds_util.py:114 nova/virt/vmwareapi/ds_util.py:146 msgid "path component cannot be None" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:144 +#: nova/virt/vmwareapi/ds_util.py:160 msgid "datastore path empty" msgstr "" -#: nova/virt/vmwareapi/error_util.py:46 +#: nova/virt/vmwareapi/error_util.py:45 msgid "exception_summary must not be a list" msgstr "" -#: nova/virt/vmwareapi/error_util.py:76 +#: nova/virt/vmwareapi/error_util.py:75 msgid "fault_list must be a list" msgstr "" -#: nova/virt/vmwareapi/error_util.py:122 +#: nova/virt/vmwareapi/error_util.py:121 #, python-format msgid "Error(s) %s occurred in the call to 
RetrievePropertiesEx" msgstr "" -#: nova/virt/vmwareapi/error_util.py:136 +#: nova/virt/vmwareapi/error_util.py:135 msgid "VMware Driver fault." msgstr "" -#: nova/virt/vmwareapi/error_util.py:142 +#: nova/virt/vmwareapi/error_util.py:141 msgid "VMware Driver configuration fault." msgstr "" -#: nova/virt/vmwareapi/error_util.py:146 +#: nova/virt/vmwareapi/error_util.py:145 msgid "No default value for use_linked_clone found." msgstr "" -#: nova/virt/vmwareapi/error_util.py:150 +#: nova/virt/vmwareapi/error_util.py:149 #, python-format msgid "Missing parameter : %(param)s" msgstr "" -#: nova/virt/vmwareapi/error_util.py:154 +#: nova/virt/vmwareapi/error_util.py:153 msgid "No root disk defined." msgstr "" -#: nova/virt/vmwareapi/error_util.py:158 +#: nova/virt/vmwareapi/error_util.py:157 msgid "Resource already exists." msgstr "" -#: nova/virt/vmwareapi/error_util.py:163 +#: nova/virt/vmwareapi/error_util.py:162 msgid "Cannot delete file." msgstr "" -#: nova/virt/vmwareapi/error_util.py:168 +#: nova/virt/vmwareapi/error_util.py:167 msgid "File already exists." msgstr "" -#: nova/virt/vmwareapi/error_util.py:173 +#: nova/virt/vmwareapi/error_util.py:172 msgid "File fault." msgstr "" -#: nova/virt/vmwareapi/error_util.py:178 +#: nova/virt/vmwareapi/error_util.py:177 msgid "File locked." msgstr "" -#: nova/virt/vmwareapi/error_util.py:183 +#: nova/virt/vmwareapi/error_util.py:182 msgid "File not found." msgstr "" -#: nova/virt/vmwareapi/error_util.py:188 +#: nova/virt/vmwareapi/error_util.py:187 msgid "Invalid property." msgstr "" -#: nova/virt/vmwareapi/error_util.py:193 +#: nova/virt/vmwareapi/error_util.py:192 msgid "No Permission." msgstr "" -#: nova/virt/vmwareapi/error_util.py:198 +#: nova/virt/vmwareapi/error_util.py:197 msgid "Not Authenticated." msgstr "" -#: nova/virt/vmwareapi/error_util.py:203 +#: nova/virt/vmwareapi/error_util.py:202 msgid "Invalid Power State." 
msgstr "" -#: nova/virt/vmwareapi/error_util.py:228 +#: nova/virt/vmwareapi/error_util.py:227 #, python-format msgid "Fault %s not matched." msgstr "" -#: nova/virt/vmwareapi/fake.py:243 -#, python-format -msgid "Property %(attr)s not set for the managed object %(name)s" -msgstr "Property %(attr)s not set for the managed object %(name)s" - -#: nova/virt/vmwareapi/fake.py:967 -msgid "There is no VM registered" -msgstr "There is no VM registered" - -#: nova/virt/vmwareapi/fake.py:969 nova/virt/vmwareapi/fake.py:1290 -#, python-format -msgid "Virtual Machine with ref %s is not there" -msgstr "Virtual Machine with ref %s is not there" - -#: nova/virt/vmwareapi/fake.py:1052 -#, python-format -msgid "Logging out a session that is invalid or already logged out: %s" -msgstr "Logging out a session that is invalid or already logged out: %s" - -#: nova/virt/vmwareapi/fake.py:1070 -msgid "Session Invalid" -msgstr "Session Invalid" - -#: nova/virt/vmwareapi/fake.py:1287 -#, fuzzy -msgid "No Virtual Machine has been registered yet" -msgstr " No Virtual Machine has been registered yet" - #: nova/virt/vmwareapi/imagecache.py:74 #, python-format msgid "Unable to delete %(file)s. Exception: %(ex)s" msgstr "" -#: nova/virt/vmwareapi/imagecache.py:148 +#: nova/virt/vmwareapi/imagecache.py:147 #, python-format msgid "Image %s is no longer used by this node. Pending deletion!" msgstr "" -#: nova/virt/vmwareapi/imagecache.py:153 +#: nova/virt/vmwareapi/imagecache.py:152 #, python-format msgid "Image %s is no longer used. Deleting!" 
msgstr "" -#: nova/virt/vmwareapi/io_util.py:121 +#: nova/virt/vmwareapi/io_util.py:122 #, python-format msgid "Glance image %s is in killed state" msgstr "Glance image %s is in killed state" -#: nova/virt/vmwareapi/io_util.py:129 +#: nova/virt/vmwareapi/io_util.py:130 #, python-format msgid "Glance image %(image_id)s is in unknown state - %(state)s" msgstr "Glance image %(image_id)s is in unknown state - %(state)s" @@ -8889,84 +8932,82 @@ msgstr "Exception in %s " msgid "Unable to retrieve value for %(path)s Reason: %(reason)s" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:195 +#: nova/virt/vmwareapi/vm_util.py:196 #, python-format msgid "%s is not supported." msgstr "" -#: nova/virt/vmwareapi/vm_util.py:980 +#: nova/virt/vmwareapi/vm_util.py:989 msgid "No host available on cluster" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:1210 +#: nova/virt/vmwareapi/vm_util.py:1083 #, python-format msgid "Failed to get cluster references %s" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:1222 +#: nova/virt/vmwareapi/vm_util.py:1095 #, python-format msgid "Failed to get resource pool references %s" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:1404 +#: nova/virt/vmwareapi/vm_util.py:1285 msgid "vmwareapi:vm_util:clone_vmref_for_instance, called with vm_ref=None" msgstr "" -#: nova/virt/vmwareapi/vmops.py:131 +#: nova/virt/vmwareapi/vmops.py:132 #, python-format msgid "Extending virtual disk failed with error: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:246 +#: nova/virt/vmwareapi/vmops.py:249 msgid "Image disk size greater than requested disk size" msgstr "" -#: nova/virt/vmwareapi/vmops.py:471 -#, python-format -msgid "Root disk file creation failed - %s" -msgstr "" - -#: nova/virt/vmwareapi/vmops.py:813 +#: nova/virt/vmwareapi/vmops.py:856 msgid "instance is not powered on" msgstr "instance is not powered on" -#: nova/virt/vmwareapi/vmops.py:869 +#: nova/virt/vmwareapi/vmops.py:884 +msgid "Instance does not exist on backend" +msgstr "" + +#: nova/virt/vmwareapi/vmops.py:916 
#, python-format msgid "" "In vmwareapi:vmops:_destroy_instance, got this exception while un-" "registering the VM: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:892 -#, python-format +#: nova/virt/vmwareapi/vmops.py:939 msgid "" -"In vmwareapi:vmops:_destroy_instance, got this exception while deleting " -"the VM contents from the disk: %s" +"In vmwareapi:vmops:_destroy_instance, exception while deleting the VM " +"contents from the disk" msgstr "" -#: nova/virt/vmwareapi/vmops.py:926 +#: nova/virt/vmwareapi/vmops.py:972 msgid "pause not supported for vmwareapi" msgstr "pause not supported for vmwareapi" -#: nova/virt/vmwareapi/vmops.py:930 +#: nova/virt/vmwareapi/vmops.py:976 msgid "unpause not supported for vmwareapi" msgstr "unpause not supported for vmwareapi" -#: nova/virt/vmwareapi/vmops.py:948 +#: nova/virt/vmwareapi/vmops.py:994 #, fuzzy msgid "instance is powered off and cannot be suspended." msgstr "instance is powered off and can not be suspended." -#: nova/virt/vmwareapi/vmops.py:968 +#: nova/virt/vmwareapi/vmops.py:1014 msgid "instance is not in a suspended state" msgstr "instance is not in a suspended state" -#: nova/virt/vmwareapi/vmops.py:1056 +#: nova/virt/vmwareapi/vmops.py:1102 #, fuzzy msgid "instance is suspended and cannot be powered off." 
msgstr "instance is not powered on" -#: nova/virt/vmwareapi/vmops.py:1147 +#: nova/virt/vmwareapi/vmops.py:1193 #, fuzzy, python-format msgid "" "In vmwareapi:vmops:confirm_migration, got this exception while destroying" @@ -8975,38 +9016,38 @@ msgstr "" "In vmwareapi:vmops:destroy, got this exception while un-registering the " "VM: %s" -#: nova/virt/vmwareapi/vmops.py:1213 nova/virt/xenapi/vmops.py:1497 +#: nova/virt/vmwareapi/vmops.py:1255 nova/virt/xenapi/vmops.py:1497 #, python-format msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" msgstr "Found %(instance_count)d hung reboots older than %(timeout)d seconds" -#: nova/virt/vmwareapi/vmops.py:1217 nova/virt/xenapi/vmops.py:1501 +#: nova/virt/vmwareapi/vmops.py:1259 nova/virt/xenapi/vmops.py:1501 msgid "Automatically hard rebooting" msgstr "Automatically hard rebooting" -#: nova/virt/vmwareapi/volumeops.py:217 nova/virt/vmwareapi/volumeops.py:251 +#: nova/virt/vmwareapi/volumeops.py:340 nova/virt/vmwareapi/volumeops.py:375 #, python-format msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" msgstr "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" -#: nova/virt/vmwareapi/volumeops.py:239 nova/virt/vmwareapi/volumeops.py:414 +#: nova/virt/vmwareapi/volumeops.py:363 nova/virt/vmwareapi/volumeops.py:538 #, fuzzy msgid "Unable to find iSCSI Target" msgstr "Unable to find address %r" -#: nova/virt/vmwareapi/volumeops.py:337 +#: nova/virt/vmwareapi/volumeops.py:461 #, python-format msgid "" "The volume's backing has been relocated to %s. Need to consolidate " "backing disk file." 
msgstr "" -#: nova/virt/vmwareapi/volumeops.py:375 nova/virt/vmwareapi/volumeops.py:422 +#: nova/virt/vmwareapi/volumeops.py:499 nova/virt/vmwareapi/volumeops.py:546 #, fuzzy msgid "Unable to find volume" msgstr "Failed to find volume in db" -#: nova/virt/vmwareapi/volumeops.py:395 nova/virt/vmwareapi/volumeops.py:424 +#: nova/virt/vmwareapi/volumeops.py:519 nova/virt/vmwareapi/volumeops.py:548 #: nova/virt/xenapi/volumeops.py:148 #, python-format msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" @@ -9097,16 +9138,16 @@ msgstr "Failure while cleaning up attached VDIs" msgid "Could not determine key: %s" msgstr "Could not determine key: %s" -#: nova/virt/xenapi/driver.py:632 +#: nova/virt/xenapi/driver.py:636 msgid "Host startup on XenServer is not supported." msgstr "Host startup on XenServer is not supported." -#: nova/virt/xenapi/fake.py:812 +#: nova/virt/xenapi/fake.py:811 #, python-format msgid "xenapi.fake does not have an implementation for %s" msgstr "xenapi.fake does not have an implementation for %s" -#: nova/virt/xenapi/fake.py:920 +#: nova/virt/xenapi/fake.py:919 #, python-format msgid "" "xenapi.fake does not have an implementation for %s or it has been called " @@ -9237,127 +9278,127 @@ msgid "" "%(version)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:325 nova/virt/xenapi/vm_utils.py:340 +#: nova/virt/xenapi/vm_utils.py:326 nova/virt/xenapi/vm_utils.py:341 msgid "VM already halted, skipping shutdown..." msgstr "VM already halted, skipping shutdown..." 
-#: nova/virt/xenapi/vm_utils.py:392 +#: nova/virt/xenapi/vm_utils.py:393 #, python-format msgid "VBD %s already detached" msgstr "VBD %s already detached" -#: nova/virt/xenapi/vm_utils.py:395 +#: nova/virt/xenapi/vm_utils.py:396 #, python-format msgid "" "VBD %(vbd_ref)s uplug failed with \"%(err)s\", attempt " "%(num_attempt)d/%(max_attempts)d" msgstr "" -#: nova/virt/xenapi/vm_utils.py:402 +#: nova/virt/xenapi/vm_utils.py:403 #, python-format msgid "Unable to unplug VBD %s" msgstr "Unable to unplug VBD %s" -#: nova/virt/xenapi/vm_utils.py:405 +#: nova/virt/xenapi/vm_utils.py:406 #, python-format msgid "Reached maximum number of retries trying to unplug VBD %s" msgstr "Reached maximum number of retries trying to unplug VBD %s" -#: nova/virt/xenapi/vm_utils.py:417 +#: nova/virt/xenapi/vm_utils.py:418 #, python-format msgid "Unable to destroy VBD %s" msgstr "Unable to destroy VBD %s" -#: nova/virt/xenapi/vm_utils.py:470 +#: nova/virt/xenapi/vm_utils.py:471 #, python-format msgid "Unable to destroy VDI %s" msgstr "Unable to destroy VDI %s" -#: nova/virt/xenapi/vm_utils.py:516 +#: nova/virt/xenapi/vm_utils.py:517 msgid "SR not present and could not be introduced" msgstr "SR not present and could not be introduced" -#: nova/virt/xenapi/vm_utils.py:700 +#: nova/virt/xenapi/vm_utils.py:701 #, python-format msgid "No primary VDI found for %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:792 +#: nova/virt/xenapi/vm_utils.py:793 #, python-format msgid "" "Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s" " is of type %(type)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:871 +#: nova/virt/xenapi/vm_utils.py:872 #, python-format msgid "Multiple base images for image: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:926 +#: nova/virt/xenapi/vm_utils.py:927 #, python-format msgid "" "VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor " "size of %(new_disk_size)d bytes." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:937 nova/virt/xenapi/vmops.py:1037 +#: nova/virt/xenapi/vm_utils.py:938 nova/virt/xenapi/vmops.py:1037 msgid "Can't resize a disk to 0 GB." msgstr "" -#: nova/virt/xenapi/vm_utils.py:989 +#: nova/virt/xenapi/vm_utils.py:990 msgid "Disk must have only one partition." msgstr "" -#: nova/virt/xenapi/vm_utils.py:994 +#: nova/virt/xenapi/vm_utils.py:995 #, python-format msgid "Disk contains a filesystem we are unable to resize: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:999 +#: nova/virt/xenapi/vm_utils.py:1000 msgid "The only partition should be partition 1." msgstr "" -#: nova/virt/xenapi/vm_utils.py:1010 +#: nova/virt/xenapi/vm_utils.py:1011 #, python-format msgid "Attempted auto_configure_disk failed because: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1261 +#: nova/virt/xenapi/vm_utils.py:1262 #, python-format msgid "" "Fast cloning is only supported on default local SR of type ext. SR on " "this system was found to be of type %s. Ignoring the cow flag." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:1336 +#: nova/virt/xenapi/vm_utils.py:1337 #, python-format msgid "Unrecognized cache_images value '%s', defaulting to True" msgstr "Unrecognized cache_images value '%s', defaulting to True" -#: nova/virt/xenapi/vm_utils.py:1412 +#: nova/virt/xenapi/vm_utils.py:1413 #, python-format msgid "Invalid value '%s' for torrent_images" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1435 +#: nova/virt/xenapi/vm_utils.py:1436 #, python-format msgid "Invalid value '%d' for image_compression_level" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1461 +#: nova/virt/xenapi/vm_utils.py:1462 #, python-format msgid "" "Download handler '%(handler)s' raised an exception, falling back to " "default handler '%(default_handler)s'" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1517 +#: nova/virt/xenapi/vm_utils.py:1518 #, python-format msgid "Image size %(size)d exceeded flavor allowed size %(allowed_size)d" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1568 +#: nova/virt/xenapi/vm_utils.py:1569 #, python-format msgid "" "Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " @@ -9366,37 +9407,37 @@ msgstr "" "Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " "bytes" -#: nova/virt/xenapi/vm_utils.py:1610 +#: nova/virt/xenapi/vm_utils.py:1611 msgid "Failed to fetch glance image" msgstr "Failed to fetch glance image" -#: nova/virt/xenapi/vm_utils.py:1818 +#: nova/virt/xenapi/vm_utils.py:1819 #, python-format msgid "Unable to parse rrd of %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1848 +#: nova/virt/xenapi/vm_utils.py:1849 #, python-format msgid "Retry SR scan due to error: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1881 +#: nova/virt/xenapi/vm_utils.py:1882 #, python-format msgid "Flag sr_matching_filter '%s' does not respect formatting convention" msgstr "Flag sr_matching_filter '%s' does not respect formatting convention" -#: nova/virt/xenapi/vm_utils.py:1902 +#: nova/virt/xenapi/vm_utils.py:1903 msgid "" 
"XenAPI is unable to find a Storage Repository to install guest instances " "on. Please check your configuration (e.g. set a default SR for the pool) " "and/or configure the flag 'sr_matching_filter'." msgstr "" -#: nova/virt/xenapi/vm_utils.py:1915 +#: nova/virt/xenapi/vm_utils.py:1916 msgid "Cannot find SR of content-type ISO" msgstr "Cannot find SR of content-type ISO" -#: nova/virt/xenapi/vm_utils.py:1968 +#: nova/virt/xenapi/vm_utils.py:1969 #, python-format msgid "" "Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " @@ -9405,60 +9446,60 @@ msgstr "" "Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " "%(server)s." -#: nova/virt/xenapi/vm_utils.py:2096 +#: nova/virt/xenapi/vm_utils.py:2097 #, python-format msgid "VHD coalesce attempts exceeded (%d), giving up..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2131 +#: nova/virt/xenapi/vm_utils.py:2132 #, python-format msgid "Timeout waiting for device %s to be created" msgstr "Timeout waiting for device %s to be created" -#: nova/virt/xenapi/vm_utils.py:2151 +#: nova/virt/xenapi/vm_utils.py:2152 #, python-format msgid "Disconnecting stale VDI %s from compute domU" msgstr "Disconnecting stale VDI %s from compute domU" -#: nova/virt/xenapi/vm_utils.py:2309 +#: nova/virt/xenapi/vm_utils.py:2310 msgid "" "Shrinking the filesystem down with resize2fs has failed, please check if " "you have enough free space on your disk." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:2444 +#: nova/virt/xenapi/vm_utils.py:2445 msgid "Manipulating interface files directly" msgstr "Manipulating interface files directly" -#: nova/virt/xenapi/vm_utils.py:2453 +#: nova/virt/xenapi/vm_utils.py:2454 #, python-format msgid "Failed to mount filesystem (expected for non-linux instances): %s" msgstr "Failed to mount filesystem (expected for non-linux instances): %s" -#: nova/virt/xenapi/vm_utils.py:2564 +#: nova/virt/xenapi/vm_utils.py:2566 msgid "This domU must be running on the host specified by connection_url" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2633 +#: nova/virt/xenapi/vm_utils.py:2635 msgid "Failed to transfer vhd to new host" msgstr "Failed to transfer vhd to new host" -#: nova/virt/xenapi/vm_utils.py:2659 +#: nova/virt/xenapi/vm_utils.py:2661 msgid "ipxe_boot_menu_url not set, user will have to enter URL manually..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2665 +#: nova/virt/xenapi/vm_utils.py:2667 msgid "ipxe_network_name not set, user will have to enter IP manually..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2676 +#: nova/virt/xenapi/vm_utils.py:2678 #, python-format msgid "" "Unable to find network matching '%(network_name)s', user will have to " "enter IP manually..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2700 +#: nova/virt/xenapi/vm_utils.py:2702 #, python-format msgid "ISO creation tool '%s' does not exist." msgstr "" diff --git a/nova/locale/es/LC_MESSAGES/nova-log-critical.po b/nova/locale/es/LC_MESSAGES/nova-log-critical.po new file mode 100644 index 0000000000..9b9347a953 --- /dev/null +++ b/nova/locale/es/LC_MESSAGES/nova-log-critical.po @@ -0,0 +1,33 @@ +# Translations template for nova. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the nova project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: nova\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-07-21 06:04+0000\n" +"PO-Revision-Date: 2014-07-16 11:52+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/" +"es/)\n" +"Language: es\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: nova/virt/vmwareapi/driver.py:864 +#, python-format +msgid "" +"Unable to connect to server at %(server)s, sleeping for %(seconds)s seconds" +msgstr "" +"Incapaz de conectar al servidor en %(server)s, esperando durante %(seconds)s " +"segundos" + +#: nova/virt/vmwareapi/driver.py:973 +#, python-format +msgid "In vmwareapi: _call_method (session=%s)" +msgstr "En vmwareapi: _call_method (session=%s)" diff --git a/nova/locale/es/LC_MESSAGES/nova-log-error.po b/nova/locale/es/LC_MESSAGES/nova-log-error.po index 600dd01dc6..8df99c1938 100644 --- a/nova/locale/es/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/es/LC_MESSAGES/nova-log-error.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:08+0000\n" -"PO-Revision-Date: 2014-06-30 05:01+0000\n" +"POT-Creation-Date: 2014-07-21 06:04+0000\n" +"PO-Revision-Date: 2014-07-16 14:42+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/" "es/)\n" @@ -44,6 +44,13 @@ msgstr "" msgid "Keystone failure: %s" msgstr "Anomalía de keystone: %s" +#: nova/compute/manager.py:5416 +msgid "" +"Periodic sync_power_state task had an error while processing an instance." +msgstr "" +"La tarea periódica sync_power_state ha tenido un error al procesar una " +"instancia." 
+ #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "No se ha podido notificar a las células el error de instancia" @@ -58,7 +65,7 @@ msgstr "Se está descartando excepción original: %s" msgid "Unexpected exception occurred %d time(s)... retrying." msgstr "La excepción inesperada ha ocurrido %d vez(veces)... reintentando." -#: nova/openstack/common/lockutils.py:120 +#: nova/openstack/common/lockutils.py:119 #, python-format msgid "Could not release the acquired lock `%s`" msgstr "No se ha podido liberar el bloqueo adquirido `%s`" @@ -71,22 +78,22 @@ msgstr "en llamada en bucle de duración fija" msgid "in dynamic looping call" msgstr "en llamada en bucle dinámica" -#: nova/openstack/common/periodic_task.py:179 +#: nova/openstack/common/periodic_task.py:202 #, python-format msgid "Error during %(full_task_name)s: %(e)s" msgstr "Error durante %(full_task_name)s: %(e)s" -#: nova/openstack/common/policy.py:511 +#: nova/openstack/common/policy.py:507 #, python-format msgid "Failed to understand rule %s" msgstr "Ha ocurrido un error al interpretar la regla %s" -#: nova/openstack/common/policy.py:521 +#: nova/openstack/common/policy.py:517 #, python-format msgid "No handler for matches of kind %s" msgstr "No hay manejador para coincidencias de clase %s" -#: nova/openstack/common/policy.py:791 +#: nova/openstack/common/policy.py:787 #, python-format msgid "Failed to understand rule %r" msgstr "Ha ocurrido un error al interpretar la regla %r" @@ -116,54 +123,50 @@ msgstr "Excepción de base de datos recortada." msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:625 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" "Nova necesita libvirt versión %(major)i.%(minor)i.%(micro)i o superior." 
-#: nova/virt/libvirt/driver.py:749 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "Ha fallado la conexión a libvirt: %s" -#: nova/virt/libvirt/driver.py:873 +#: nova/virt/libvirt/driver.py:937 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "Error de libvirt durante destrucción. Código=%(errcode)s Error=%(e)s" -#: nova/virt/libvirt/driver.py:889 -msgid "During wait destroy, instance disappeared." -msgstr "Durante la destrucción de espera, la instancia ha desaparecido." - -#: nova/virt/libvirt/driver.py:951 +#: nova/virt/libvirt/driver.py:1015 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" "Error de libvirt durante borrado de definición. Código=%(errcode)s Error=" "%(e)s" -#: nova/virt/libvirt/driver.py:977 +#: nova/virt/libvirt/driver.py:1041 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" "Error de libvirt durante eliminación de filtro. Código=%(errcode)s Error=" "%(e)s" -#: nova/virt/libvirt/driver.py:1389 +#: nova/virt/libvirt/driver.py:1445 msgid "attaching network adapter failed." msgstr "se ha encontrado un error en la conexión del adaptador de red." -#: nova/virt/libvirt/driver.py:1414 +#: nova/virt/libvirt/driver.py:1470 msgid "detaching network adapter failed." msgstr "se ha encontrado un error en la desconexión del adaptador de red." -#: nova/virt/libvirt/driver.py:1663 +#: nova/virt/libvirt/driver.py:1719 msgid "Failed to send updated snapshot status to volume service." msgstr "" "Fallo al enviar estado de instantánea actualizada al servicio de volumen." -#: nova/virt/libvirt/driver.py:1749 +#: nova/virt/libvirt/driver.py:1827 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." 
@@ -171,70 +174,70 @@ msgstr "" "Incapaz de crear instantánea de VM inmovilizada, intentando nuevamente con " "la inmovilidad deshabilitada" -#: nova/virt/libvirt/driver.py:1755 +#: nova/virt/libvirt/driver.py:1833 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" "Incapaz de crear instantánea de VM, operación de volume_snapshot fallida." -#: nova/virt/libvirt/driver.py:1804 +#: nova/virt/libvirt/driver.py:1882 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" "Error ocurrido durante volume_snapshot_create, enviando estado de error a " "Cinder." -#: nova/virt/libvirt/driver.py:1951 +#: nova/virt/libvirt/driver.py:2028 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." msgstr "" "Ha ocurrido un error durante volume_snapshot_delete, envinado estado de " "error a Cinder." -#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421 +#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "Error en '%(path)s' al comprobar E/S directa: '%(ex)s'" -#: nova/virt/libvirt/driver.py:2542 +#: nova/virt/libvirt/driver.py:2619 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "Error al inyectar datos en imagen %(img_id)s (%(e)s)" -#: nova/virt/libvirt/driver.py:2693 +#: nova/virt/libvirt/driver.py:2770 #, python-format msgid "Creating config drive failed with error: %s" msgstr "La creación de unidad de configuración ha fallado con el error: %s" -#: nova/virt/libvirt/driver.py:2786 +#: nova/virt/libvirt/driver.py:2863 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "La asociación de dispositivos PCI %(dev)s a %(dom)s ha fallado." 
-#: nova/virt/libvirt/driver.py:3553 +#: nova/virt/libvirt/driver.py:3645 #, python-format msgid "An error occurred while trying to define a domain with xml: %s" msgstr "Un error ha ocurrido al tratar de definir un dominio con xml: %s" -#: nova/virt/libvirt/driver.py:3562 +#: nova/virt/libvirt/driver.py:3654 #, python-format msgid "An error occurred while trying to launch a defined domain with xml: %s" msgstr "" "Un error ha ocurrido al intentar lanzar un dominio definido con xml: %s" -#: nova/virt/libvirt/driver.py:3571 +#: nova/virt/libvirt/driver.py:3663 #, python-format msgid "An error occurred while enabling hairpin mode on domain with xml: %s" msgstr "" "Un error ha ocurrido al habilitar el modo pasador en el dominio con xml: %s" -#: nova/virt/libvirt/driver.py:3589 +#: nova/virt/libvirt/driver.py:3681 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" "Neutron ha reportado una falla en el evento %(event)s para la instancia " "%(uuid)s" -#: nova/virt/libvirt/driver.py:3904 +#: nova/virt/libvirt/driver.py:3988 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " @@ -243,22 +246,22 @@ msgstr "" "El nombre del anfitrión ha cambiado de %(old)s a %(new)s. Se requiere un " "reinicio para hacer efecto." 
-#: nova/virt/libvirt/driver.py:4481 +#: nova/virt/libvirt/driver.py:4667 #, python-format msgid "Live Migration failure: %s" msgstr "Fallo en migración en vivo: %s" -#: nova/virt/libvirt/driver.py:5231 +#: nova/virt/libvirt/driver.py:5419 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "No se ha podido limpiar el directorio %(target)s: %(e)s" -#: nova/virt/libvirt/imagebackend.py:202 +#: nova/virt/libvirt/imagebackend.py:201 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "Incapaz de preallocate_images=%(imgs)s en la ruta: %(path)s" -#: nova/virt/libvirt/imagebackend.py:230 +#: nova/virt/libvirt/imagebackend.py:229 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " @@ -267,7 +270,7 @@ msgstr "" "El tamaño virtual %(base_size)s de %(base)s es más grande que el tamaño del " "disco raíz del sabor %(size)s" -#: nova/virt/libvirt/imagebackend.py:501 +#: nova/virt/libvirt/imagebackend.py:505 #, python-format msgid "error opening rbd image %s" msgstr "Error al abrir imagen rbd %s" @@ -287,20 +290,20 @@ msgstr "imagen %(id)s en (%(base_file)s): ha fallado la verificación de imagen" msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "No se ha podido eliminar %(base_file)s, el error era %(error)s" -#: nova/virt/libvirt/lvm.py:201 +#: nova/virt/libvirt/lvm.py:200 #, python-format msgid "ignoring unrecognized volume_clear='%s' value" msgstr "Ignorando valor no reconocido volume_clear='%s'" -#: nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572 -#: nova/virt/libvirt/vif.py:596 +#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 +#: nova/virt/libvirt/vif.py:562 msgid "Failed while plugging vif" msgstr "Fallo al conectar vif" -#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676 -#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717 -#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762 -#: 
nova/virt/libvirt/vif.py:784 +#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 +#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 +#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 +#: nova/virt/libvirt/vif.py:737 msgid "Failed while unplugging vif" msgstr "No se ha podido desconectar vif" @@ -309,12 +312,18 @@ msgstr "No se ha podido desconectar vif" msgid "Unknown content in connection_info/access_mode: %s" msgstr "Contenido desconocido en connection_info/access_mode: %s" -#: nova/virt/libvirt/volume.py:666 +#: nova/virt/libvirt/volume.py:669 #, python-format msgid "Couldn't unmount the NFS share %s" msgstr "No se puede desmontar el recurso compartido NFS %s" -#: nova/virt/libvirt/volume.py:815 +#: nova/virt/libvirt/volume.py:818 #, python-format msgid "Couldn't unmount the GlusterFS share %s" msgstr "No se puede desmontar el recurso compartido GlusterFS %s" + +#: nova/virt/vmwareapi/vmops.py:500 +#, python-format +msgid "" +"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s" +msgstr "" diff --git a/nova/locale/es/LC_MESSAGES/nova-log-info.po b/nova/locale/es/LC_MESSAGES/nova-log-info.po index 6d7b4a5c34..2d20bcc0a1 100644 --- a/nova/locale/es/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/es/LC_MESSAGES/nova-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" -"PO-Revision-Date: 2014-06-30 05:01+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"PO-Revision-Date: 2014-07-16 14:42+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/" "es/)\n" @@ -19,27 +19,35 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" +#: nova/compute/manager.py:5422 +#, python-format +msgid "" +"During sync_power_state the instance has a pending task (%(task)s). Skip." 
+msgstr "" +"Durante sync_power_state la instancia ha dejado una tarea pendiente " +"(%(task)s). Omitir." + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "Eventlet oculto escuchando en %(port)s para el proceso %(pid)d" -#: nova/openstack/common/lockutils.py:83 +#: nova/openstack/common/lockutils.py:82 #, python-format msgid "Created lock path: %s" msgstr "Candado creado ruta: %s" -#: nova/openstack/common/lockutils.py:250 +#: nova/openstack/common/lockutils.py:251 #, python-format msgid "Failed to remove file %(file)s" msgstr "" -#: nova/openstack/common/periodic_task.py:125 +#: nova/openstack/common/periodic_task.py:126 #, python-format msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "Omitiendo la tarea periódica %(task)s porque el intervalo es negativo" -#: nova/openstack/common/periodic_task.py:130 +#: nova/openstack/common/periodic_task.py:131 #, python-format msgid "Skipping periodic task %(task)s because it is disabled" msgstr "Omitiendo la tarea periódica %(task)s porque está inhabilitada" @@ -101,91 +109,96 @@ msgstr "Eliminando registro duplicado con id: %(id)s de la tabla: %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/libvirt/driver.py:894 +#: nova/virt/firewall.py:446 +#, python-format +msgid "instance chain %s disappeared during refresh, skipping" +msgstr "" + +#: nova/virt/libvirt/driver.py:839 +#, python-format +msgid "" +"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:958 msgid "Instance destroyed successfully." msgstr "La instancia se ha destruido satisfactoriamente. " -#: nova/virt/libvirt/driver.py:904 +#: nova/virt/libvirt/driver.py:968 msgid "Instance may be started again." msgstr "La instancia puede volver a iniciarse." 
-#: nova/virt/libvirt/driver.py:914 +#: nova/virt/libvirt/driver.py:978 msgid "Going to destroy instance again." msgstr "Se va a volver a destruir la instancia." -#: nova/virt/libvirt/driver.py:1518 +#: nova/virt/libvirt/driver.py:1574 msgid "Beginning live snapshot process" msgstr "Empezando proceso de instantánea en directo" -#: nova/virt/libvirt/driver.py:1521 +#: nova/virt/libvirt/driver.py:1577 msgid "Beginning cold snapshot process" msgstr "Empezando proceso de instantánea frío" -#: nova/virt/libvirt/driver.py:1550 +#: nova/virt/libvirt/driver.py:1606 msgid "Snapshot extracted, beginning image upload" msgstr "Se ha extraído instantánea, empezando subida de imagen" -#: nova/virt/libvirt/driver.py:1562 +#: nova/virt/libvirt/driver.py:1618 msgid "Snapshot image upload complete" msgstr "Subida de imagen de instantánea se ha completado" -#: nova/virt/libvirt/driver.py:1972 +#: nova/virt/libvirt/driver.py:2049 msgid "Instance soft rebooted successfully." msgstr "" "La instancia ha rearrancado satisfactoriamente de forma no permanente. " -#: nova/virt/libvirt/driver.py:2015 +#: nova/virt/libvirt/driver.py:2092 msgid "Instance shutdown successfully." msgstr "La instancia ha concluido satisfactoriamente." -#: nova/virt/libvirt/driver.py:2023 +#: nova/virt/libvirt/driver.py:2100 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "Es posible que la instancia se haya rearrancado durante el arranque no " "permanente, por consiguiente volver ahora." -#: nova/virt/libvirt/driver.py:2091 +#: nova/virt/libvirt/driver.py:2168 msgid "Instance rebooted successfully." msgstr "La instancia ha rearrancado satisfactoriamente." -#: nova/virt/libvirt/driver.py:2259 +#: nova/virt/libvirt/driver.py:2336 msgid "Instance spawned successfully." msgstr "Instancia generada satisfactoriamente. 
" -#: nova/virt/libvirt/driver.py:2275 +#: nova/virt/libvirt/driver.py:2352 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "data: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341 +#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Se ha devuelto registro de consola truncado, se han ignorado %d bytes " -#: nova/virt/libvirt/driver.py:2568 +#: nova/virt/libvirt/driver.py:2645 msgid "Creating image" msgstr "Creando imagen" -#: nova/virt/libvirt/driver.py:2677 +#: nova/virt/libvirt/driver.py:2754 msgid "Using config drive" msgstr "Utilizando unidad de configuración" -#: nova/virt/libvirt/driver.py:2686 +#: nova/virt/libvirt/driver.py:2763 #, python-format msgid "Creating config drive at %(path)s" msgstr "Creando unidad de configuración en %(path)s" -#: nova/virt/libvirt/driver.py:3223 +#: nova/virt/libvirt/driver.py:3315 msgid "Configuring timezone for windows instance to localtime" msgstr "Configurando la zona horaria para la instancia windows a horario local" -#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821 -#: nova/virt/libvirt/driver.py:3849 -#, python-format -msgid "libvirt can't find a domain with id: %s" -msgstr "libvirt no puede encontrar un dominio con id: %s" - -#: nova/virt/libvirt/driver.py:4109 +#: nova/virt/libvirt/driver.py:4193 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" @@ -195,7 +208,7 @@ msgstr "" "desasociado. Instancia=%(instance_name)s Disco=%(disk)s Código=%(errcode)s " "Error=%(e)s" -#: nova/virt/libvirt/driver.py:4115 +#: nova/virt/libvirt/driver.py:4199 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -204,26 +217,26 @@ msgstr "" "No se ha podido encontrar el dominio en libvirt para la instancia %s. 
No se " "pueden obtener estadísticas de bloque para el dispositivo" -#: nova/virt/libvirt/driver.py:4330 +#: nova/virt/libvirt/driver.py:4441 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:4986 +#: nova/virt/libvirt/driver.py:5174 msgid "Instance running successfully." msgstr "La instancia se está ejecutando satisfactoriamente." -#: nova/virt/libvirt/driver.py:5226 +#: nova/virt/libvirt/driver.py:5414 #, python-format msgid "Deleting instance files %s" msgstr "Eliminado los archivos de instancia %s" -#: nova/virt/libvirt/driver.py:5238 +#: nova/virt/libvirt/driver.py:5426 #, python-format msgid "Deletion of %s failed" msgstr "La remoción de %s ha fallado" -#: nova/virt/libvirt/driver.py:5241 +#: nova/virt/libvirt/driver.py:5429 #, python-format msgid "Deletion of %s complete" msgstr "La remoción de %s se ha completado" @@ -236,7 +249,7 @@ msgstr "Se ha llamado a setup_basic_filtering en nwfilter" msgid "Ensuring static filters" msgstr "Asegurando filtros estáticos" -#: nova/virt/libvirt/firewall.py:306 +#: nova/virt/libvirt/firewall.py:304 msgid "Attempted to unfilter instance which is not filtered" msgstr "Se ha intentado eliminar filtro de instancia que no está filtrada" @@ -297,11 +310,11 @@ msgstr "Archivos de base corruptos: %s " msgid "Removable base files: %s" msgstr "Archivos de base eliminables: %s" -#: nova/virt/libvirt/utils.py:530 +#: nova/virt/libvirt/utils.py:536 msgid "findmnt tool is not installed" msgstr "La herramienta findmnt no está instalada" -#: nova/virt/xenapi/vm_utils.py:1352 +#: nova/virt/xenapi/vm_utils.py:1353 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/es/LC_MESSAGES/nova-log-warning.po b/nova/locale/es/LC_MESSAGES/nova-log-warning.po index 64fb423b59..785e991caf 100644 --- a/nova/locale/es/LC_MESSAGES/nova-log-warning.po +++ b/nova/locale/es/LC_MESSAGES/nova-log-warning.po @@ -7,7 +7,7 @@ msgid "" 
msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:08+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" "PO-Revision-Date: 2014-06-24 16:11+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/" @@ -19,10 +19,15 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -#: nova/compute/manager.py:1998 +#: nova/compute/manager.py:2002 msgid "No more network or fixed IP to be allocated" msgstr "" +#: nova/compute/manager.py:2267 +#, python-format +msgid "Failed to delete volume: %(volume_id)s due to %(exc)s" +msgstr "" + #: nova/consoleauth/manager.py:84 #, python-format msgid "Token: %(token)s failed to save into memcached." @@ -94,14 +99,14 @@ msgstr "No se puede decodificar cpu_allocation_ratio: '%s'" msgid "Could not decode ram_allocation_ratio: '%s'" msgstr "No se puede decodificar ram_allocation_ratio: '%s'" -#: nova/virt/libvirt/driver.py:368 +#: nova/virt/libvirt/driver.py:374 #, python-format msgid "Invalid cachemode %(cache_mode)s specified for disk type %(disk_type)s." msgstr "" "Modalidad de caché %(cache_mode)s no válida especificada para el tipo de " "disco %(disk_type)s." -#: nova/virt/libvirt/driver.py:606 +#: nova/virt/libvirt/driver.py:620 #, python-format msgid "" "The libvirt driver is not tested on %(type)s/%(arch)s by the OpenStack " @@ -112,77 +117,81 @@ msgstr "" "projecto de OpenStack por lo cual su calidad no puede ser asegurada. 
Para " "mas información, ver: https://wiki.openstack.org/wiki/HypervisorSupportMatrix" -#: nova/virt/libvirt/driver.py:656 +#: nova/virt/libvirt/driver.py:671 #, python-format msgid "URI %(uri)s does not support events: %(error)s" msgstr "URI %(uri)s no soporta eventos: %(error)s" -#: nova/virt/libvirt/driver.py:672 +#: nova/virt/libvirt/driver.py:687 #, python-format msgid "URI %(uri)s does not support connection events: %(error)s" msgstr "URI %(uri)s no soporta eventos de conexión: %(error)s" -#: nova/virt/libvirt/driver.py:865 +#: nova/virt/libvirt/driver.py:929 msgid "Cannot destroy instance, operation time out" msgstr "" "No se puede destruir intsancia, tiempo de espera agotado para la operación" -#: nova/virt/libvirt/driver.py:971 +#: nova/virt/libvirt/driver.py:953 +msgid "During wait destroy, instance disappeared." +msgstr "" + +#: nova/virt/libvirt/driver.py:1035 msgid "Instance may be still running, destroy it again." msgstr "Puede que la instancia aún se esté ejecutando, vuelva a destruirla." -#: nova/virt/libvirt/driver.py:1026 +#: nova/virt/libvirt/driver.py:1088 #, python-format msgid "Ignoring Volume Error on vol %(vol_id)s during delete %(exc)s" msgstr "" "Ignorando Error de volumen en volumen %(vol_id)s durante la remocion %(exc)s" -#: nova/virt/libvirt/driver.py:1076 +#: nova/virt/libvirt/driver.py:1141 #, python-format msgid "Volume %(disk)s possibly unsafe to remove, please clean up manually" msgstr "" "El volumen %(disk)s es posiblemente inseguro para remover, por favor " "límpialo manualmente" -#: nova/virt/libvirt/driver.py:1357 nova/virt/libvirt/driver.py:1365 +#: nova/virt/libvirt/driver.py:1415 nova/virt/libvirt/driver.py:1423 msgid "During detach_volume, instance disappeared." msgstr "Durante detach_volume, la instancia ha desaparecido." -#: nova/virt/libvirt/driver.py:1410 +#: nova/virt/libvirt/driver.py:1466 msgid "During detach_interface, instance disappeared." msgstr "Durante detach_interface, la instancia ha desaparecido." 
-#: nova/virt/libvirt/driver.py:1976 +#: nova/virt/libvirt/driver.py:2053 msgid "Failed to soft reboot instance. Trying hard reboot." msgstr "" "Fallo al reiniciar la instancia de manera suave. Intentando reinicio duro." -#: nova/virt/libvirt/driver.py:2537 +#: nova/virt/libvirt/driver.py:2614 #, python-format msgid "Image %s not found on disk storage. Continue without injecting data" msgstr "" "La imagen %s no se ha encontrado en el almacenamiento de disco. Continuando " "sin inyectar datos." -#: nova/virt/libvirt/driver.py:2700 +#: nova/virt/libvirt/driver.py:2777 msgid "File injection into a boot from volume instance is not supported" msgstr "" "La inyección de archivo al arranque desde la instancia del volumen no está " "soportado." -#: nova/virt/libvirt/driver.py:2775 +#: nova/virt/libvirt/driver.py:2852 msgid "Instance disappeared while detaching a PCI device from it." msgstr "" "La instancia ha desaparecido mientras se removía el dispositivo PCI de ella." -#: nova/virt/libvirt/driver.py:2830 +#: nova/virt/libvirt/driver.py:2907 #, python-format msgid "Cannot update service status on host: %s,since it is not registered." msgstr "" "No se puede actualizar el estado del servicio en el anfitrión: %s, ya que el " "mismo no está registrado." -#: nova/virt/libvirt/driver.py:2833 +#: nova/virt/libvirt/driver.py:2910 #, python-format msgid "" "Cannot update service status on host: %s,due to an unexpected exception." @@ -190,19 +199,24 @@ msgstr "" "No se puede atualizar el estado del servicio en el anfitrión: %s, debido a " "una excepción inesperada." 
-#: nova/virt/libvirt/driver.py:2861 +#: nova/virt/libvirt/driver.py:2938 #, python-format msgid "URI %(uri)s does not support full set of host capabilities: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:3672 +#: nova/virt/libvirt/driver.py:3763 #, python-format msgid "Timeout waiting for vif plugging callback for instance %(uuid)s" msgstr "" "Tiempo excedido para la llamada inversa de la conexión vif para la instancia " "%(uuid)s" -#: nova/virt/libvirt/driver.py:3750 +#: nova/virt/libvirt/driver.py:3784 +#, python-format +msgid "couldn't obtain the XML from domain: %(uuid)s, exception: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:3841 msgid "" "Cannot get the number of cpu, because this function is not implemented for " "this platform. " @@ -210,24 +224,28 @@ msgstr "" "No se puede obtener el número de CPU porque esta función no está " "implementada para esta plataforma. " -#: nova/virt/libvirt/driver.py:3813 +#: nova/virt/libvirt/driver.py:3901 +#, python-format +msgid "" +"couldn't obtain the vpu count from domain id: %(uuid)s, exception: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:3932 #, python-format -msgid "couldn't obtain the vpu count from domain id: %(id)s, exception: %(ex)s" +msgid "couldn't obtain the memory from domain: %(uuid)s, exception: %(ex)s" msgstr "" -"no se puede obtener el conteo de vpu del identificador del dominio: %(id)s, " -"excepción: %(ex)s" -#: nova/virt/libvirt/driver.py:4050 +#: nova/virt/libvirt/driver.py:4134 #, python-format msgid "URI %(uri)s does not support listDevices: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:4594 +#: nova/virt/libvirt/driver.py:4789 #, python-format msgid "plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d." msgstr "plug_vifs() ha fallado %(cnt)d. Intentando hasta %(max_retry)d." 
-#: nova/virt/libvirt/driver.py:4727 +#: nova/virt/libvirt/driver.py:4990 #, python-format msgid "" "Error from libvirt while getting description of %(instance_name)s: [Error " @@ -236,7 +254,7 @@ msgstr "" "Error de libvirt al obtener la descripción de %(instance_name)s: [Código de " "error %(error_code)s] %(ex)s" -#: nova/virt/libvirt/driver.py:4805 +#: nova/virt/libvirt/driver.py:4998 #, python-format msgid "" "Periodic task is updating the host stat, it is trying to get disk " @@ -247,7 +265,7 @@ msgstr "" "intentando obtener el disco %(i_name)s, pero el disco ha sido removido por " "operaciones concurrentes como la modificación de tamaño." -#: nova/virt/libvirt/driver.py:4811 +#: nova/virt/libvirt/driver.py:5004 #, python-format msgid "" "Periodic task is updating the host stat, it is trying to get disk " @@ -285,7 +303,7 @@ msgstr "" msgid "Unknown base file: %s" msgstr "Archivo de base desconocido: %s " -#: nova/virt/libvirt/lvm.py:68 +#: nova/virt/libvirt/lvm.py:67 #, python-format msgid "" "Volume group %(vg)s will not be able to hold sparse volume %(lv)s. Virtual " @@ -300,12 +318,12 @@ msgstr "" msgid "systool is not installed" msgstr "systool no está instalado" -#: nova/virt/libvirt/utils.py:242 +#: nova/virt/libvirt/utils.py:248 #, python-format msgid "rbd remove %(name)s in pool %(pool)s failed" msgstr "la remoción rbd de %(name)s en el conjunto %(pool)s ha fallado" -#: nova/virt/libvirt/vif.py:827 +#: nova/virt/libvirt/vif.py:767 #, python-format msgid "" "VIF driver \"%s\" is marked as deprecated and will be removed in the Juno " @@ -317,7 +335,7 @@ msgstr "" msgid "Unknown content in connection_info/qos_specs: %s" msgstr "Contenido desconocido en connection_info/qos_specs: %s" -#: nova/virt/libvirt/volume.py:294 +#: nova/virt/libvirt/volume.py:297 #, python-format msgid "" "ISCSI volume not yet found at: %(disk_dev)s. Will rescan & retry. Try " @@ -326,12 +344,12 @@ msgstr "" "El volumen ISCSI aún no se ha encontrado en: %(disk_dev)s. 
Se volverá a " "explorar y se reintentará. Número de intentos: %(tries)s" -#: nova/virt/libvirt/volume.py:361 +#: nova/virt/libvirt/volume.py:364 #, python-format msgid "Unable to delete volume device %s" msgstr "Incapaz de eliminar el dispositivo de volumen %s" -#: nova/virt/libvirt/volume.py:372 +#: nova/virt/libvirt/volume.py:375 #, python-format msgid "" "Failed to remove multipath device descriptor %(dev_mapper)s. Exception " @@ -340,19 +358,19 @@ msgstr "" "Fallo al remover el descriptor del dispositivo multiruta %(dev_mapper)s. " "Mensaje de excepción: %(msg)s" -#: nova/virt/libvirt/volume.py:694 nova/virt/libvirt/volume.py:843 +#: nova/virt/libvirt/volume.py:697 nova/virt/libvirt/volume.py:846 #, python-format msgid "%s is already mounted" msgstr "%s ya está montado " -#: nova/virt/libvirt/volume.py:739 +#: nova/virt/libvirt/volume.py:742 #, python-format msgid "AoE volume not yet found at: %(aoedevpath)s. Try number: %(tries)s" msgstr "" "El volumen AoE aún no se ha encontrado en: %(aoedevpath)s. Número de " "intentos: %(tries)s" -#: nova/virt/libvirt/volume.py:931 +#: nova/virt/libvirt/volume.py:934 #, python-format msgid "" "Fibre volume not yet found at: %(mount_device)s. Will rescan & retry. Try " @@ -361,19 +379,25 @@ msgstr "" "El volumen de fibra aún no se ha encontrado en: %(mount_device)s. Se volverá " "a explorar y se reintentará. 
Número de intentos: %(tries)s" -#: nova/virt/libvirt/volume.py:1033 +#: nova/virt/libvirt/volume.py:1036 msgid "Value required for 'scality_sofs_config'" msgstr "Valor necesario para 'scality_sofs_config'" -#: nova/virt/libvirt/volume.py:1044 +#: nova/virt/libvirt/volume.py:1047 #, python-format msgid "Cannot access 'scality_sofs_config': %s" msgstr "No se puede acceder a 'scality_sofs_config': %s" -#: nova/virt/libvirt/volume.py:1050 +#: nova/virt/libvirt/volume.py:1053 msgid "Cannot execute /sbin/mount.sofs" msgstr "No se puede ejecutar /sbin/mount.sofs" -#: nova/virt/libvirt/volume.py:1065 +#: nova/virt/libvirt/volume.py:1068 msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "No se puede montar Scality SOFS, compruebe syslog por si hay errores" + +#~ msgid "" +#~ "couldn't obtain the vpu count from domain id: %(id)s, exception: %(ex)s" +#~ msgstr "" +#~ "no se puede obtener el conteo de vpu del identificador del dominio: " +#~ "%(id)s, excepción: %(ex)s" diff --git a/nova/locale/es/LC_MESSAGES/nova.po b/nova/locale/es/LC_MESSAGES/nova.po index cf6c5d1957..d238d41a52 100644 --- a/nova/locale/es/LC_MESSAGES/nova.po +++ b/nova/locale/es/LC_MESSAGES/nova.po @@ -12,8 +12,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" -"PO-Revision-Date: 2014-06-30 04:40+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"PO-Revision-Date: 2014-07-19 23:09+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish " "(http://www.transifex.com/projects/p/nova/language/es/)\n" @@ -23,39 +23,39 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: nova/block_device.py:99 +#: nova/block_device.py:100 msgid "Some fields are invalid." msgstr "" -#: nova/block_device.py:109 +#: nova/block_device.py:110 msgid "Some required fields are missing" msgstr "" -#: nova/block_device.py:125 +#: nova/block_device.py:126 msgid "Boot index is invalid." 
msgstr "" -#: nova/block_device.py:168 +#: nova/block_device.py:169 msgid "Unrecognized legacy format." msgstr "" -#: nova/block_device.py:185 +#: nova/block_device.py:186 msgid "Invalid source_type field." msgstr "" -#: nova/block_device.py:189 +#: nova/block_device.py:190 msgid "Missing device UUID." msgstr "" -#: nova/block_device.py:368 +#: nova/block_device.py:369 msgid "Device name empty or too long." msgstr "" -#: nova/block_device.py:372 +#: nova/block_device.py:373 msgid "Device name contains spaces." msgstr "" -#: nova/block_device.py:382 +#: nova/block_device.py:383 msgid "Invalid volume_size." msgstr "" @@ -436,53 +436,53 @@ msgstr "Fallo al terminar la instancia: %(reason)s" msgid "Failed to deploy instance: %(reason)s" msgstr "Fallo al desplegar instancia: %(reason)s" -#: nova/exception.py:402 +#: nova/exception.py:402 nova/exception.py:406 #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "Fallo al ejecutar instancias: %(reason)s" -#: nova/exception.py:406 +#: nova/exception.py:410 msgid "Service is unavailable at this time." msgstr "El servicio no esta disponible en este momento" -#: nova/exception.py:410 +#: nova/exception.py:414 #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "Recursos de cómputo insuficientes: %(reason)s." -#: nova/exception.py:414 +#: nova/exception.py:418 #, python-format msgid "Connection to the hypervisor is broken on host: %(host)s" msgstr "La conexión al hipervisor está perdida en el anfitrión: %(host)s" -#: nova/exception.py:418 +#: nova/exception.py:422 #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "El servicio Compute de %(host)s no está disponible en este momento." -#: nova/exception.py:422 +#: nova/exception.py:426 #, python-format msgid "Compute service of %(host)s is still in use." msgstr "El servicio Compute de %(host)s todavía se encuentra en uso." 
-#: nova/exception.py:426 +#: nova/exception.py:430 #, python-format msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "" "Incapaz de emigrar la instancia %(instance_id)s al actual anfitrion " "(%(host)s)" -#: nova/exception.py:431 +#: nova/exception.py:435 msgid "The supplied hypervisor type of is invalid." msgstr "El tipo de hipervisor proporcionado no es válido. " -#: nova/exception.py:435 +#: nova/exception.py:439 msgid "The instance requires a newer hypervisor version than has been provided." msgstr "" "La instancia necesita una versión de hipervisor más reciente que la " "proporcionada." -#: nova/exception.py:440 +#: nova/exception.py:444 #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " @@ -491,32 +491,32 @@ msgstr "" "La ruta de disco proporcionada (%(path)s) ya existe, se espera una que no" " exista." -#: nova/exception.py:445 +#: nova/exception.py:449 #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "La ruta proporcionada al dispositivo (%(path)s) no es válida." -#: nova/exception.py:449 +#: nova/exception.py:453 #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "La ruta proporcionada al dispositivo (%(path)s) está en uso." -#: nova/exception.py:454 +#: nova/exception.py:458 #, python-format msgid "The supplied device (%(device)s) is busy." msgstr "El dispositivo proporcionado (%(device)s) está ocupado." -#: nova/exception.py:458 +#: nova/exception.py:462 #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "Información de CPU inválida: %(reason)s" -#: nova/exception.py:462 +#: nova/exception.py:466 #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s no es una direccion IP v4/6 valida" -#: nova/exception.py:466 +#: nova/exception.py:470 #, python-format msgid "" "VLAN tag is not appropriate for the port group %(bridge)s. 
Expected VLAN " @@ -526,7 +526,7 @@ msgstr "" "etiqueta VLAN que se espera es %(tag)s, pero la asociada con el grupo de " "puertos es %(pgroup)s." -#: nova/exception.py:472 +#: nova/exception.py:476 #, python-format msgid "" "vSwitch which contains the port group %(bridge)s is not associated with " @@ -537,60 +537,60 @@ msgstr "" "con el adaptador físico deseado. El vSwitch esperado es %(expected)s, " "pero el asociado es %(actual)s." -#: nova/exception.py:479 +#: nova/exception.py:483 #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "Formato de disco %(disk_format)s no es aceptable" -#: nova/exception.py:483 +#: nova/exception.py:487 #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "El archivo de información de disco es inválido: %(reason)s" -#: nova/exception.py:487 +#: nova/exception.py:491 #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "Fallo al leer o escribir el archivo de información de disco: %(reason)s" -#: nova/exception.py:491 +#: nova/exception.py:495 #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "La imagen %(image_id)s es inaceptable: %(reason)s" -#: nova/exception.py:495 +#: nova/exception.py:499 #, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "La instancia %(instance_id)s no es aceptable: %(reason)s" -#: nova/exception.py:499 +#: nova/exception.py:503 #, python-format msgid "Ec2 id %(ec2_id)s is unacceptable." msgstr "El id de Ec2 %(ec2_id)s no es aceptable. " -#: nova/exception.py:503 +#: nova/exception.py:507 #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "Se esperaba un uuid pero se ha recibido %(uuid)s." -#: nova/exception.py:507 +#: nova/exception.py:511 #, python-format msgid "Invalid ID received %(id)s." msgstr "Se ha recibido el ID %(id)s no válido." -#: nova/exception.py:511 +#: nova/exception.py:515 msgid "Constraint not met." msgstr "Restricción no cumplida." 
-#: nova/exception.py:516 +#: nova/exception.py:520 msgid "Resource could not be found." msgstr "No se ha podido encontrar el recurso." -#: nova/exception.py:521 +#: nova/exception.py:525 #, python-format msgid "No agent-build associated with id %(id)s." msgstr "No hay ninguna compilación de agente asociada con el id %(id)s." -#: nova/exception.py:525 +#: nova/exception.py:529 #, python-format msgid "" "Agent-build with hypervisor %(hypervisor)s os %(os)s architecture " @@ -599,53 +599,53 @@ msgstr "" "Compilación agente con hipervisor %(hypervisor)s S.O. %(os)s arquitectura" " %(architecture)s existe." -#: nova/exception.py:531 +#: nova/exception.py:535 #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "No se ha podido encontrar el volumen %(volume_id)s." -#: nova/exception.py:535 +#: nova/exception.py:539 #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "No hay volumen de Block Device Mapping con identificador %(volume_id)s." -#: nova/exception.py:540 +#: nova/exception.py:544 #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "No se ha podido encontrar la instantánea %(snapshot_id)s." -#: nova/exception.py:544 +#: nova/exception.py:548 #, python-format msgid "No disk at %(location)s" msgstr "No hay ningún disco en %(location)s" -#: nova/exception.py:548 +#: nova/exception.py:552 #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "No se ha podido encontrar un manejador para el volumen %(driver_type)s." -#: nova/exception.py:552 +#: nova/exception.py:556 #, python-format msgid "Invalid image href %(image_href)s." msgstr "href de imagen %(image_href)s no válida." -#: nova/exception.py:556 +#: nova/exception.py:560 #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "" "La imagen solicitada %(image)s tiene desactivada la modificación " "automática de tamaño de disco." 
-#: nova/exception.py:561 +#: nova/exception.py:565 #, python-format msgid "Image %(image_id)s could not be found." msgstr "No se ha podido encontrar la imagen %(image_id)s. " -#: nova/exception.py:565 +#: nova/exception.py:569 msgid "The current driver does not support preserving ephemeral partitions." msgstr "El dispositivo actual no soporta la preservación de particiones efímeras." -#: nova/exception.py:571 +#: nova/exception.py:575 #, python-format msgid "" "Image %(image_id)s could not be found. The nova EC2 API assigns image ids" @@ -656,69 +656,69 @@ msgstr "" "ID de imagen dinámicamente cuando se listan por primera vez. ¿Ha listado " "los ID de imagen desde que ha añadido esta imagen?" -#: nova/exception.py:578 +#: nova/exception.py:582 #, python-format msgid "Project %(project_id)s could not be found." msgstr "No se ha podido encontrar el proyecto %(project_id)s." -#: nova/exception.py:582 +#: nova/exception.py:586 msgid "Cannot find SR to read/write VDI." msgstr "No se puede encontrar SR para leer/grabar VDI." -#: nova/exception.py:586 +#: nova/exception.py:590 #, python-format msgid "Network %(network_id)s is duplicated." msgstr "La red %(network_id)s está duplicada." -#: nova/exception.py:590 +#: nova/exception.py:594 #, python-format msgid "Network %(network_id)s is still in use." msgstr "La red %(network_id)s aún se está utilizando." -#: nova/exception.py:594 +#: nova/exception.py:598 #, python-format msgid "%(req)s is required to create a network." msgstr "Se necesita %(req)s para crear una red." -#: nova/exception.py:598 +#: nova/exception.py:602 #, python-format msgid "Network %(network_id)s could not be found." msgstr "No se ha podido encontrar la red %(network_id)s." -#: nova/exception.py:602 +#: nova/exception.py:606 #, python-format msgid "Port id %(port_id)s could not be found." msgstr "No se ha podido encontrar el ID de puerto %(port_id)s." 
-#: nova/exception.py:606 +#: nova/exception.py:610 #, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "No se ha podido encontrar la red para el puente %(bridge)s" -#: nova/exception.py:610 +#: nova/exception.py:614 #, python-format msgid "Network could not be found for uuid %(uuid)s" msgstr "No se ha podido encontrar la red para el uuid %(uuid)s" -#: nova/exception.py:614 +#: nova/exception.py:618 #, python-format msgid "Network could not be found with cidr %(cidr)s." msgstr "No se ha podido encontrar la red con cidr %(cidr)s." -#: nova/exception.py:618 +#: nova/exception.py:622 #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "No se ha podido encontrar la red para la instancia %(instance_id)s." -#: nova/exception.py:622 +#: nova/exception.py:626 msgid "No networks defined." msgstr "No se han definido redes." -#: nova/exception.py:626 +#: nova/exception.py:630 msgid "No more available networks." msgstr "No se encuentran más redes disponibles." -#: nova/exception.py:630 +#: nova/exception.py:634 #, python-format msgid "" "Either network uuid %(network_uuid)s is not present or is not assigned to" @@ -727,7 +727,7 @@ msgstr "" "Bien sea que el uuid de la red %(network_uuid)s no está presente o no " "está asignado al proyecto %(project_id)s." -#: nova/exception.py:635 +#: nova/exception.py:639 msgid "" "More than one possible network found. Specify network ID(s) to select " "which one(s) to connect to," @@ -735,86 +735,86 @@ msgstr "" "Se ha encontrado más de una red posible. Especifique el ID de la red para" " seleccionar a cuál(es) conectarse." -#: nova/exception.py:640 +#: nova/exception.py:644 #, python-format msgid "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "" "La red %(network_uuid)s requiere una subred para poder arrancar " "instancias." 
-#: nova/exception.py:645 +#: nova/exception.py:649 #, python-format msgid "" "It is not allowed to create an interface on external network " "%(network_uuid)s" msgstr "No está permitido crear una interfaz en una red externa %(network_uuid)s" -#: nova/exception.py:650 +#: nova/exception.py:654 msgid "Could not find the datastore reference(s) which the VM uses." msgstr "" "No se ha podido encontrar la(s) referencia(s) de almacén de datos que la " "MV utiliza." -#: nova/exception.py:654 +#: nova/exception.py:658 #, python-format msgid "Port %(port_id)s is still in use." msgstr "El puerto %(port_id)s todavía se está utilizando." -#: nova/exception.py:658 +#: nova/exception.py:662 #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "El puerto %(port_id)s requiere una FixedIP para poder ser utilizado." -#: nova/exception.py:662 +#: nova/exception.py:666 #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "El puerto %(port_id)s no es utilizable para la instancia %(instance)s." -#: nova/exception.py:666 +#: nova/exception.py:670 #, python-format msgid "No free port available for instance %(instance)s." msgstr "No hay ningún puerto libre disponible para la instancia %(instance)s." -#: nova/exception.py:670 +#: nova/exception.py:674 #, python-format msgid "Fixed ip %(address)s already exists." msgstr "La dirección IP estática %(address)s ya existe." -#: nova/exception.py:674 +#: nova/exception.py:678 #, python-format msgid "No fixed IP associated with id %(id)s." msgstr "No hay ninguna dirección IP fija asociada con el %(id)s." -#: nova/exception.py:678 +#: nova/exception.py:682 #, python-format msgid "Fixed ip not found for address %(address)s." msgstr "No se ha encontrado una dirección IP fija para la dirección %(address)s." -#: nova/exception.py:682 +#: nova/exception.py:686 #, python-format msgid "Instance %(instance_uuid)s has zero fixed ips." 
msgstr "La instancia %(instance_uuid)s no tiene ninguna IP fija." -#: nova/exception.py:686 +#: nova/exception.py:690 #, python-format msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." msgstr "" "El host de red %(host)s no tiene ninguna dirección IP fija en la red " "%(network_id)s." -#: nova/exception.py:691 +#: nova/exception.py:695 #, python-format msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'." msgstr "La instancia %(instance_uuid)s no tiene la dirección IP fija '%(ip)s'." -#: nova/exception.py:695 +#: nova/exception.py:699 #, python-format msgid "" "Fixed IP address (%(address)s) does not exist in network " "(%(network_uuid)s)." msgstr "La dirección IP fija (%(address)s) no existe en la red (%(network_uuid)s)." -#: nova/exception.py:700 +#: nova/exception.py:704 #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance " @@ -823,128 +823,128 @@ msgstr "" "La dirección IP fija %(address)s ya se está utilizando en la instancia " "%(instance_uuid)s." -#: nova/exception.py:705 +#: nova/exception.py:709 #, python-format msgid "More than one instance is associated with fixed ip address '%(address)s'." msgstr "Hay más de una instancia asociada con la dirección IP fija '%(address)s'." -#: nova/exception.py:710 +#: nova/exception.py:714 #, python-format msgid "Fixed IP address %(address)s is invalid." msgstr "La dirección IP fija %(address)s no es válida." -#: nova/exception.py:715 +#: nova/exception.py:719 msgid "Zero fixed ips available." msgstr "No hay ninguna dirección IP fija disponible." -#: nova/exception.py:719 +#: nova/exception.py:723 msgid "Zero fixed ips could be found." msgstr "No se ha podido encontrar ninguna dirección IP fija." -#: nova/exception.py:723 +#: nova/exception.py:727 #, python-format msgid "Floating ip %(address)s already exists." msgstr "Ya existe la dirección IP flotante %(address)s." 
-#: nova/exception.py:728 +#: nova/exception.py:732 #, python-format msgid "Floating ip not found for id %(id)s." msgstr "No se ha encontrado ninguna dirección IP flotante para el id %(id)s." -#: nova/exception.py:732 +#: nova/exception.py:736 #, python-format msgid "The DNS entry %(name)s already exists in domain %(domain)s." msgstr "La entrada de DNS %(name)s ya existe en el dominio %(domain)s." -#: nova/exception.py:736 +#: nova/exception.py:740 #, python-format msgid "Floating ip not found for address %(address)s." msgstr "" "No se ha encontrado ninguna dirección IP flotante para la dirección " "%(address)s." -#: nova/exception.py:740 +#: nova/exception.py:744 #, python-format msgid "Floating ip not found for host %(host)s." msgstr "No se ha encontrado ninguna dirección IP flotante para el host %(host)s." -#: nova/exception.py:744 +#: nova/exception.py:748 #, python-format msgid "Multiple floating ips are found for address %(address)s." msgstr "Se han encontrado varias ip flotantes para la dirección %(address)s." -#: nova/exception.py:748 +#: nova/exception.py:752 msgid "Floating ip pool not found." msgstr "No se ha encontrado pool de ip flotante." -#: nova/exception.py:753 +#: nova/exception.py:757 msgid "Zero floating ips available." msgstr "No hay ninguna dirección IP flotante disponible." -#: nova/exception.py:759 +#: nova/exception.py:763 #, python-format msgid "Floating ip %(address)s is associated." msgstr "La dirección IP flotante %(address)s está asociada." -#: nova/exception.py:763 +#: nova/exception.py:767 #, python-format msgid "Floating ip %(address)s is not associated." msgstr "La dirección IP flotante %(address)s no está asociada." -#: nova/exception.py:767 +#: nova/exception.py:771 msgid "Zero floating ips exist." msgstr "No existe ninguna dirección IP flotante." -#: nova/exception.py:772 +#: nova/exception.py:776 #, python-format msgid "Interface %(interface)s not found." msgstr "No se ha encontrado la interfaz %(interface)s." 
-#: nova/exception.py:777 nova/api/openstack/compute/contrib/floating_ips.py:97 +#: nova/exception.py:781 nova/api/openstack/compute/contrib/floating_ips.py:97 msgid "Cannot disassociate auto assigned floating ip" msgstr "No se puede desasociar la IP flotante asignada automáticamente" -#: nova/exception.py:782 +#: nova/exception.py:786 #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "No se ha encontrado el par de claves %(name)s para el usuario %(user_id)s" -#: nova/exception.py:786 +#: nova/exception.py:790 #, python-format msgid "Service %(service_id)s could not be found." msgstr "No se ha podido encontrar el servicio %(service_id)s." -#: nova/exception.py:790 +#: nova/exception.py:794 #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "Servicio con host %(host)s binario %(binary)s existe." -#: nova/exception.py:794 +#: nova/exception.py:798 #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "Servicio con host %(host)s asunto %(topic)s existe." -#: nova/exception.py:798 +#: nova/exception.py:802 #, python-format msgid "Host %(host)s could not be found." msgstr "No se ha podido encontrar el host %(host)s." -#: nova/exception.py:802 +#: nova/exception.py:806 #, python-format msgid "Compute host %(host)s could not be found." msgstr "No se ha podido encontrar el host de Compute %(host)s." -#: nova/exception.py:806 +#: nova/exception.py:810 #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "No se ha podido encontrar el binario %(binary)s en el host %(host)s." -#: nova/exception.py:810 +#: nova/exception.py:814 #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "Caducidad de reserva no válida %(expire)s." 
-#: nova/exception.py:814 +#: nova/exception.py:818 #, python-format msgid "" "Change would make usage less than 0 for the following resources: " @@ -953,70 +953,70 @@ msgstr "" "El cambio produciría un uso inferior a 0 para los recursos siguientes: " "%(unders)s." -#: nova/exception.py:819 +#: nova/exception.py:823 msgid "Quota could not be found" msgstr "No se ha podido encontrar la cuota" -#: nova/exception.py:823 +#: nova/exception.py:827 #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "Cuota existente para el proyecto %(project_id)s, recurso %(resource)s" -#: nova/exception.py:828 +#: nova/exception.py:832 #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Recursos de cuota desconocidos %(unknown)s." -#: nova/exception.py:832 +#: nova/exception.py:836 #, python-format msgid "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" "No se ha encontrado la cuota para el usuario %(user_id)s en el proyecto " "%(project_id)s." -#: nova/exception.py:837 +#: nova/exception.py:841 #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "No se ha encontrado la cuota para el proyecto %(project_id)s." -#: nova/exception.py:841 +#: nova/exception.py:845 #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "No se ha encontrado la clase de cuota %(class_name)s." -#: nova/exception.py:845 +#: nova/exception.py:849 #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "No se ha encontrado el uso de cuota para el proyecto %(project_id)s." -#: nova/exception.py:849 +#: nova/exception.py:853 #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "No se ha encontrado la reserva de cuota %(uuid)s." 
-#: nova/exception.py:853 +#: nova/exception.py:857 #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Cuota superada para recursos: %(overs)s" -#: nova/exception.py:857 +#: nova/exception.py:861 #, python-format msgid "Security group %(security_group_id)s not found." msgstr "No se ha encontrado el grupo de seguridad %(security_group_id)s." -#: nova/exception.py:861 +#: nova/exception.py:865 #, python-format msgid "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "" "No se ha encontrado el grupo de seguridad %(security_group_id)s para el " "proyecto %(project_id)s." -#: nova/exception.py:866 +#: nova/exception.py:870 #, python-format msgid "Security group with rule %(rule_id)s not found." msgstr "No se ha encontrado el grupo de seguridad con la regla %(rule_id)s." -#: nova/exception.py:871 +#: nova/exception.py:875 #, python-format msgid "" "Security group %(security_group_name)s already exists for project " @@ -1025,7 +1025,7 @@ msgstr "" "El grupo de seguridad %(security_group_name)s ya existe para el proyecto " "%(project_id)s" -#: nova/exception.py:876 +#: nova/exception.py:880 #, python-format msgid "" "Security group %(security_group_id)s is already associated with the " @@ -1034,7 +1034,7 @@ msgstr "" "El grupo de seguridad %(security_group_id)s ya está asociado con la " "instancia %(instance_id)s" -#: nova/exception.py:881 +#: nova/exception.py:885 #, python-format msgid "" "Security group %(security_group_id)s is not associated with the instance " @@ -1043,14 +1043,14 @@ msgstr "" "El grupo de seguridad %(security_group_id)s no está asociado con la " "instancia %(instance_id)s" -#: nova/exception.py:886 +#: nova/exception.py:890 #, python-format msgid "Security group default rule (%rule_id)s not found." msgstr "" "La regla predeterminada (%rule_id)s del grupo de seguridad no se ha " "encontrado." 
-#: nova/exception.py:890 +#: nova/exception.py:894 msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." @@ -1058,33 +1058,33 @@ msgstr "" "La red requiere port_security_enabled y una subred asociada para aplicar " "grupos de seguridad." -#: nova/exception.py:896 +#: nova/exception.py:900 #, python-format msgid "Rule already exists in group: %(rule)s" msgstr "La regla ya existe en el grupo: %(rule)s" -#: nova/exception.py:900 +#: nova/exception.py:904 msgid "No Unique Match Found." msgstr "No se ha encontrado una sola coincidencia." -#: nova/exception.py:905 +#: nova/exception.py:909 #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "No se ha podido encontrar la migración %(migration_id)s." -#: nova/exception.py:909 +#: nova/exception.py:913 #, python-format msgid "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "" "No se ha encontrado la migración para la instancia %(instance_id)s con el" " estado %(status)s." -#: nova/exception.py:914 +#: nova/exception.py:918 #, python-format msgid "Console pool %(pool_id)s could not be found." msgstr "No se ha podido encontrar la agrupación de consolas %(pool_id)s. " -#: nova/exception.py:918 +#: nova/exception.py:922 #, python-format msgid "" "Console pool with host %(host)s, console_type %(console_type)s and " @@ -1093,7 +1093,7 @@ msgstr "" "El pool de consolas con host %(host)s, console_type %(console_type)s y " "compute_host %(compute_host)s ya existe." -#: nova/exception.py:924 +#: nova/exception.py:928 #, python-format msgid "" "Console pool of type %(console_type)s for compute host %(compute_host)s " @@ -1102,17 +1102,17 @@ msgstr "" "No se ha encontrado la agrupación de consolas de tipo %(console_type)s " "para el host de cálculo %(compute_host)s en el host de proxy %(host)s." -#: nova/exception.py:930 +#: nova/exception.py:934 #, python-format msgid "Console %(console_id)s could not be found." 
msgstr "No se ha podido encontrar la consola %(console_id)s." -#: nova/exception.py:934 +#: nova/exception.py:938 #, python-format msgid "Console for instance %(instance_uuid)s could not be found." msgstr "No se ha podido encontrar la consola para la instancia %(instance_uuid)s." -#: nova/exception.py:938 +#: nova/exception.py:942 #, python-format msgid "" "Console for instance %(instance_uuid)s in pool %(pool_id)s could not be " @@ -1121,99 +1121,106 @@ msgstr "" "No se ha podido encontrar la consola para la instancia %(instance_uuid)s " "en la agrupación %(pool_id)s." -#: nova/exception.py:943 +#: nova/exception.py:947 #, python-format msgid "Invalid console type %(console_type)s" msgstr "Tipo de consola %(console_type)s no válido " -#: nova/exception.py:947 +#: nova/exception.py:951 #, python-format msgid "Unavailable console type %(console_type)s." msgstr "El tipo de consola %(console_type)s no está disponible." -#: nova/exception.py:951 +#: nova/exception.py:955 #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "El puerto de rangos de consola %(min_port)d-%(max_port)d se ha agotado." -#: nova/exception.py:956 +#: nova/exception.py:960 #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "No se ha podido encontrar el tipo %(flavor_id)s." -#: nova/exception.py:960 +#: nova/exception.py:964 #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "No se puede encontrar el sabor con nombre %(flavor_name)s." -#: nova/exception.py:964 +#: nova/exception.py:968 #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" "No se ha encontrado el acceso de sabor para la combinación %(flavor_id)s " "/ %(project_id)s. " -#: nova/exception.py:969 +#: nova/exception.py:973 +#, python-format +msgid "" +"Flavor %(id)d extra spec cannot be updated or created after %(retries)d " +"retries." 
+msgstr "" + +#: nova/exception.py:978 #, python-format msgid "Cell %(cell_name)s doesn't exist." msgstr "La célula %(cell_name)s no existe." -#: nova/exception.py:973 +#: nova/exception.py:982 #, python-format msgid "Cell with name %(name)s already exists." msgstr "Una celda con el nombre %(name)s ya existe." -#: nova/exception.py:977 +#: nova/exception.py:986 #, python-format msgid "Inconsistency in cell routing: %(reason)s" msgstr "Incoherencia en direccionamiento de célula: %(reason)s" -#: nova/exception.py:981 +#: nova/exception.py:990 #, python-format msgid "Service API method not found: %(detail)s" msgstr "No se ha encontrado el método de API de servicio: %(detail)s" -#: nova/exception.py:985 +#: nova/exception.py:994 msgid "Timeout waiting for response from cell" msgstr "Se ha excedido el tiempo de espera de respuesta de la célula" -#: nova/exception.py:989 +#: nova/exception.py:998 #, python-format msgid "Cell message has reached maximum hop count: %(hop_count)s" msgstr "" "El mensaje de célula ha alcanzado la cuenta de saltos máxima: " "%(hop_count)s" -#: nova/exception.py:993 +#: nova/exception.py:1002 msgid "No cells available matching scheduling criteria." msgstr "" "No hay células disponibles que coincidan con los criterios de " "planificación." -#: nova/exception.py:997 +#: nova/exception.py:1006 msgid "Cannot update cells configuration file." msgstr "No se puede actualizar el archivo de configuración de la celda." -#: nova/exception.py:1001 +#: nova/exception.py:1010 #, python-format msgid "Cell is not known for instance %(instance_uuid)s" msgstr "No se conoce la célula en la instancia %(instance_uuid)s" -#: nova/exception.py:1005 +#: nova/exception.py:1014 #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" "No se ha podido encontrar el filtro de host de planificador " "%(filter_name)s." 
-#: nova/exception.py:1009 +#: nova/exception.py:1018 #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" "el sabor %(flavor_id)s no tiene especificaciones extras con clave " "%(extra_specs_key)s" -#: nova/exception.py:1014 +#: nova/exception.py:1023 #, python-format msgid "" "Metric %(name)s could not be found on the compute host node " @@ -1222,67 +1229,67 @@ msgstr "" "La métrica %(name)s no se puede encontrar en el nodo de cómputo anfitrión" " %(host)s:%(node)s." -#: nova/exception.py:1019 +#: nova/exception.py:1028 #, python-format msgid "File %(file_path)s could not be found." msgstr "No se ha podido encontrar el archivo %(file_path)s." -#: nova/exception.py:1023 +#: nova/exception.py:1032 msgid "Zero files could be found." msgstr "No se ha podido encontrar ningún archivo." -#: nova/exception.py:1027 +#: nova/exception.py:1036 #, python-format msgid "Virtual switch associated with the network adapter %(adapter)s not found." msgstr "" "No se ha encontrado ningún conmutador virtual asociado con el adaptador " "de red %(adapter)s." -#: nova/exception.py:1032 +#: nova/exception.py:1041 #, python-format msgid "Network adapter %(adapter)s could not be found." msgstr "No se ha podido encontrar el adaptador de red %(adapter)s." -#: nova/exception.py:1036 +#: nova/exception.py:1045 #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "No se ha podido encontrar la clase %(class_name)s: %(exception)s" -#: nova/exception.py:1040 +#: nova/exception.py:1049 msgid "Action not allowed." msgstr "Acción no permitida. 
" -#: nova/exception.py:1044 +#: nova/exception.py:1053 msgid "Rotation is not allowed for snapshots" msgstr "No se permite la rotación para instantáneas" -#: nova/exception.py:1048 +#: nova/exception.py:1057 msgid "Rotation param is required for backup image_type" msgstr "" "El parámetro de rotación es necesario para el tipo de imagen de copia de " "seguridad " -#: nova/exception.py:1053 nova/tests/compute/test_keypairs.py:144 +#: nova/exception.py:1062 nova/tests/compute/test_keypairs.py:144 #, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "El par de claves '%(key_name)s' ya existe." -#: nova/exception.py:1057 +#: nova/exception.py:1066 #, python-format msgid "Instance %(name)s already exists." msgstr "La instancia %(name)s ya existe." -#: nova/exception.py:1061 +#: nova/exception.py:1070 #, python-format msgid "Flavor with name %(name)s already exists." msgstr "El sabor con nombre %(name)s ya existe." -#: nova/exception.py:1065 +#: nova/exception.py:1074 #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "El sabor con ID %(flavor_id)s ya existe." -#: nova/exception.py:1069 +#: nova/exception.py:1078 #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " @@ -1291,86 +1298,86 @@ msgstr "" "Versión de acceso ya existe para la combinación de la versión " "%(flavor_id)s y el proyecto %(project_id)s." 
-#: nova/exception.py:1074 +#: nova/exception.py:1083 #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s no está en un almacenamiento compartido: %(reason)s" -#: nova/exception.py:1078 +#: nova/exception.py:1087 #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s no está en un almacenamiento local: %(reason)s" -#: nova/exception.py:1082 +#: nova/exception.py:1091 #, python-format msgid "Storage error: %(reason)s" msgstr "" -#: nova/exception.py:1086 +#: nova/exception.py:1095 #, python-format msgid "Migration error: %(reason)s" msgstr "Error en migración: %(reason)s" -#: nova/exception.py:1090 +#: nova/exception.py:1099 #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "Error de pre-verificación de migraión: %(reason)s" -#: nova/exception.py:1094 +#: nova/exception.py:1103 #, python-format msgid "Malformed message body: %(reason)s" msgstr "Cuerpo de mensaje con formato incorrecto: %(reason)s" -#: nova/exception.py:1100 +#: nova/exception.py:1109 #, python-format msgid "Could not find config at %(path)s" msgstr "No se ha podido encontrar configuración en %(path)s" -#: nova/exception.py:1104 +#: nova/exception.py:1113 #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "No se ha podido cargar aplicación de pegar '%(name)s' desde %(path)s " -#: nova/exception.py:1108 +#: nova/exception.py:1117 msgid "When resizing, instances must change flavor!" msgstr "Al redimensionarse, las instancias deben cambiar de sabor." 
-#: nova/exception.py:1112 +#: nova/exception.py:1121 #, python-format msgid "Resize error: %(reason)s" msgstr "Error de redimensionamiento: %(reason)s" -#: nova/exception.py:1116 +#: nova/exception.py:1125 #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "El disco del servidor fue incapaz de re-escalarse debido a: %(reason)s" -#: nova/exception.py:1120 +#: nova/exception.py:1129 msgid "Flavor's memory is too small for requested image." msgstr "La memoria del sabor es demasiado pequeña para la imagen solicitada." -#: nova/exception.py:1124 +#: nova/exception.py:1133 msgid "Flavor's disk is too small for requested image." msgstr "El disco del sabor es demasiado pequeño para la imagen solicitada." -#: nova/exception.py:1128 +#: nova/exception.py:1137 #, python-format msgid "Insufficient free memory on compute node to start %(uuid)s." msgstr "" "No hay suficiente memoria libre en el nodo de cálculo para iniciar " "%(uuid)s." -#: nova/exception.py:1132 +#: nova/exception.py:1141 #, python-format msgid "No valid host was found. %(reason)s" msgstr "No se ha encontrado ningún host válido. %(reason)s" -#: nova/exception.py:1137 +#: nova/exception.py:1146 #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "Cuota excedida: código=%(code)s" -#: nova/exception.py:1144 +#: nova/exception.py:1153 #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used " @@ -1379,44 +1386,44 @@ msgstr "" "Se ha superado la cuota para %(overs)s: se ha solicitado %(req)s, pero ya" " se utiliza %(used)d de %(allowed)d %(resource)s." -#: nova/exception.py:1149 +#: nova/exception.py:1158 msgid "Maximum number of floating ips exceeded" msgstr "Se ha superado el número máximo de IP flotantes" -#: nova/exception.py:1153 +#: nova/exception.py:1162 msgid "Maximum number of fixed ips exceeded" msgstr "Se ha superado el número máximo de IP fijas." 
-#: nova/exception.py:1157 +#: nova/exception.py:1166 #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "El número máximo de elementos de metadatos supera %(allowed)d" -#: nova/exception.py:1161 +#: nova/exception.py:1170 msgid "Personality file limit exceeded" msgstr "Se ha superado el límite de archivo de personalidad" -#: nova/exception.py:1165 +#: nova/exception.py:1174 msgid "Personality file path too long" msgstr "Vía de acceso de archivo de personalidad demasiado larga" -#: nova/exception.py:1169 +#: nova/exception.py:1178 msgid "Personality file content too long" msgstr "Contenido del archivo de personalidad demasiado largo" -#: nova/exception.py:1173 nova/tests/compute/test_keypairs.py:155 +#: nova/exception.py:1182 nova/tests/compute/test_keypairs.py:155 msgid "Maximum number of key pairs exceeded" msgstr "Se ha superado el número máximo de pares de claves" -#: nova/exception.py:1178 +#: nova/exception.py:1187 msgid "Maximum number of security groups or rules exceeded" msgstr "Se ha superado el número máximo de grupos o reglas de seguridad" -#: nova/exception.py:1182 +#: nova/exception.py:1191 msgid "Maximum number of ports exceeded" msgstr "El número máximo de puertos ha sido excedido." -#: nova/exception.py:1186 +#: nova/exception.py:1195 #, python-format msgid "" "Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " @@ -1425,144 +1432,144 @@ msgstr "" "Agregado %(aggregate_id)s: la acción '%(action)s' ha producido un error: " "%(reason)s." -#: nova/exception.py:1191 +#: nova/exception.py:1200 #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "No se ha podido encontrar el agregado %(aggregate_id)s." -#: nova/exception.py:1195 +#: nova/exception.py:1204 #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "El agregado %(aggregate_name)s ya existe." 
-#: nova/exception.py:1199 +#: nova/exception.py:1208 #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "El agregado %(aggregate_id)s no tiene ningún host %(host)s." -#: nova/exception.py:1203 +#: nova/exception.py:1212 #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" "El agregado %(aggregate_id)s no tiene metadatos con la clave " "%(metadata_key)s." -#: nova/exception.py:1208 +#: nova/exception.py:1217 #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "El agregado %(aggregate_id)s ya tiene el host %(host)s." -#: nova/exception.py:1212 +#: nova/exception.py:1221 msgid "Unable to create flavor" msgstr "Incapaz de crear sabor" -#: nova/exception.py:1216 +#: nova/exception.py:1225 #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "" "No se ha podido establecer la contraseña de administrador en %(instance)s" " debido a %(reason)s" -#: nova/exception.py:1222 +#: nova/exception.py:1231 #, python-format msgid "Detected existing vlan with id %(vlan)d" msgstr "Se ha detectado una vlan existente con el ID %(vlan)d" -#: nova/exception.py:1226 +#: nova/exception.py:1235 msgid "There was a conflict when trying to complete your request." msgstr "Hubo un conflicto tratándo de completar su solicitud." -#: nova/exception.py:1232 +#: nova/exception.py:1241 #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "No se ha podido encontrar la instancia %(instance_id)s." -#: nova/exception.py:1236 +#: nova/exception.py:1245 #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "" "No se ha podido encontrar la memoria caché de información para la " "instancia %(instance_uuid)s." -#: nova/exception.py:1241 +#: nova/exception.py:1250 #, python-format msgid "Node %(node_id)s could not be found." msgstr "No se ha podido encontrar el nodo %(node_id)s." 
-#: nova/exception.py:1245 +#: nova/exception.py:1254 #, python-format msgid "Node with UUID %(node_uuid)s could not be found." msgstr "No se ha podido encontrar el nodo con el UUID %(node_uuid)s." -#: nova/exception.py:1249 +#: nova/exception.py:1258 #, python-format msgid "Marker %(marker)s could not be found." msgstr "No se ha podido encontrar el marcador %(marker)s." -#: nova/exception.py:1254 +#: nova/exception.py:1263 #, python-format msgid "Invalid id: %(val)s (expecting \"i-...\")." msgstr "ID no válido: %(val)s (se espera \"i-...\")." -#: nova/exception.py:1258 +#: nova/exception.py:1267 #, python-format msgid "Could not fetch image %(image_id)s" msgstr "No se ha podido captar la imagen %(image_id)s" -#: nova/exception.py:1262 +#: nova/exception.py:1271 #, python-format msgid "Could not upload image %(image_id)s" msgstr "No se ha podido cargar la imagen %(image_id)s" -#: nova/exception.py:1266 +#: nova/exception.py:1275 #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "La tarea %(task_name)s ya se está ejecutando en el host %(host)s" -#: nova/exception.py:1270 +#: nova/exception.py:1279 #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "La tarea %(task_name)s no se está ejecutando en el host %(host)s" -#: nova/exception.py:1274 +#: nova/exception.py:1283 #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "La instancia %(instance_uuid)s está bloqueada" -#: nova/exception.py:1278 +#: nova/exception.py:1287 #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "Valor inválido para la opción de configuración de controlador: %(option)s" -#: nova/exception.py:1282 +#: nova/exception.py:1291 #, python-format msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "" "No se ha podido montar la unidad de configuración vfat. %(operation)s ha " "fallado. 
Error: %(error)s" -#: nova/exception.py:1287 +#: nova/exception.py:1296 #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "" "Formato de unidad de configuración desconocido %(format)s. Seleccione uno" " de iso9660 o vfat." -#: nova/exception.py:1292 +#: nova/exception.py:1301 #, python-format msgid "Failed to attach network adapter device to %(instance)s" msgstr "" "Se ha encontrado un error en la conexión del dispositivo de adaptador de " "red a %(instance)s." -#: nova/exception.py:1296 +#: nova/exception.py:1305 #, python-format msgid "Failed to detach network adapter device from %(instance)s" msgstr "" "Se ha encontrado un error en la desconexión del dispositivo de adaptador " "de red a %(instance)s." -#: nova/exception.py:1300 +#: nova/exception.py:1309 #, python-format msgid "" "User data too large. User data must be no larger than %(maxsize)s bytes " @@ -1572,11 +1579,11 @@ msgstr "" "más de %(maxsize)s bytes una vez se ha codificado base64. Sus datos " "tienen %(length)d bytes." -#: nova/exception.py:1306 +#: nova/exception.py:1315 msgid "User data needs to be valid base 64." msgstr "Los datos de usuario deben ser de base 64 válidos." -#: nova/exception.py:1310 +#: nova/exception.py:1319 #, python-format msgid "" "Unexpected task state: expecting %(expected)s but the actual state is " @@ -1585,7 +1592,7 @@ msgstr "" "Estado de tarea inesperado: se esperaba %(expected)s pero el estado es " "%(actual)s" -#: nova/exception.py:1319 +#: nova/exception.py:1328 #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not " @@ -1594,12 +1601,12 @@ msgstr "" "La acción para request_id %(request_id)s en la instancia " "%(instance_uuid)s no se ha encontrado." 
-#: nova/exception.py:1324 +#: nova/exception.py:1333 #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "No se ha encontrado el suceso %(event)s para el id de acción %(action_id)s" -#: nova/exception.py:1328 +#: nova/exception.py:1337 #, python-format msgid "" "Unexpected VM state: expecting %(expected)s but the actual state is " @@ -1608,21 +1615,21 @@ msgstr "" "Estado de VM inesperado: se esperaba %(expected)s pero el estado actual " "es %(actual)s" -#: nova/exception.py:1333 +#: nova/exception.py:1342 #, python-format msgid "The CA file for %(project)s could not be found" msgstr "No se ha podido encontrar el archivo CA para %(project)s " -#: nova/exception.py:1337 +#: nova/exception.py:1346 #, python-format msgid "The CRL file for %(project)s could not be found" msgstr "No se ha podido encontrar el archivo CRL para %(project)s" -#: nova/exception.py:1341 +#: nova/exception.py:1350 msgid "Instance recreate is not supported." msgstr "La recreación de la instancia no está soportada." -#: nova/exception.py:1345 +#: nova/exception.py:1354 #, python-format msgid "" "The service from servicegroup driver %(driver)s is temporarily " @@ -1631,21 +1638,21 @@ msgstr "" "El servicio del controlador servicegroup %(driver)s está temporalmente no" " disponible." -#: nova/exception.py:1350 +#: nova/exception.py:1359 #, python-format msgid "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" "%(binary)s ha intentado un acceso de bases de datos directo que no está " "permitido por la política." 
-#: nova/exception.py:1355 +#: nova/exception.py:1364 #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" "El tipo de virtualización '%(virt)s' no está soportado por este " "controlador de cálculo" -#: nova/exception.py:1360 +#: nova/exception.py:1369 #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt " @@ -1654,123 +1661,123 @@ msgstr "" "El hardware solicitado '%(model)s' no está soportado por el controlador " "de virtualización '%(virt)s'" -#: nova/exception.py:1365 +#: nova/exception.py:1374 #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "Datos Base-64 inválidos para el archivo %(path)s" -#: nova/exception.py:1369 +#: nova/exception.py:1378 #, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "Construcción de instancia %(instance_uuid)s abortada: %(reason)s" -#: nova/exception.py:1373 +#: nova/exception.py:1382 #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "Construcción de instancia %(instance_uuid)s reprogramada: %(reason)s" -#: nova/exception.py:1378 +#: nova/exception.py:1387 #, python-format msgid "Shadow table with name %(name)s already exists." msgstr "Una Tabla Shadow con nombre %(name)s ya existe." 
-#: nova/exception.py:1383 +#: nova/exception.py:1392 #, python-format msgid "Instance rollback performed due to: %s" msgstr "Reversión de instancia ejecutada debido a: %s" -#: nova/exception.py:1389 +#: nova/exception.py:1398 #, python-format msgid "Unsupported object type %(objtype)s" msgstr "Tipo de objeto no soportado %(objtype)s" -#: nova/exception.py:1393 +#: nova/exception.py:1402 #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "No se puede ejecutar %(method)s en un objecto huérfano %(objtype)s" -#: nova/exception.py:1397 +#: nova/exception.py:1406 #, python-format msgid "Version %(objver)s of %(objname)s is not supported" msgstr "Versión %(objver)s de %(objname)s no está soportada" -#: nova/exception.py:1401 +#: nova/exception.py:1410 #, python-format msgid "Cannot modify readonly field %(field)s" msgstr "" -#: nova/exception.py:1405 +#: nova/exception.py:1414 #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "La acción objeto %(action)s falló debido a: %(reason)s" -#: nova/exception.py:1409 +#: nova/exception.py:1418 #, python-format msgid "Field %(field)s of %(objname)s is not an instance of Field" msgstr "El campo %(field)s de %(objname)s no es una instancia de campo." -#: nova/exception.py:1413 +#: nova/exception.py:1422 #, python-format msgid "Core API extensions are missing: %(missing_apis)s" msgstr "Faltan las extensiones Core API : %(missing_apis)s" -#: nova/exception.py:1417 +#: nova/exception.py:1426 #, python-format msgid "Error during following call to agent: %(method)s" msgstr "Error durante la siguiente llamada al agente: %(method)s" -#: nova/exception.py:1421 +#: nova/exception.py:1430 #, python-format msgid "Unable to contact guest agent. The following call timed out: %(method)s" msgstr "" "Unposible contactar al agente invitado. 
La siguiente llamada agotó su " "tiempo de espera: %(method)s" -#: nova/exception.py:1426 +#: nova/exception.py:1435 #, python-format msgid "Agent does not support the call: %(method)s" msgstr "El agente no soporta la llamada %(method)s" -#: nova/exception.py:1430 +#: nova/exception.py:1439 #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "No se ha podido encontrar el grupo de instancias %(group_uuid)s." -#: nova/exception.py:1434 +#: nova/exception.py:1443 #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "El grupo de instancias %(group_uuid)s ya existe." -#: nova/exception.py:1438 +#: nova/exception.py:1447 #, python-format msgid "Instance group %(group_uuid)s has no metadata with key %(metadata_key)s." msgstr "" "El grupo de instancias %(group_uuid)s no tiene metadatos con clave " "%(metadata_key)s" -#: nova/exception.py:1443 +#: nova/exception.py:1452 #, python-format msgid "Instance group %(group_uuid)s has no member with id %(instance_id)s." msgstr "" "El grupo de instancias %(group_uuid)s no tiene miembro con identificador " "%(instance_id)s." -#: nova/exception.py:1448 +#: nova/exception.py:1457 #, python-format msgid "Instance group %(group_uuid)s has no policy %(policy)s." msgstr "El grupo de instancias %(group_uuid)s no tiene política %(policy)s" -#: nova/exception.py:1452 +#: nova/exception.py:1461 #, python-format msgid "Number of retries to plugin (%(num_retries)d) exceeded." msgstr "Se ha excedido el número de reintentos para el plugin (%(num_retries)d)." -#: nova/exception.py:1456 +#: nova/exception.py:1465 #, python-format msgid "There was an error with the download module %(module)s. %(reason)s" msgstr "Hubo un error con el módulo de descarga %(module)s. %(reason)s" -#: nova/exception.py:1461 +#: nova/exception.py:1470 #, python-format msgid "" "The metadata for this location will not work with this module %(module)s." 
@@ -1779,37 +1786,37 @@ msgstr "" "Los metadatos para esta ubicación no funcionarán con este módulo " "%(module)s. %(reason)s." -#: nova/exception.py:1466 +#: nova/exception.py:1475 #, python-format msgid "The method %(method_name)s is not implemented." msgstr "El método %(method_name)s no está implementado." -#: nova/exception.py:1470 +#: nova/exception.py:1479 #, python-format msgid "The module %(module)s is misconfigured: %(reason)s." msgstr "El módulo %(module)s está mal configurado: %(reason)s" -#: nova/exception.py:1474 +#: nova/exception.py:1483 #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "Error al crear monitor de recursos: %(monitor)s" -#: nova/exception.py:1478 +#: nova/exception.py:1487 #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "La dirección PCI %(address)s tiene un formato incorrecto." -#: nova/exception.py:1482 +#: nova/exception.py:1491 #, python-format msgid "PCI device %(id)s not found" msgstr "Dispositivo PCI %(id)s no encontrado" -#: nova/exception.py:1486 +#: nova/exception.py:1495 #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." msgstr "Dispositivo PCI %(node_id)s:%(address)s no encontrado." 
-#: nova/exception.py:1490 +#: nova/exception.py:1499 #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " @@ -1818,7 +1825,7 @@ msgstr "" "el dispositivo PCI %(compute_node_id)s:%(address)s está %(status)s en " "lugar de %(hopestatus)s" -#: nova/exception.py:1496 +#: nova/exception.py:1505 #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead " @@ -1827,12 +1834,12 @@ msgstr "" "El dueño del dispositivo PCI %(compute_node_id)s:%(address)s es %(owner)s" " en lugar de %(hopeowner)s" -#: nova/exception.py:1502 +#: nova/exception.py:1511 #, python-format msgid "PCI device request (%requests)s failed" msgstr "Solicitud de dispositivo PCI (%request)s fallida" -#: nova/exception.py:1507 +#: nova/exception.py:1516 #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty " @@ -1841,32 +1848,32 @@ msgstr "" "Intento de consumir dispositivo PCI %(compute_node_id)s:%(address)s de " "pool vacío" -#: nova/exception.py:1513 +#: nova/exception.py:1522 #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "Definición de alias PCI inválido: %(reason)s" -#: nova/exception.py:1517 +#: nova/exception.py:1526 #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "Alias PCI %(alias)s no definido" -#: nova/exception.py:1522 +#: nova/exception.py:1531 #, python-format msgid "Not enough parameters: %(reason)s" msgstr "No hay suficientes parámetros: %(reason)s" -#: nova/exception.py:1527 +#: nova/exception.py:1536 #, python-format msgid "Invalid PCI devices Whitelist config %(reason)s" msgstr "Configuración de lista permisiva de dispositivos PCI inválida %(reason)s" -#: nova/exception.py:1531 +#: nova/exception.py:1540 #, python-format msgid "Cannot change %(node_id)s to %(new_node_id)s" msgstr "No se puede cambiar %(node_id)s hacia %(new_node_id)s" -#: nova/exception.py:1541 +#: nova/exception.py:1550 #, python-format msgid "" 
"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: " @@ -1875,39 +1882,39 @@ msgstr "" "Fallo al preparar el dispositivo PCI %(id)s para la instancia " "%(instance_uuid)s: %(reason)s" -#: nova/exception.py:1546 +#: nova/exception.py:1555 #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "Fallo al desasociar el dispositivo PCI %(dev)s: %(reason)s" -#: nova/exception.py:1550 +#: nova/exception.py:1559 #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "El hipervisor %(type)s no soporta dispositivos PCI" -#: nova/exception.py:1554 +#: nova/exception.py:1563 #, python-format msgid "Key manager error: %(reason)s" msgstr "error de administrador de claves: %(reason)s" -#: nova/exception.py:1558 +#: nova/exception.py:1567 #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "Fallo al remover el(los) volumen(es): (%(reason)s)" -#: nova/exception.py:1562 +#: nova/exception.py:1571 #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "Modelo de vídeo proporcionado (%(model)s) no está sopotado." -#: nova/exception.py:1566 +#: nova/exception.py:1575 #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "" "La ruta del dispositivo RNG proporcionada: (%(path)s) no está presente en" " el anfitrión." -#: nova/exception.py:1571 +#: nova/exception.py:1580 #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the " @@ -1916,24 +1923,52 @@ msgstr "" "La cantidad solicitada de memoria de vídeo %(req_vram)d es mayor que la " "máxima permitida por el sabor %(max_vram)d." -#: nova/exception.py:1576 +#: nova/exception.py:1585 #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "La acción watchdog proporcionada (%(action)s) no está soportada." 
-#: nova/exception.py:1580 +#: nova/exception.py:1589 msgid "" -"Block migration of instances with config drives is not supported in " -"libvirt." +"Live migration of instances with config drives is not supported in " +"libvirt unless libvirt instance path and drive data is shared across " +"compute nodes." msgstr "" -"La migración de bloque de instancias con discos configurados no está " -"soportada en libvirt." -#: nova/exception.py:1585 +#: nova/exception.py:1595 +#, python-format +msgid "" +"Host %(server)s is running an old version of Nova, live migrations " +"involving that version may cause data loss. Upgrade Nova on %(server)s " +"and try again." +msgstr "" + +#: nova/exception.py:1601 #, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "Error durante la extracción de la instancia %(instance_id)s: %(reason)s" +#: nova/exception.py:1605 +#, python-format +msgid "" +"Image vCPU limits %(sockets)d:%(cores)d:%(threads)d exceeds permitted " +"%(maxsockets)d:%(maxcores)d:%(maxthreads)d" +msgstr "" + +#: nova/exception.py:1610 +#, python-format +msgid "" +"Image vCPU topology %(sockets)d:%(cores)d:%(threads)d exceeds permitted " +"%(maxsockets)d:%(maxcores)d:%(maxthreads)d" +msgstr "" + +#: nova/exception.py:1615 +#, python-format +msgid "" +"Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to" +" satisfy for vcpus count %(vcpus)d" +msgstr "" + #: nova/filters.py:84 #, python-format msgid "Filter %s returned 0 hosts" @@ -1957,114 +1992,114 @@ msgstr "Ha fallado la entrega de reservas %s|" msgid "Failed to roll back reservations %s" msgstr "Fallo al revertir las reservas %s" -#: nova/service.py:160 +#: nova/service.py:161 #, python-format msgid "Starting %(topic)s node (version %(version)s)" msgstr "Iniciando el nodo %(topic)s (versión %(version)s)" -#: nova/service.py:285 +#: nova/service.py:286 msgid "Service killed that has no database entry" msgstr "Se detuvo un servicio sin entrada en la base de 
datos" -#: nova/service.py:297 +#: nova/service.py:298 msgid "Service error occurred during cleanup_host" msgstr "Ha ocurrido un error de servicio durante cleanup_host" -#: nova/service.py:314 +#: nova/service.py:315 #, python-format msgid "Temporary directory is invalid: %s" msgstr "El directorio temporal no es válido: %s" -#: nova/service.py:339 +#: nova/service.py:340 #, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "El valor %(worker_name)s de %(workers)s es inválido, debe ser mayor que 0." -#: nova/service.py:424 +#: nova/service.py:433 msgid "serve() can only be called once" msgstr "serve() sólo se puede llamar una vez " -#: nova/utils.py:148 +#: nova/utils.py:147 #, python-format msgid "Expected to receive %(exp)s bytes, but actually %(act)s" msgstr "Se esperaba recibir %(exp)s bytes, se han recibido %(act)s" -#: nova/utils.py:354 +#: nova/utils.py:353 #, python-format msgid "Couldn't get IPv4 : %(ex)s" msgstr "No se ha podido obtener IPv4: %(ex)s" -#: nova/utils.py:370 +#: nova/utils.py:369 #, python-format msgid "IPv4 address is not found.: %s" msgstr "Dirección IPv4 no encontrada: %s" -#: nova/utils.py:373 +#: nova/utils.py:372 #, python-format msgid "Couldn't get IPv4 of %(interface)s : %(ex)s" msgstr "No se puede obtener la IPv4 de %(interface)s : %(ex)s" -#: nova/utils.py:388 +#: nova/utils.py:387 #, python-format msgid "Link Local address is not found.:%s" msgstr "No se encuentra la dirección del enlace local.:%s" -#: nova/utils.py:391 +#: nova/utils.py:390 #, python-format msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" msgstr "No se pudo obtener enlace de la ip local de %(interface)s :%(ex)s" -#: nova/utils.py:412 +#: nova/utils.py:411 #, python-format msgid "Invalid backend: %s" msgstr "backend inválido: %s" -#: nova/utils.py:457 +#: nova/utils.py:454 #, python-format msgid "Expected object of type: %s" msgstr "Se esperaba un objeto de tipo: %s" -#: nova/utils.py:485 +#: 
nova/utils.py:482 #, python-format msgid "Invalid server_string: %s" msgstr "Serie de servidor no válida: %s" -#: nova/utils.py:776 nova/virt/configdrive.py:177 +#: nova/utils.py:773 #, python-format msgid "Could not remove tmpdir: %s" msgstr "No se ha podido eliminar directorio temporal: %s" -#: nova/utils.py:966 +#: nova/utils.py:963 #, python-format msgid "%s is not a string or unicode" msgstr "%s no es una serie o unicode" -#: nova/utils.py:970 +#: nova/utils.py:967 #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "%(name)s requiere de, al menos, %(min_length)s caracteres." -#: nova/utils.py:975 +#: nova/utils.py:972 #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "%(name)s tiene más de %(max_length)s caracteres." -#: nova/utils.py:985 +#: nova/utils.py:982 #, python-format msgid "%(value_name)s must be an integer" msgstr "%(value_name)s debe ser un entero" -#: nova/utils.py:991 +#: nova/utils.py:988 #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s debe ser >= %(min_value)d" -#: nova/utils.py:997 +#: nova/utils.py:994 #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s debe ser <= %(max_value)d" -#: nova/utils.py:1031 +#: nova/utils.py:1028 #, python-format msgid "Hypervisor version %s is invalid." 
msgstr "" @@ -2074,32 +2109,32 @@ msgstr "" msgid "Failed to load %(cfgfile)s: %(ex)s" msgstr "Fallo al cargar %(cfgfile)s: %(ex)s" -#: nova/wsgi.py:132 +#: nova/wsgi.py:133 #, python-format msgid "Could not bind to %(host)s:%(port)s" msgstr "No se puede asociar a %(host)s:%(port)s" -#: nova/wsgi.py:137 +#: nova/wsgi.py:138 #, python-format msgid "%(name)s listening on %(host)s:%(port)s" msgstr "%(name)s está escuchando en %(host)s:%(port)s" -#: nova/wsgi.py:152 nova/openstack/common/sslutils.py:50 +#: nova/wsgi.py:159 nova/openstack/common/sslutils.py:50 #, python-format msgid "Unable to find cert_file : %s" msgstr "No se puede encontrar cert_file: %s" -#: nova/wsgi.py:156 nova/openstack/common/sslutils.py:53 +#: nova/wsgi.py:163 nova/openstack/common/sslutils.py:53 #, python-format msgid "Unable to find ca_file : %s" msgstr "No se puede encontrar ca_file: %s" -#: nova/wsgi.py:160 nova/openstack/common/sslutils.py:56 +#: nova/wsgi.py:167 nova/openstack/common/sslutils.py:56 #, python-format msgid "Unable to find key_file : %s" msgstr "No se puede encontrar key_file: %s" -#: nova/wsgi.py:164 nova/openstack/common/sslutils.py:59 +#: nova/wsgi.py:171 nova/openstack/common/sslutils.py:59 msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" @@ -2107,20 +2142,20 @@ msgstr "" "Al ejecutar el servidor en modalidad SSL, debe especificar un valor para " "las opciones cert_file y key_file en el archivo de configuración" -#: nova/wsgi.py:195 +#: nova/wsgi.py:202 #, python-format msgid "Failed to start %(name)s on %(host)s:%(port)s with SSL support" msgstr "No se ha podido iniciar %(name)s en %(host)s:%(port)s con soporte SSL" -#: nova/wsgi.py:223 +#: nova/wsgi.py:238 msgid "Stopping WSGI server." msgstr "Deteniendo el servidor WSGI. " -#: nova/wsgi.py:242 +#: nova/wsgi.py:258 msgid "WSGI server has stopped." msgstr "El servidor WSGI se ha detenido." 
-#: nova/wsgi.py:311 +#: nova/wsgi.py:327 msgid "You must implement __call__" msgstr "Debe implementar __call__" @@ -2201,155 +2236,155 @@ msgstr "Entorno: %s" msgid "Unknown error occurred." msgstr "Ha ocurrido un error desconocido." -#: nova/api/ec2/cloud.py:395 +#: nova/api/ec2/cloud.py:391 #, python-format msgid "Create snapshot of volume %s" msgstr "Crear instantánea del volumen %s" -#: nova/api/ec2/cloud.py:420 +#: nova/api/ec2/cloud.py:416 #, python-format msgid "Could not find key pair(s): %s" msgstr "No se ha podido encontrar par(es) de claves: %s " -#: nova/api/ec2/cloud.py:436 +#: nova/api/ec2/cloud.py:432 #, python-format msgid "Create key pair %s" msgstr "Creando par de claves %s" -#: nova/api/ec2/cloud.py:448 +#: nova/api/ec2/cloud.py:444 #, python-format msgid "Import key %s" msgstr "Importar la clave %s" -#: nova/api/ec2/cloud.py:461 +#: nova/api/ec2/cloud.py:457 #, python-format msgid "Delete key pair %s" msgstr "Borrar para de claves %s" -#: nova/api/ec2/cloud.py:603 nova/api/ec2/cloud.py:733 +#: nova/api/ec2/cloud.py:599 nova/api/ec2/cloud.py:729 msgid "need group_name or group_id" msgstr "se necesita group_name o group_id" -#: nova/api/ec2/cloud.py:608 +#: nova/api/ec2/cloud.py:604 msgid "can't build a valid rule" msgstr "No se ha podido crear una regla válida" -#: nova/api/ec2/cloud.py:616 +#: nova/api/ec2/cloud.py:612 #, python-format msgid "Invalid IP protocol %(protocol)s" msgstr "Protocolo IP no válido %(protocol)s" -#: nova/api/ec2/cloud.py:650 nova/api/ec2/cloud.py:686 +#: nova/api/ec2/cloud.py:646 nova/api/ec2/cloud.py:682 msgid "No rule for the specified parameters." msgstr "No hay regla para los parámetros especificados." 
-#: nova/api/ec2/cloud.py:764 +#: nova/api/ec2/cloud.py:760 #, python-format msgid "Get console output for instance %s" msgstr "Obtener salida de la consola para la instancia %s" -#: nova/api/ec2/cloud.py:836 +#: nova/api/ec2/cloud.py:832 #, python-format msgid "Create volume from snapshot %s" msgstr "Crear volumen desde la instantánea %s" -#: nova/api/ec2/cloud.py:840 nova/api/openstack/compute/contrib/volumes.py:243 +#: nova/api/ec2/cloud.py:836 nova/api/openstack/compute/contrib/volumes.py:243 #, python-format msgid "Create volume of %s GB" msgstr "Crear volumen de %s GB" -#: nova/api/ec2/cloud.py:880 +#: nova/api/ec2/cloud.py:876 #, python-format msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" "Conectar el volumen %(volume_id)s a la instancia %(instance_id)s en " "%(device)s" -#: nova/api/ec2/cloud.py:910 nova/api/openstack/compute/contrib/volumes.py:506 +#: nova/api/ec2/cloud.py:906 nova/api/openstack/compute/contrib/volumes.py:506 #, python-format msgid "Detach volume %s" msgstr "Desasociar volumen %s" -#: nova/api/ec2/cloud.py:1242 +#: nova/api/ec2/cloud.py:1238 msgid "Allocate address" msgstr "Asignar dirección" -#: nova/api/ec2/cloud.py:1247 +#: nova/api/ec2/cloud.py:1243 #, python-format msgid "Release address %s" msgstr "Liberar dirección %s" -#: nova/api/ec2/cloud.py:1252 +#: nova/api/ec2/cloud.py:1248 #, python-format msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "Asociar dirección %(public_ip)s a instancia %(instance_id)s" -#: nova/api/ec2/cloud.py:1262 +#: nova/api/ec2/cloud.py:1258 msgid "Unable to associate IP Address, no fixed_ips." msgstr "No se puede asociar la dirección IP, sin fixed_ips." 
-#: nova/api/ec2/cloud.py:1270 -#: nova/api/openstack/compute/contrib/floating_ips.py:249 +#: nova/api/ec2/cloud.py:1266 +#: nova/api/openstack/compute/contrib/floating_ips.py:251 #, python-format msgid "multiple fixed_ips exist, using the first: %s" msgstr "existen múltiples fixed_ips, utilizando la primera: %s" -#: nova/api/ec2/cloud.py:1283 +#: nova/api/ec2/cloud.py:1279 #, python-format msgid "Disassociate address %s" msgstr "Desasociar dirección %s" -#: nova/api/ec2/cloud.py:1300 nova/api/openstack/compute/servers.py:918 +#: nova/api/ec2/cloud.py:1296 nova/api/openstack/compute/servers.py:918 #: nova/api/openstack/compute/plugins/v3/multiple_create.py:64 msgid "min_count must be <= max_count" msgstr "min_count debe ser <= max_count " -#: nova/api/ec2/cloud.py:1332 +#: nova/api/ec2/cloud.py:1328 msgid "Image must be available" msgstr "La imagen debe estar disponible " -#: nova/api/ec2/cloud.py:1429 +#: nova/api/ec2/cloud.py:1424 #, python-format msgid "Reboot instance %r" msgstr "Reiniciar instancia %r" -#: nova/api/ec2/cloud.py:1542 +#: nova/api/ec2/cloud.py:1537 #, python-format msgid "De-registering image %s" msgstr "Des-registrando la imagen %s" -#: nova/api/ec2/cloud.py:1558 +#: nova/api/ec2/cloud.py:1553 msgid "imageLocation is required" msgstr "Se necesita imageLocation" -#: nova/api/ec2/cloud.py:1578 +#: nova/api/ec2/cloud.py:1573 #, python-format msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "Imagen registrada %(image_location)s con el id %(image_id)s" -#: nova/api/ec2/cloud.py:1639 +#: nova/api/ec2/cloud.py:1634 msgid "user or group not specified" msgstr "usuario o grupo no especificado" -#: nova/api/ec2/cloud.py:1642 +#: nova/api/ec2/cloud.py:1637 msgid "only group \"all\" is supported" msgstr "sólo el grupo \"all\" está soportado" -#: nova/api/ec2/cloud.py:1645 +#: nova/api/ec2/cloud.py:1640 msgid "operation_type must be add or remove" msgstr "operation_type debe ser añadir o eliminar" -#: nova/api/ec2/cloud.py:1647 +#: 
nova/api/ec2/cloud.py:1642 #, python-format msgid "Updating image %s publicity" msgstr "Actualizando imagen %s públicamente" -#: nova/api/ec2/cloud.py:1660 +#: nova/api/ec2/cloud.py:1655 #, python-format msgid "Not allowed to modify attributes for image %s" msgstr "No está permitido modificar los atributos para la imagen %s" -#: nova/api/ec2/cloud.py:1686 +#: nova/api/ec2/cloud.py:1685 #, python-format msgid "" "Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not " @@ -2358,46 +2393,48 @@ msgstr "" "Valor no válido '%(ec2_instance_id)s' para el ID de instancia. La " "instancia no tiene ningún volumen conectado en la raíz (%(root)s)." -#: nova/api/ec2/cloud.py:1717 +#: nova/api/ec2/cloud.py:1718 #, python-format -msgid "Couldn't stop instance within %d sec" -msgstr "No se puede detener una instancia en menos de %d segundos" +msgid "" +"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: " +"%(vm_state)s, current task_state: %(task_state)s" +msgstr "" -#: nova/api/ec2/cloud.py:1736 +#: nova/api/ec2/cloud.py:1742 #, python-format msgid "image of %(instance)s at %(now)s" msgstr "imagen de %(instance)s en %(now)s" -#: nova/api/ec2/cloud.py:1761 nova/api/ec2/cloud.py:1811 +#: nova/api/ec2/cloud.py:1767 nova/api/ec2/cloud.py:1817 msgid "resource_id and tag are required" msgstr "resource_id y tag son necesarios" -#: nova/api/ec2/cloud.py:1765 nova/api/ec2/cloud.py:1815 +#: nova/api/ec2/cloud.py:1771 nova/api/ec2/cloud.py:1821 msgid "Expecting a list of resources" msgstr "Esperando una lista de recursos" -#: nova/api/ec2/cloud.py:1770 nova/api/ec2/cloud.py:1820 -#: nova/api/ec2/cloud.py:1878 +#: nova/api/ec2/cloud.py:1776 nova/api/ec2/cloud.py:1826 +#: nova/api/ec2/cloud.py:1884 msgid "Only instances implemented" msgstr "Sólo están implementadas instancias" -#: nova/api/ec2/cloud.py:1774 nova/api/ec2/cloud.py:1824 +#: nova/api/ec2/cloud.py:1780 nova/api/ec2/cloud.py:1830 msgid "Expecting a list of tagSets" msgstr "Esperando una lista de 
tagSets" -#: nova/api/ec2/cloud.py:1780 nova/api/ec2/cloud.py:1833 +#: nova/api/ec2/cloud.py:1786 nova/api/ec2/cloud.py:1839 msgid "Expecting tagSet to be key/value pairs" msgstr "Esperando que tagSet sea un par clave/valor" -#: nova/api/ec2/cloud.py:1787 +#: nova/api/ec2/cloud.py:1793 msgid "Expecting both key and value to be set" msgstr "Esperando establecimiento tanto de clave como valor" -#: nova/api/ec2/cloud.py:1838 +#: nova/api/ec2/cloud.py:1844 msgid "Expecting key to be set" msgstr "Esperando el establecimiento de la clave" -#: nova/api/ec2/cloud.py:1912 +#: nova/api/ec2/cloud.py:1918 msgid "Invalid CIDR" msgstr "CIDR no válido" @@ -2416,7 +2453,7 @@ msgstr "" msgid "Timestamp is invalid." msgstr "La indicación de fecha y hora no es válida." -#: nova/api/metadata/handler.py:111 +#: nova/api/metadata/handler.py:112 msgid "" "X-Instance-ID present in request headers. The " "'service_neutron_metadata_proxy' option must be enabled to process this " @@ -2426,32 +2463,32 @@ msgstr "" "'service_neutron_metadata_proy' debe ser habilitada para procesar este " "encabezado." -#: nova/api/metadata/handler.py:140 nova/api/metadata/handler.py:147 +#: nova/api/metadata/handler.py:141 nova/api/metadata/handler.py:148 #, python-format msgid "Failed to get metadata for ip: %s" msgstr "Fallo al generar metadatos para la ip %s" -#: nova/api/metadata/handler.py:142 nova/api/metadata/handler.py:198 +#: nova/api/metadata/handler.py:143 nova/api/metadata/handler.py:199 msgid "An unknown error has occurred. Please try your request again." msgstr "Ha sucedido un error desconocido. Por favor repite el intento de nuevo." -#: nova/api/metadata/handler.py:160 +#: nova/api/metadata/handler.py:161 msgid "X-Instance-ID header is missing from request." msgstr "Falta la cabecera de ID de instancia X en la solicitud." -#: nova/api/metadata/handler.py:162 +#: nova/api/metadata/handler.py:163 msgid "X-Tenant-ID header is missing from request." 
msgstr "el encabezado X-Tenant-ID falta en la solicitud." -#: nova/api/metadata/handler.py:164 +#: nova/api/metadata/handler.py:165 msgid "Multiple X-Instance-ID headers found within request." msgstr "Se han encontrado varias cabeceas de ID de instancia X en la solicitud." -#: nova/api/metadata/handler.py:166 +#: nova/api/metadata/handler.py:167 msgid "Multiple X-Tenant-ID headers found within request." msgstr "Se han encontrado múltiples encabezados X-Tenant-ID en la solicitud." -#: nova/api/metadata/handler.py:180 +#: nova/api/metadata/handler.py:181 #, python-format msgid "" "X-Instance-ID-Signature: %(signature)s does not match the expected value:" @@ -2462,16 +2499,16 @@ msgstr "" "%(expected_signature)s para el ID: %(instance_id)s. Solicitud desde: " "%(remote_address)s " -#: nova/api/metadata/handler.py:189 +#: nova/api/metadata/handler.py:190 msgid "Invalid proxy request signature." msgstr "Firma de solicitud de proxy no válida." -#: nova/api/metadata/handler.py:196 nova/api/metadata/handler.py:203 +#: nova/api/metadata/handler.py:197 nova/api/metadata/handler.py:204 #, python-format msgid "Failed to get metadata for instance id: %s" msgstr "No se han podido obtener metadatos para el id de instancia: %s" -#: nova/api/metadata/handler.py:207 +#: nova/api/metadata/handler.py:208 #, python-format msgid "" "Tenant_id %(tenant_id)s does not match tenant_id of instance " @@ -2502,11 +2539,11 @@ msgstr "Capturado error: %s" msgid "%(url)s returned with HTTP %(status)d" msgstr "Se ha devuelto %(url)s con HTTP %(status)d" -#: nova/api/openstack/__init__.py:190 +#: nova/api/openstack/__init__.py:186 msgid "Must specify an ExtensionManager class" msgstr "Debe especificar una clase ExtensionManager" -#: nova/api/openstack/__init__.py:236 nova/api/openstack/__init__.py:410 +#: nova/api/openstack/__init__.py:232 nova/api/openstack/__init__.py:406 #, python-format msgid "" "Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " @@ -2515,28 +2552,28 @@ 
msgstr "" "Ampliación %(ext_name)s: no se puede ampliar el recurso %(collection)s: " "no existe dicho recurso." -#: nova/api/openstack/__init__.py:283 +#: nova/api/openstack/__init__.py:279 #: nova/api/openstack/compute/plugins/v3/servers.py:99 #, python-format msgid "Not loading %s because it is in the blacklist" msgstr "No se ha cargado %s porque está en la lista negra" -#: nova/api/openstack/__init__.py:288 +#: nova/api/openstack/__init__.py:284 #: nova/api/openstack/compute/plugins/v3/servers.py:104 #, python-format msgid "Not loading %s because it is not in the whitelist" msgstr "No se ha cargado %s porque no está en la lista blanca" -#: nova/api/openstack/__init__.py:295 +#: nova/api/openstack/__init__.py:291 msgid "V3 API has been disabled by configuration" msgstr "" -#: nova/api/openstack/__init__.py:308 +#: nova/api/openstack/__init__.py:304 #, python-format msgid "Extensions in both blacklist and whitelist: %s" msgstr "Extensiones en lista restrictiva y lista permisiva: %s" -#: nova/api/openstack/__init__.py:332 +#: nova/api/openstack/__init__.py:328 #, python-format msgid "Missing core API extensions: %s" msgstr "Extensiones core API omitidas: %s" @@ -2576,61 +2613,53 @@ msgstr "el parámetro de límite debe ser positivo" msgid "offset param must be positive" msgstr "el parámetro de desplazamiento debe ser positivo" -#: nova/api/openstack/common.py:259 nova/api/openstack/compute/flavors.py:146 -#: nova/api/openstack/compute/servers.py:603 -#: nova/api/openstack/compute/plugins/v3/flavors.py:110 -#: nova/api/openstack/compute/plugins/v3/servers.py:280 -#, python-format -msgid "marker [%s] not found" -msgstr "no se ha encontrado el marcador [%s]" - -#: nova/api/openstack/common.py:299 +#: nova/api/openstack/common.py:276 #, python-format msgid "href %s does not contain version" msgstr "href %s no contiene la versión" -#: nova/api/openstack/common.py:314 +#: nova/api/openstack/common.py:291 msgid "Image metadata limit exceeded" msgstr "Se ha superado el 
límite de metadatos de imágenes" -#: nova/api/openstack/common.py:322 +#: nova/api/openstack/common.py:299 msgid "Image metadata key cannot be blank" msgstr "La clave de metadatos de imagen no puede estar en blanco" -#: nova/api/openstack/common.py:325 +#: nova/api/openstack/common.py:302 msgid "Image metadata key too long" msgstr "La clave de metadatos de imagen es demasiado larga" -#: nova/api/openstack/common.py:328 +#: nova/api/openstack/common.py:305 msgid "Invalid image metadata" msgstr "Metadatos de imagen no válidos " -#: nova/api/openstack/common.py:391 +#: nova/api/openstack/common.py:368 #, python-format msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" msgstr "No se puede '%(action)s' mientras la instancia está en %(attr)s %(state)s" -#: nova/api/openstack/common.py:394 +#: nova/api/openstack/common.py:371 #, python-format msgid "Cannot '%s' an instance which has never been active" msgstr "No se puede '%s' una instancia que nunca ha estado activa" -#: nova/api/openstack/common.py:397 +#: nova/api/openstack/common.py:374 #, python-format msgid "Instance is in an invalid state for '%s'" msgstr "La instancia se encuentra en un estado inválido para '%s'" -#: nova/api/openstack/common.py:477 +#: nova/api/openstack/common.py:454 msgid "Rejecting snapshot request, snapshots currently disabled" msgstr "" "Rechazando solicitud de instantánea, instantáneas inhabilitadas " "actualmente" -#: nova/api/openstack/common.py:479 +#: nova/api/openstack/common.py:456 msgid "Instance snapshots are not permitted at this time." msgstr "Las instantáneas de instancia no están permitidas en este momento." -#: nova/api/openstack/common.py:600 +#: nova/api/openstack/common.py:577 msgid "Cells is not enabled." msgstr "Las celdas no están habilitadas." 
@@ -2774,6 +2803,14 @@ msgstr "Filtro minRam no válido [%s]" msgid "Invalid minDisk filter [%s]" msgstr "Filtro minDisk no válido [%s]" +#: nova/api/openstack/compute/flavors.py:146 +#: nova/api/openstack/compute/servers.py:603 +#: nova/api/openstack/compute/plugins/v3/flavors.py:110 +#: nova/api/openstack/compute/plugins/v3/servers.py:280 +#, python-format +msgid "marker [%s] not found" +msgstr "no se ha encontrado el marcador [%s]" + #: nova/api/openstack/compute/image_metadata.py:35 #: nova/api/openstack/compute/images.py:141 #: nova/api/openstack/compute/images.py:157 @@ -2787,7 +2824,7 @@ msgstr "Formato de cuerpo de solicitud incorrecto" #: nova/api/openstack/compute/image_metadata.py:82 #: nova/api/openstack/compute/server_metadata.py:79 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:108 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:85 +#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:72 #: nova/api/openstack/compute/plugins/v3/server_metadata.py:77 msgid "Request body and URI mismatch" msgstr "Discrepancia de URI y cuerpo de solicitud" @@ -2795,7 +2832,6 @@ msgstr "Discrepancia de URI y cuerpo de solicitud" #: nova/api/openstack/compute/image_metadata.py:85 #: nova/api/openstack/compute/server_metadata.py:83 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:111 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:88 #: nova/api/openstack/compute/plugins/v3/server_metadata.py:81 msgid "Request body contains too many items" msgstr "El cuerpo de solicitud contiene demasiados elementos" @@ -2872,12 +2908,12 @@ msgstr "El sabor '%s' no se ha podido encontrar " #: nova/api/openstack/compute/servers.py:625 #: nova/api/openstack/compute/servers.py:772 -#: nova/api/openstack/compute/servers.py:1079 -#: nova/api/openstack/compute/servers.py:1199 -#: nova/api/openstack/compute/servers.py:1384 -#: nova/api/openstack/compute/plugins/v3/servers.py:615 -#: nova/api/openstack/compute/plugins/v3/servers.py:727 
-#: nova/api/openstack/compute/plugins/v3/servers.py:846 +#: nova/api/openstack/compute/servers.py:1081 +#: nova/api/openstack/compute/servers.py:1203 +#: nova/api/openstack/compute/servers.py:1388 +#: nova/api/openstack/compute/plugins/v3/servers.py:617 +#: nova/api/openstack/compute/plugins/v3/servers.py:729 +#: nova/api/openstack/compute/plugins/v3/servers.py:848 msgid "Instance could not be found" msgstr "No se ha podido encontrar la instancia" @@ -2984,125 +3020,124 @@ msgstr "Se ha proporcionado un nombre de clave no válido." msgid "Invalid config_drive provided." msgstr "La config_drive proporcionada es inválida." -#: nova/api/openstack/compute/servers.py:1064 +#: nova/api/openstack/compute/servers.py:1066 msgid "HostId cannot be updated." msgstr "El ID de host no se puede actualizar. " -#: nova/api/openstack/compute/servers.py:1068 +#: nova/api/openstack/compute/servers.py:1070 msgid "Personality cannot be updated." msgstr "No se puede actualizar la personalidad." -#: nova/api/openstack/compute/servers.py:1094 -#: nova/api/openstack/compute/servers.py:1113 -#: nova/api/openstack/compute/plugins/v3/servers.py:626 -#: nova/api/openstack/compute/plugins/v3/servers.py:642 +#: nova/api/openstack/compute/servers.py:1096 +#: nova/api/openstack/compute/servers.py:1115 +#: nova/api/openstack/compute/plugins/v3/servers.py:628 +#: nova/api/openstack/compute/plugins/v3/servers.py:644 msgid "Instance has not been resized." msgstr "La instancia no se ha redimensionado." -#: nova/api/openstack/compute/servers.py:1116 -#: nova/api/openstack/compute/plugins/v3/servers.py:645 +#: nova/api/openstack/compute/servers.py:1118 +#: nova/api/openstack/compute/plugins/v3/servers.py:647 msgid "Flavor used by the instance could not be found." msgstr "No se ha podido encontrar el sabor utilizado por la instancia." 
-#: nova/api/openstack/compute/servers.py:1132 -#: nova/api/openstack/compute/plugins/v3/servers.py:659 +#: nova/api/openstack/compute/servers.py:1134 +#: nova/api/openstack/compute/plugins/v3/servers.py:661 msgid "Argument 'type' for reboot must be a string" msgstr "El argumento 'type' para reinicio debe ser una cadena" -#: nova/api/openstack/compute/servers.py:1138 -#: nova/api/openstack/compute/plugins/v3/servers.py:665 +#: nova/api/openstack/compute/servers.py:1140 +#: nova/api/openstack/compute/plugins/v3/servers.py:667 msgid "Argument 'type' for reboot is not HARD or SOFT" msgstr "El argumento 'type' para el rearranque no es HARD o SOFT" -#: nova/api/openstack/compute/servers.py:1142 -#: nova/api/openstack/compute/plugins/v3/servers.py:669 +#: nova/api/openstack/compute/servers.py:1144 +#: nova/api/openstack/compute/plugins/v3/servers.py:671 msgid "Missing argument 'type' for reboot" msgstr "Falta el argumento 'type' para el rearranque" -#: nova/api/openstack/compute/servers.py:1169 -#: nova/api/openstack/compute/plugins/v3/servers.py:697 +#: nova/api/openstack/compute/servers.py:1171 +#: nova/api/openstack/compute/plugins/v3/servers.py:699 msgid "Unable to locate requested flavor." msgstr "No se puede ubicar el tipo solicitado." -#: nova/api/openstack/compute/servers.py:1172 -#: nova/api/openstack/compute/plugins/v3/servers.py:700 +#: nova/api/openstack/compute/servers.py:1174 +#: nova/api/openstack/compute/plugins/v3/servers.py:702 msgid "Resize requires a flavor change." msgstr "Redimensionar necesita un cambio de modelo. " -#: nova/api/openstack/compute/servers.py:1180 -#: nova/api/openstack/compute/plugins/v3/servers.py:708 +#: nova/api/openstack/compute/servers.py:1182 +#: nova/api/openstack/compute/plugins/v3/servers.py:710 msgid "You are not authorized to access the image the instance was started with." msgstr "" "No está autorizado a acceder a la imagen con la que se ha lanzado la " "instancia." 
-#: nova/api/openstack/compute/servers.py:1184 -#: nova/api/openstack/compute/plugins/v3/servers.py:712 +#: nova/api/openstack/compute/servers.py:1186 +#: nova/api/openstack/compute/plugins/v3/servers.py:714 msgid "Image that the instance was started with could not be found." msgstr "No se ha podido encontrar la imagen con la que se lanzó la instancia." -#: nova/api/openstack/compute/servers.py:1188 -#: nova/api/openstack/compute/plugins/v3/servers.py:716 +#: nova/api/openstack/compute/servers.py:1190 +#: nova/api/openstack/compute/plugins/v3/servers.py:718 msgid "Invalid instance image." msgstr "Imagen de instancia no válida." -#: nova/api/openstack/compute/servers.py:1211 +#: nova/api/openstack/compute/servers.py:1215 msgid "Missing imageRef attribute" msgstr "Falta el atributo imageRef" -#: nova/api/openstack/compute/servers.py:1216 -#: nova/api/openstack/compute/servers.py:1224 +#: nova/api/openstack/compute/servers.py:1220 +#: nova/api/openstack/compute/servers.py:1228 msgid "Invalid imageRef provided." msgstr "Se ha proporcionado una referencia de imagen no válida." -#: nova/api/openstack/compute/servers.py:1254 +#: nova/api/openstack/compute/servers.py:1258 msgid "Missing flavorRef attribute" msgstr "Falta el atributo flavorRef" -#: nova/api/openstack/compute/servers.py:1267 +#: nova/api/openstack/compute/servers.py:1271 msgid "No adminPass was specified" msgstr "No se ha especificado adminPass" -#: nova/api/openstack/compute/servers.py:1275 +#: nova/api/openstack/compute/servers.py:1279 #: nova/api/openstack/compute/plugins/v3/admin_password.py:56 msgid "Unable to set password on instance" msgstr "No se puede establecer contraseña en la instancia" -#: nova/api/openstack/compute/servers.py:1284 +#: nova/api/openstack/compute/servers.py:1288 msgid "Unable to parse metadata key/value pairs." msgstr "No se han podido analizar pares de clave/valor de metadatos." 
-#: nova/api/openstack/compute/servers.py:1297 +#: nova/api/openstack/compute/servers.py:1301 msgid "Resize request has invalid 'flavorRef' attribute." msgstr "" "La solicitud de redimensionamiento tiene un atributo 'flavorRef' no " "válido." -#: nova/api/openstack/compute/servers.py:1300 +#: nova/api/openstack/compute/servers.py:1304 msgid "Resize requests require 'flavorRef' attribute." msgstr "Las solicitudes de redimensionamiento necesitan el atributo 'flavorRef'. " -#: nova/api/openstack/compute/servers.py:1320 +#: nova/api/openstack/compute/servers.py:1324 msgid "Could not parse imageRef from request." msgstr "No se ha podido analizar imageRef de la solicitud. " -#: nova/api/openstack/compute/servers.py:1390 -#: nova/api/openstack/compute/plugins/v3/servers.py:852 +#: nova/api/openstack/compute/servers.py:1394 +#: nova/api/openstack/compute/plugins/v3/servers.py:854 msgid "Cannot find image for rebuild" msgstr "No se puede encontrar la imagen para reconstrucción " -#: nova/api/openstack/compute/servers.py:1423 +#: nova/api/openstack/compute/servers.py:1427 msgid "createImage entity requires name attribute" msgstr "La entidad createImage necesita el atributo de nombre" -#: nova/api/openstack/compute/servers.py:1432 -#: nova/api/openstack/compute/contrib/admin_actions.py:286 -#: nova/api/openstack/compute/plugins/v3/create_backup.py:85 -#: nova/api/openstack/compute/plugins/v3/servers.py:892 +#: nova/api/openstack/compute/servers.py:1436 +#: nova/api/openstack/compute/contrib/admin_actions.py:288 +#: nova/api/openstack/compute/plugins/v3/servers.py:894 msgid "Invalid metadata" msgstr "Metadatos no válidos" -#: nova/api/openstack/compute/servers.py:1490 +#: nova/api/openstack/compute/servers.py:1494 msgid "Invalid adminPass" msgstr "adminPass no válido " @@ -3110,11 +3145,11 @@ msgstr "adminPass no válido " #: nova/api/openstack/compute/contrib/admin_actions.py:88 #: nova/api/openstack/compute/contrib/admin_actions.py:113 #: 
nova/api/openstack/compute/contrib/admin_actions.py:135 -#: nova/api/openstack/compute/contrib/admin_actions.py:176 -#: nova/api/openstack/compute/contrib/admin_actions.py:195 -#: nova/api/openstack/compute/contrib/admin_actions.py:214 -#: nova/api/openstack/compute/contrib/admin_actions.py:233 -#: nova/api/openstack/compute/contrib/admin_actions.py:391 +#: nova/api/openstack/compute/contrib/admin_actions.py:178 +#: nova/api/openstack/compute/contrib/admin_actions.py:197 +#: nova/api/openstack/compute/contrib/admin_actions.py:216 +#: nova/api/openstack/compute/contrib/admin_actions.py:235 +#: nova/api/openstack/compute/contrib/admin_actions.py:393 #: nova/api/openstack/compute/contrib/multinic.py:43 #: nova/api/openstack/compute/contrib/rescue.py:45 #: nova/api/openstack/compute/contrib/shelve.py:43 @@ -3122,6 +3157,8 @@ msgid "Server not found" msgstr "Servidor no encontrado" #: nova/api/openstack/compute/contrib/admin_actions.py:66 +#: nova/api/openstack/compute/plugins/v3/pause_server.py:59 +#: nova/api/openstack/compute/plugins/v3/pause_server.py:81 msgid "Virt driver does not implement pause function." msgstr "El controlador Virt no implementa la función de pausa." 
@@ -3149,56 +3186,55 @@ msgstr "compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "compute.api::resume %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:163 +#: nova/api/openstack/compute/contrib/admin_actions.py:165 #, python-format msgid "Error in migrate %s" msgstr "Error al migrar %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:182 +#: nova/api/openstack/compute/contrib/admin_actions.py:184 #, python-format msgid "Compute.api::reset_network %s" msgstr "Compute.api::reset_network %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:201 +#: nova/api/openstack/compute/contrib/admin_actions.py:203 #, python-format msgid "Compute.api::inject_network_info %s" msgstr "Compute.api::inject_network_info %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:218 +#: nova/api/openstack/compute/contrib/admin_actions.py:220 #, python-format msgid "Compute.api::lock %s" msgstr "Compute.api::lock %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:237 +#: nova/api/openstack/compute/contrib/admin_actions.py:239 #, python-format msgid "Compute.api::unlock %s" msgstr "Compute.api::unlock %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:263 +#: nova/api/openstack/compute/contrib/admin_actions.py:265 #, python-format msgid "createBackup entity requires %s attribute" msgstr "La entidad createBackup necesita el atributo %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:267 +#: nova/api/openstack/compute/contrib/admin_actions.py:269 msgid "Malformed createBackup entity" msgstr "Entidad createBackup formada incorrectamente" -#: nova/api/openstack/compute/contrib/admin_actions.py:273 +#: nova/api/openstack/compute/contrib/admin_actions.py:275 msgid "createBackup attribute 'rotation' must be an integer" msgstr "La 'rotación' del atributo createBackup debe ser un entero" -#: nova/api/openstack/compute/contrib/admin_actions.py:276 +#: nova/api/openstack/compute/contrib/admin_actions.py:278 msgid "createBackup 
attribute 'rotation' must be greater than or equal to zero" msgstr "El atributo de createBackup 'rotation' debe ser mayor que o igual a cero" -#: nova/api/openstack/compute/contrib/admin_actions.py:292 -#: nova/api/openstack/compute/contrib/console_output.py:45 +#: nova/api/openstack/compute/contrib/admin_actions.py:294 +#: nova/api/openstack/compute/contrib/console_output.py:46 #: nova/api/openstack/compute/contrib/server_start_stop.py:40 msgid "Instance not found" msgstr "No se ha encontrado la instancia " -#: nova/api/openstack/compute/contrib/admin_actions.py:323 -#: nova/api/openstack/compute/plugins/v3/migrate_server.py:80 +#: nova/api/openstack/compute/contrib/admin_actions.py:325 msgid "" "host, block_migration and disk_over_commit must be specified for live " "migration." @@ -3206,74 +3242,59 @@ msgstr "" "host, block_migration y disk_over_commit deben especificarse para " "migración en vivo." -#: nova/api/openstack/compute/contrib/admin_actions.py:360 +#: nova/api/openstack/compute/contrib/admin_actions.py:362 #, python-format msgid "Live migration of instance %s to another host failed" msgstr "Ha fallado la migración en vivo de la instancia %s a otro host" -#: nova/api/openstack/compute/contrib/admin_actions.py:363 +#: nova/api/openstack/compute/contrib/admin_actions.py:365 #, python-format msgid "Live migration of instance %(id)s to host %(host)s failed" msgstr "La migración en directo de la instancia %(id)s al host %(host)s ha fallado" -#: nova/api/openstack/compute/contrib/admin_actions.py:381 +#: nova/api/openstack/compute/contrib/admin_actions.py:383 #: nova/api/openstack/compute/plugins/v3/admin_actions.py:83 #, python-format msgid "Desired state must be specified. Valid states are: %s" msgstr "Se debe especificar el estado deseado. 
Los estados válidos son: %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:395 +#: nova/api/openstack/compute/contrib/admin_actions.py:397 #, python-format msgid "Compute.api::resetState %s" msgstr "Compute.api::resetState %s" -#: nova/api/openstack/compute/contrib/aggregates.py:99 -#, python-format -msgid "Cannot show aggregate: %s" -msgstr "No se puede mostrar el agregado: %s" - -#: nova/api/openstack/compute/contrib/aggregates.py:137 -#, python-format -msgid "Cannot update aggregate: %s" -msgstr "No se puede actualizar el agregado: %s" - -#: nova/api/openstack/compute/contrib/aggregates.py:151 -#, python-format -msgid "Cannot delete aggregate: %s" -msgstr "No se puede eliminar el agregado: %s" - -#: nova/api/openstack/compute/contrib/aggregates.py:162 +#: nova/api/openstack/compute/contrib/aggregates.py:161 #, python-format msgid "Aggregates does not have %s action" msgstr "Los agregados no tienen la acción %s " -#: nova/api/openstack/compute/contrib/aggregates.py:166 +#: nova/api/openstack/compute/contrib/aggregates.py:165 #: nova/api/openstack/compute/contrib/flavormanage.py:55 #: nova/api/openstack/compute/contrib/keypairs.py:86 -#: nova/api/openstack/compute/plugins/v3/aggregates.py:167 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:169 msgid "Invalid request body" msgstr "Cuerpo de solicitud no válido" -#: nova/api/openstack/compute/contrib/aggregates.py:176 -#: nova/api/openstack/compute/contrib/aggregates.py:181 +#: nova/api/openstack/compute/contrib/aggregates.py:175 +#: nova/api/openstack/compute/contrib/aggregates.py:180 #, python-format msgid "Cannot add host %(host)s in aggregate %(id)s" msgstr "No se puede añadir el host %(host)s en el agregado %(id)s" -#: nova/api/openstack/compute/contrib/aggregates.py:195 -#: nova/api/openstack/compute/contrib/aggregates.py:199 -#: nova/api/openstack/compute/plugins/v3/aggregates.py:151 -#: nova/api/openstack/compute/plugins/v3/aggregates.py:155 +#: 
nova/api/openstack/compute/contrib/aggregates.py:194 +#: nova/api/openstack/compute/contrib/aggregates.py:198 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:153 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:157 #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "No se puede eliminar el host %(host)s en el agregado %(id)s" -#: nova/api/openstack/compute/contrib/aggregates.py:218 -#: nova/api/openstack/compute/plugins/v3/aggregates.py:175 +#: nova/api/openstack/compute/contrib/aggregates.py:217 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:177 msgid "The value of metadata must be a dict" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:230 +#: nova/api/openstack/compute/contrib/aggregates.py:229 #, python-format msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" msgstr "No se pueden establecer metadatos %(metadata)s en el agregado %(id)s" @@ -3296,7 +3317,7 @@ msgstr "Conectar interfaz" #: nova/api/openstack/compute/contrib/attach_interfaces.py:119 #: nova/api/openstack/compute/contrib/attach_interfaces.py:154 #: nova/api/openstack/compute/contrib/attach_interfaces.py:177 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:166 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:169 msgid "Network driver does not support this function." msgstr "El controlador de red no soporta esta función." @@ -3305,12 +3326,12 @@ msgid "Failed to attach interface" msgstr "Se ha encontrado un error al conectar la interfaz." 
#: nova/api/openstack/compute/contrib/attach_interfaces.py:130 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:128 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:131 msgid "Attachments update is not supported" msgstr "La actualización de dispositivos conectados no está soportada" #: nova/api/openstack/compute/contrib/attach_interfaces.py:142 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:139 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:142 #, python-format msgid "Detach interface %s" msgstr "Desconectar interfaz %s" @@ -3389,21 +3410,21 @@ msgstr "Token no encontrado" msgid "The requested console type details are not accessible" msgstr "Los detalles del tipo de consola solicitada no son accesibles" -#: nova/api/openstack/compute/contrib/console_output.py:51 +#: nova/api/openstack/compute/contrib/console_output.py:52 msgid "os-getConsoleOutput malformed or missing from request body" msgstr "" "os-getConsoleOutput formada incorrectamente u omitida en el cuerpo de " "solicitud" -#: nova/api/openstack/compute/contrib/console_output.py:62 +#: nova/api/openstack/compute/contrib/console_output.py:63 msgid "Length in request body must be an integer value" msgstr "La longitud del cuerpo de solicitud debe ser un valor entero " -#: nova/api/openstack/compute/contrib/console_output.py:70 +#: nova/api/openstack/compute/contrib/console_output.py:71 msgid "Unable to get console" msgstr "No se puede obtener consola " -#: nova/api/openstack/compute/contrib/console_output.py:75 +#: nova/api/openstack/compute/contrib/console_output.py:76 #: nova/api/openstack/compute/plugins/v3/console_output.py:60 msgid "Unable to get console log, functionality not implemented" msgstr "" @@ -3415,17 +3436,17 @@ msgid "Instance not yet ready" msgstr "La instancia aún no está preparada" #: nova/api/openstack/compute/contrib/consoles.py:52 -#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:62 +#: 
nova/api/openstack/compute/plugins/v3/remote_consoles.py:60 msgid "Unable to get vnc console, functionality not implemented" msgstr "Incapaz de obtener consola vnc, funcionalidad no implementada" #: nova/api/openstack/compute/contrib/consoles.py:76 -#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:93 +#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:89 msgid "Unable to get spice console, functionality not implemented" msgstr "Incapaz de obtener la consola spice, funcionalidad no implementada" #: nova/api/openstack/compute/contrib/consoles.py:101 -#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:127 +#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:121 msgid "Unable to get rdp console, functionality not implemented" msgstr "Incapaz de obtener consola rdp, funcionalidad no implementada" @@ -3477,8 +3498,12 @@ msgstr "La lista de acceso no está disponible para sabores públicos. " msgid "No request body" msgstr "Ningún cuerpo de solicitud " +#: nova/api/openstack/compute/contrib/flavor_access.py:170 +#: nova/api/openstack/compute/contrib/flavor_access.py:194 +msgid "Missing tenant parameter" +msgstr "" + #: nova/api/openstack/compute/contrib/flavorextraspecs.py:56 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:42 msgid "No Request Body" msgstr "Ningún cuerpo de solicitud" @@ -3488,8 +3513,8 @@ msgstr "Se han proporcionado extra_specs incorrectas" #: nova/api/openstack/compute/contrib/flavorextraspecs.py:134 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:150 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:113 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:132 +#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:96 +#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:115 #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "" @@ -3499,7 +3524,7 @@ msgid "DNS entries not found." 
msgstr "No se han encontrado entradas DNS." #: nova/api/openstack/compute/contrib/floating_ips.py:129 -#: nova/api/openstack/compute/contrib/floating_ips.py:177 +#: nova/api/openstack/compute/contrib/floating_ips.py:183 #, python-format msgid "Floating ip not found for id %s" msgstr "No se ha encontrado la IP flotante para el id %s." @@ -3513,51 +3538,60 @@ msgstr "No hay más IP flotantes en la agrupación %s." msgid "No more floating ips available." msgstr "No hay más IP flotantes disponibles." -#: nova/api/openstack/compute/contrib/floating_ips.py:218 -#: nova/api/openstack/compute/contrib/floating_ips.py:283 -#: nova/api/openstack/compute/contrib/security_groups.py:481 +#: nova/api/openstack/compute/contrib/floating_ips.py:168 +#, python-format +msgid "IP allocation over quota in pool %s." +msgstr "" + +#: nova/api/openstack/compute/contrib/floating_ips.py:170 +msgid "IP allocation over quota." +msgstr "" + +#: nova/api/openstack/compute/contrib/floating_ips.py:220 +#: nova/api/openstack/compute/contrib/floating_ips.py:285 +#: nova/api/openstack/compute/contrib/security_groups.py:482 msgid "Missing parameter dict" msgstr "Falta el parámetro dict " -#: nova/api/openstack/compute/contrib/floating_ips.py:221 -#: nova/api/openstack/compute/contrib/floating_ips.py:286 +#: nova/api/openstack/compute/contrib/floating_ips.py:223 +#: nova/api/openstack/compute/contrib/floating_ips.py:288 msgid "Address not specified" msgstr "Dirección no especificada " -#: nova/api/openstack/compute/contrib/floating_ips.py:227 +#: nova/api/openstack/compute/contrib/floating_ips.py:229 msgid "No nw_info cache associated with instance" msgstr "No hay memoria caché nw_info asociada con la instancia " -#: nova/api/openstack/compute/contrib/floating_ips.py:232 +#: nova/api/openstack/compute/contrib/floating_ips.py:234 msgid "No fixed ips associated to instance" msgstr "No hay IP fijas asociadas a la instancia " -#: nova/api/openstack/compute/contrib/floating_ips.py:243 +#: 
nova/api/openstack/compute/contrib/floating_ips.py:245 msgid "Specified fixed address not assigned to instance" msgstr "Dirección fija especificada no asignada a la instancia" -#: nova/api/openstack/compute/contrib/floating_ips.py:257 +#: nova/api/openstack/compute/contrib/floating_ips.py:259 msgid "floating ip is already associated" msgstr "La IP flotante ya está asociada" -#: nova/api/openstack/compute/contrib/floating_ips.py:260 +#: nova/api/openstack/compute/contrib/floating_ips.py:262 msgid "l3driver call to add floating ip failed" msgstr "La llamada l3driver para añadir IP flotante ha fallado" -#: nova/api/openstack/compute/contrib/floating_ips.py:263 -#: nova/api/openstack/compute/contrib/floating_ips.py:294 +#: nova/api/openstack/compute/contrib/floating_ips.py:265 +#: nova/api/openstack/compute/contrib/floating_ips.py:296 msgid "floating ip not found" msgstr "No se ha encontrado IP flotante" -#: nova/api/openstack/compute/contrib/floating_ips.py:268 +#: nova/api/openstack/compute/contrib/floating_ips.py:270 msgid "Error. Unable to associate floating ip" msgstr "Error. No se puede asociar IP flotante" -#: nova/api/openstack/compute/contrib/floating_ips.py:309 +#: nova/api/openstack/compute/contrib/floating_ips.py:311 msgid "Floating ip is not associated" msgstr "La ip flotante no está asociada " -#: nova/api/openstack/compute/contrib/floating_ips.py:313 +#: nova/api/openstack/compute/contrib/floating_ips.py:315 #, python-format msgid "Floating ip %(address)s is not associated with instance %(id)s." msgstr "" @@ -3583,63 +3617,59 @@ msgid "fping utility is not found." msgstr "No se encuentra el programa de utilidad fping." 
#: nova/api/openstack/compute/contrib/hosts.py:183 -#: nova/api/openstack/compute/plugins/v3/hosts.py:128 #, python-format msgid "Invalid update setting: '%s'" msgstr "Valor de actualización no válido: '%s' " #: nova/api/openstack/compute/contrib/hosts.py:186 -#: nova/api/openstack/compute/plugins/v3/hosts.py:131 #, python-format msgid "Invalid status: '%s'" msgstr "Estado no válido: '%s' " #: nova/api/openstack/compute/contrib/hosts.py:188 -#: nova/api/openstack/compute/plugins/v3/hosts.py:133 #, python-format msgid "Invalid mode: '%s'" msgstr "Modalidad no válida: '%s' " #: nova/api/openstack/compute/contrib/hosts.py:190 -#: nova/api/openstack/compute/plugins/v3/hosts.py:135 msgid "'status' or 'maintenance_mode' needed for host update" msgstr "Se necesita 'status' o 'maintenance_mode' para actualización de host" #: nova/api/openstack/compute/contrib/hosts.py:206 -#: nova/api/openstack/compute/plugins/v3/hosts.py:152 +#: nova/api/openstack/compute/plugins/v3/hosts.py:134 #, python-format msgid "Putting host %(host_name)s in maintenance mode %(mode)s." msgstr "Poniendo el host %(host_name)s en modalidad de mantenimiento %(mode)s." #: nova/api/openstack/compute/contrib/hosts.py:212 -#: nova/api/openstack/compute/plugins/v3/hosts.py:158 +#: nova/api/openstack/compute/plugins/v3/hosts.py:140 msgid "Virt driver does not implement host maintenance mode." msgstr "El controlador virt no implementa la modalidad de mantenimiento de host." #: nova/api/openstack/compute/contrib/hosts.py:227 -#: nova/api/openstack/compute/plugins/v3/hosts.py:174 +#: nova/api/openstack/compute/plugins/v3/hosts.py:156 #, python-format msgid "Enabling host %s." msgstr "Habilitando el host %s." #: nova/api/openstack/compute/contrib/hosts.py:229 -#: nova/api/openstack/compute/plugins/v3/hosts.py:176 +#: nova/api/openstack/compute/plugins/v3/hosts.py:158 #, python-format msgid "Disabling host %s." msgstr "Inhabilitando el host %s." 
#: nova/api/openstack/compute/contrib/hosts.py:234 -#: nova/api/openstack/compute/plugins/v3/hosts.py:181 +#: nova/api/openstack/compute/plugins/v3/hosts.py:163 msgid "Virt driver does not implement host disabled status." msgstr "El controlador virt no implementa el estado inhabilitado de host." #: nova/api/openstack/compute/contrib/hosts.py:250 -#: nova/api/openstack/compute/plugins/v3/hosts.py:199 +#: nova/api/openstack/compute/plugins/v3/hosts.py:181 msgid "Virt driver does not implement host power management." msgstr "El controlador virt no implementa la gestión de alimentación de host." #: nova/api/openstack/compute/contrib/hosts.py:336 -#: nova/api/openstack/compute/plugins/v3/hosts.py:292 +#: nova/api/openstack/compute/plugins/v3/hosts.py:274 msgid "Describe-resource is admin only functionality" msgstr "El recurso de descripción es funcionalidad sólo de administrador" @@ -3837,7 +3867,7 @@ msgid "Malformed scheduler_hints attribute" msgstr "Atributo scheduler_hints formado incorrectamente" #: nova/api/openstack/compute/contrib/security_group_default_rules.py:127 -#: nova/api/openstack/compute/contrib/security_groups.py:386 +#: nova/api/openstack/compute/contrib/security_groups.py:387 msgid "Not enough parameters to build a valid rule." msgstr "No hay suficientes parámetros para crear una regla válida." @@ -3849,16 +3879,16 @@ msgstr "Esta regla predeterminada ya existe." 
msgid "security group default rule not found" msgstr "regla predeterminada de grupo de seguridad no encontrada" -#: nova/api/openstack/compute/contrib/security_groups.py:394 +#: nova/api/openstack/compute/contrib/security_groups.py:395 #, python-format msgid "Bad prefix for network in cidr %s" msgstr "Prefijo erróneo para red en cidr %s" -#: nova/api/openstack/compute/contrib/security_groups.py:484 +#: nova/api/openstack/compute/contrib/security_groups.py:485 msgid "Security group not specified" msgstr "Grupo de seguridad no especificado" -#: nova/api/openstack/compute/contrib/security_groups.py:488 +#: nova/api/openstack/compute/contrib/security_groups.py:489 msgid "Security group name cannot be empty" msgstr "El nombre de grupo de seguridad no puede estar vacío" @@ -3891,39 +3921,39 @@ msgstr "Cear evento %(name)s:%(tag)s para la instancia %(instance_uuid)s" msgid "No instances found for any event" msgstr "No se han encontrado instancias en cualquier evento" -#: nova/api/openstack/compute/contrib/server_groups.py:162 +#: nova/api/openstack/compute/contrib/server_groups.py:161 msgid "Conflicting policies configured!" msgstr "Políticas conflictivas configuradas!" -#: nova/api/openstack/compute/contrib/server_groups.py:167 +#: nova/api/openstack/compute/contrib/server_groups.py:166 #, python-format msgid "Invalid policies: %s" msgstr "Políticas inválidas: %s" -#: nova/api/openstack/compute/contrib/server_groups.py:172 +#: nova/api/openstack/compute/contrib/server_groups.py:171 msgid "Duplicate policies configured!" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:177 +#: nova/api/openstack/compute/contrib/server_groups.py:176 msgid "the body is invalid." msgstr "El cuerpo es inválido." -#: nova/api/openstack/compute/contrib/server_groups.py:186 +#: nova/api/openstack/compute/contrib/server_groups.py:185 #, python-format msgid "'%s' is either missing or empty." msgstr "'%s' no se encuentra o está vacío." 
-#: nova/api/openstack/compute/contrib/server_groups.py:192 +#: nova/api/openstack/compute/contrib/server_groups.py:191 #, python-format msgid "Invalid format for name: '%s'" msgstr "Formato inválido para el nombre: '%s'" -#: nova/api/openstack/compute/contrib/server_groups.py:200 +#: nova/api/openstack/compute/contrib/server_groups.py:199 #, python-format msgid "'%s' is not a list" msgstr "'%s' no es una lista" -#: nova/api/openstack/compute/contrib/server_groups.py:204 +#: nova/api/openstack/compute/contrib/server_groups.py:203 #, python-format msgid "unsupported fields: %s" msgstr "Campos no soportados: %s" @@ -3952,11 +3982,11 @@ msgstr "Atributo no válido en la solicitud" msgid "Missing disabled reason field" msgstr "Campo disabled reason omitido." -#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:231 +#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:230 msgid "Datetime is in invalid format" msgstr "" -#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:250 +#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:249 msgid "Invalid start time. The start time cannot occur after the end time." msgstr "" "Hora de inicio no válida. La hora de inicio no pude tener lugar después " @@ -4033,11 +4063,11 @@ msgstr "access_ip_v4 no tiene el formato IPv4 apropiado" msgid "access_ip_v6 is not proper IPv6 format" msgstr "access_ip_v6 no tiene el formato IPv6 apropiado" -#: nova/api/openstack/compute/plugins/v3/aggregates.py:170 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:172 msgid "Invalid request format for metadata" msgstr "Formato de solicitud inválido para metadatos" -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:103 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:106 #, python-format msgid "Attach interface to %s" msgstr "Asociar interfaz a %s" @@ -4051,23 +4081,6 @@ msgstr "No existe Cell %s." 
msgid "token not provided" msgstr "token no proporcionado" -#: nova/api/openstack/compute/plugins/v3/create_backup.py:62 -#, python-format -msgid "create_backup entity requires %s attribute" -msgstr "La entidad create_backup necesita el atributo %s" - -#: nova/api/openstack/compute/plugins/v3/create_backup.py:66 -msgid "Malformed create_backup entity" -msgstr "Entidad create_backup mal formada" - -#: nova/api/openstack/compute/plugins/v3/create_backup.py:72 -msgid "create_backup attribute 'rotation' must be an integer" -msgstr "El atributo 'rotation' de create_backup debe ser un entero" - -#: nova/api/openstack/compute/plugins/v3/create_backup.py:75 -msgid "create_backup attribute 'rotation' must be greater than or equal to zero" -msgstr "El atributo 'rotation' en create_backup debe ser mayor qué o igual a cero" - #: nova/api/openstack/compute/plugins/v3/extended_volumes.py:98 msgid "The volume was either invalid or not attached to the instance." msgstr "El volumen es inválido o no está asociado a la instancia." @@ -4095,19 +4108,6 @@ msgstr "Filtro min_ram [%s] no válido" msgid "Invalid min_disk filter [%s]" msgstr "Filtro min_disk inválido [%s]" -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:66 -msgid "No or bad extra_specs provided" -msgstr "extra_specs erróneas o no proporcionadas" - -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:73 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:95 -msgid "Concurrent transaction has been committed, try again" -msgstr "La transacción concurrente ha sido entregada, intente nuevamente." - -#: nova/api/openstack/compute/plugins/v3/hosts.py:120 -msgid "The request body invalid" -msgstr "El contenido de la solicitud es inválido." - #: nova/api/openstack/compute/plugins/v3/hypervisors.py:125 msgid "Need parameter 'query' to specify which hypervisor to filter on" msgstr "" @@ -4138,7 +4138,7 @@ msgstr "" "asignada." 
#: nova/api/openstack/compute/plugins/v3/servers.py:412 -#: nova/api/openstack/compute/plugins/v3/servers.py:585 +#: nova/api/openstack/compute/plugins/v3/servers.py:587 msgid "The request body is invalid" msgstr "El cuerpo solicitado es inválido" @@ -4147,41 +4147,41 @@ msgstr "El cuerpo solicitado es inválido" msgid "Invalid flavor_ref provided." msgstr "Se ha proporcionado un flavor_ref inválido." -#: nova/api/openstack/compute/plugins/v3/servers.py:596 +#: nova/api/openstack/compute/plugins/v3/servers.py:598 msgid "host_id cannot be updated." msgstr "No se puede actualizar host_id." -#: nova/api/openstack/compute/plugins/v3/servers.py:741 +#: nova/api/openstack/compute/plugins/v3/servers.py:743 msgid "Invalid image_ref provided." msgstr "La image_ref proporcionada es inválida." -#: nova/api/openstack/compute/plugins/v3/servers.py:760 +#: nova/api/openstack/compute/plugins/v3/servers.py:762 msgid "Missing image_ref attribute" msgstr "Atributo image_ref ausente" -#: nova/api/openstack/compute/plugins/v3/servers.py:767 +#: nova/api/openstack/compute/plugins/v3/servers.py:769 msgid "Missing flavor_ref attribute" msgstr "Atributo flavor_ref ausente." -#: nova/api/openstack/compute/plugins/v3/servers.py:780 +#: nova/api/openstack/compute/plugins/v3/servers.py:782 msgid "Resize request has invalid 'flavor_ref' attribute." msgstr "" "La solicitud de modifiación de tamaño tiene el atributo 'flavor_ref' " "inválido." -#: nova/api/openstack/compute/plugins/v3/servers.py:783 +#: nova/api/openstack/compute/plugins/v3/servers.py:785 msgid "Resize requests require 'flavor_ref' attribute." msgstr "La solicitud de modificación de tamaño requiere el atributo 'flavor_ref'." -#: nova/api/openstack/compute/plugins/v3/servers.py:799 +#: nova/api/openstack/compute/plugins/v3/servers.py:801 msgid "Could not parse image_ref from request." msgstr "No se puede validar image_ref en la solicitud." 
-#: nova/api/openstack/compute/plugins/v3/servers.py:883 +#: nova/api/openstack/compute/plugins/v3/servers.py:885 msgid "create_image entity requires name attribute" msgstr "La entidad create_image requiere el atributo nombre." -#: nova/api/openstack/compute/plugins/v3/servers.py:945 +#: nova/api/openstack/compute/plugins/v3/servers.py:947 msgid "Invalid admin_password" msgstr "admin_password inválido" @@ -4193,14 +4193,14 @@ msgstr "Disabled reason contiene caracteres inválidos o es demasiado larga." msgid "Instance has had its instance_type removed from the DB" msgstr "En la instancia se ha eliminado el tipo de instancia de la base de datos" -#: nova/api/validation/validators.py:61 +#: nova/api/validation/validators.py:62 #, python-format msgid "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" "Conenido inválido para el campo/atributo %(path)s. Valor: %(value)s. " "%(message)s" -#: nova/cells/manager.py:78 +#: nova/cells/manager.py:79 msgid "" "The cells feature of Nova is considered experimental by the OpenStack " "project because it receives much less testing than the rest of Nova. 
This" @@ -4259,12 +4259,12 @@ msgstr "Error al esperar respuestas de células vecinas: %(exc)s" msgid "Unknown method '%(method)s' in compute API" msgstr "Método desconocido '%(method)s' en API de cálculo" -#: nova/cells/messaging.py:1096 +#: nova/cells/messaging.py:1103 #, python-format msgid "Got message to create instance fault: %(instance_fault)s" msgstr "Se ha obtenido mensaje para crear error de instancia: %(instance_fault)s" -#: nova/cells/messaging.py:1119 +#: nova/cells/messaging.py:1126 #, python-format msgid "" "Forcing a sync of instances, project_id=%(projid_str)s, " @@ -4273,30 +4273,30 @@ msgstr "" "Forzando una sincronización de instancias, project_id=%(projid_str)s, " "updated_since=%(since_str)s" -#: nova/cells/messaging.py:1198 +#: nova/cells/messaging.py:1205 #, python-format msgid "No match when trying to update BDM: %(bdm)s" msgstr "No se encontró resultado al intentar actualizar BDM: %(bdm)s" -#: nova/cells/messaging.py:1673 +#: nova/cells/messaging.py:1680 #, python-format msgid "No cell_name for %(method)s() from API" msgstr "No hay cell_name para %(method)s() desde la API" -#: nova/cells/messaging.py:1690 +#: nova/cells/messaging.py:1697 msgid "No cell_name for instance update from API" msgstr "No hay cell_name para actualización de instancia desde la API" -#: nova/cells/messaging.py:1853 +#: nova/cells/messaging.py:1860 #, python-format msgid "Returning exception %s to caller" msgstr "Devolviendo excepción %s al interlocutor" -#: nova/cells/rpcapi.py:369 +#: nova/cells/rpcapi.py:378 msgid "Failed to notify cells of BDM update/create." msgstr "Fallo al notificar las celdas de actualización/creación de BDM." -#: nova/cells/rpcapi.py:385 +#: nova/cells/rpcapi.py:394 msgid "Failed to notify cells of BDM destroy." 
msgstr "Fallo al notiifcar las celdas de destrucción de BDM" @@ -4372,73 +4372,73 @@ msgstr "Mascara de red a insertar en la configuración de openvpn" msgid "Failed to load %s" msgstr "Ha fallado la carga de %s" -#: nova/cmd/baremetal_deploy_helper.py:211 +#: nova/cmd/baremetal_deploy_helper.py:210 #, python-format msgid "parent device '%s' not found" msgstr "El dispositivo principal '%s' no se ha encontrado" -#: nova/cmd/baremetal_deploy_helper.py:214 +#: nova/cmd/baremetal_deploy_helper.py:213 #, python-format msgid "root device '%s' not found" msgstr "No se ha encontrado el dispositivo raíz '%s'" -#: nova/cmd/baremetal_deploy_helper.py:216 +#: nova/cmd/baremetal_deploy_helper.py:215 #, python-format msgid "swap device '%s' not found" msgstr "No se ha encontrado el dispositivo de swap '%s'" -#: nova/cmd/baremetal_deploy_helper.py:218 +#: nova/cmd/baremetal_deploy_helper.py:217 #, python-format msgid "ephemeral device '%s' not found" msgstr "Dispositivo efímero '%s' no encontrado" -#: nova/cmd/baremetal_deploy_helper.py:228 +#: nova/cmd/baremetal_deploy_helper.py:227 msgid "Failed to detect root device UUID." msgstr "Ha fallado la detección del dispositivo raíz UUID." 
-#: nova/cmd/baremetal_deploy_helper.py:252 +#: nova/cmd/baremetal_deploy_helper.py:251 #, python-format msgid "Cmd : %s" msgstr "Cmd : %s" -#: nova/cmd/baremetal_deploy_helper.py:253 +#: nova/cmd/baremetal_deploy_helper.py:252 #, python-format msgid "StdOut : %r" msgstr "StdOut : %r" -#: nova/cmd/baremetal_deploy_helper.py:254 +#: nova/cmd/baremetal_deploy_helper.py:253 #, python-format msgid "StdErr : %r" msgstr "StdErr : %r" -#: nova/cmd/baremetal_deploy_helper.py:282 +#: nova/cmd/baremetal_deploy_helper.py:281 #, python-format msgid "start deployment for node %(node_id)s, params %(params)s" msgstr "" "Se ha iniciado el despliegue del nodo %(node_id)s con parámetros " "%(params)s" -#: nova/cmd/baremetal_deploy_helper.py:291 +#: nova/cmd/baremetal_deploy_helper.py:290 #, python-format msgid "deployment to node %s failed" msgstr "El despligue hacia el nodo %s ha fallado" -#: nova/cmd/baremetal_deploy_helper.py:295 +#: nova/cmd/baremetal_deploy_helper.py:294 #, python-format msgid "deployment to node %s done" msgstr "despliegue hacia el nodo %s completo" -#: nova/cmd/baremetal_deploy_helper.py:317 +#: nova/cmd/baremetal_deploy_helper.py:316 #, python-format msgid "post: environ=%s" msgstr "enviar: environ=%s" -#: nova/cmd/baremetal_deploy_helper.py:336 +#: nova/cmd/baremetal_deploy_helper.py:335 #, python-format msgid "Deploy agent error message: %s" msgstr "Mensaje de error del agente de despliegue: %s" -#: nova/cmd/baremetal_deploy_helper.py:360 +#: nova/cmd/baremetal_deploy_helper.py:359 #, python-format msgid "request is queued: node %(node_id)s, params %(params)s" msgstr "solicitud encolada: nodo %(node_id)s, parámetros %(params)s" @@ -4568,40 +4568,40 @@ msgstr "" "ERROR: comandos de red no están soportados al utilizar la API neutron. " "Utiliza python-neutronclient en su lugar." 
-#: nova/cmd/manage.py:551 nova/tests/test_nova_manage.py:217 +#: nova/cmd/manage.py:551 nova/tests/test_nova_manage.py:218 msgid "id" msgstr "id" -#: nova/cmd/manage.py:552 nova/tests/test_nova_manage.py:218 +#: nova/cmd/manage.py:552 nova/tests/test_nova_manage.py:219 msgid "IPv4" msgstr "IPv4" -#: nova/cmd/manage.py:553 nova/tests/test_nova_manage.py:219 +#: nova/cmd/manage.py:553 nova/tests/test_nova_manage.py:220 msgid "IPv6" msgstr "IPv6" -#: nova/cmd/manage.py:554 nova/tests/test_nova_manage.py:220 +#: nova/cmd/manage.py:554 nova/tests/test_nova_manage.py:221 msgid "start address" msgstr "dirección de inicio" -#: nova/cmd/manage.py:555 nova/tests/test_nova_manage.py:221 +#: nova/cmd/manage.py:555 nova/tests/test_nova_manage.py:222 msgid "DNS1" msgstr "DNS1" -#: nova/cmd/manage.py:556 nova/tests/test_nova_manage.py:222 +#: nova/cmd/manage.py:556 nova/tests/test_nova_manage.py:223 msgid "DNS2" msgstr "DNS2" -#: nova/cmd/manage.py:557 nova/tests/test_nova_manage.py:223 +#: nova/cmd/manage.py:557 nova/tests/test_nova_manage.py:224 msgid "VlanID" msgstr "ID de Vlan" #: nova/cmd/manage.py:558 nova/cmd/manage.py:665 -#: nova/tests/test_nova_manage.py:224 +#: nova/tests/test_nova_manage.py:225 msgid "project" msgstr "proyecto" -#: nova/cmd/manage.py:559 nova/tests/test_nova_manage.py:225 +#: nova/cmd/manage.py:559 nova/tests/test_nova_manage.py:226 msgid "uuid" msgstr "uuid" @@ -4812,16 +4812,16 @@ msgstr "No hay entradas de nova en el registro de sistema!" msgid "No db access allowed in nova-network: %s" msgstr "No se permite acceso a base de datos en nova-network: %s" -#: nova/compute/api.py:362 +#: nova/compute/api.py:353 msgid "Cannot run any more instances of this type." msgstr "No se pueden ejecutar más instancias de este tipo. " -#: nova/compute/api.py:369 +#: nova/compute/api.py:360 #, python-format msgid "Can only run %s more instances of this type." msgstr "Sólo se pueden ejecutar %s instancias más de este tipo. 
" -#: nova/compute/api.py:381 +#: nova/compute/api.py:372 #, python-format msgid "" "%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)d " @@ -4830,7 +4830,7 @@ msgstr "" "Cuota %(overs)s excedida para %(pid)s, intentando ejecutar %(min_count)d " "intsancias. %(msg)s" -#: nova/compute/api.py:385 +#: nova/compute/api.py:376 #, python-format msgid "" "%(overs)s quota exceeded for %(pid)s, tried to run between %(min_count)d " @@ -4839,11 +4839,11 @@ msgstr "" "Cuota %(overs)s excedida para %(pid)s, intentando ejecutar entre " "%(min_count)d y %(max_count)d instancias. %(msg)s" -#: nova/compute/api.py:406 +#: nova/compute/api.py:397 msgid "Metadata type should be dict." msgstr "El tipo de metadato debería ser dict." -#: nova/compute/api.py:412 +#: nova/compute/api.py:403 #, python-format msgid "" "Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " @@ -4852,45 +4852,45 @@ msgstr "" "Se ha superado la cuota para %(pid)s, se ha intentado definir " "%(num_metadata)s propiedades de metadatos" -#: nova/compute/api.py:424 +#: nova/compute/api.py:415 #, python-format msgid "Metadata property key '%s' is not a string." msgstr "La clave de propiedad de los metadatos '%s' no es una cadena." -#: nova/compute/api.py:427 +#: nova/compute/api.py:418 #, python-format msgid "Metadata property value '%(v)s' for key '%(k)s' is not a string." msgstr "" "El valor del atributo de metadatos '%(v)s' para la clave '%(k)s' no es " "una cadena." 
-#: nova/compute/api.py:431 +#: nova/compute/api.py:422 msgid "Metadata property key blank" msgstr "Clave de propiedad de metadatos en blanco" -#: nova/compute/api.py:434 +#: nova/compute/api.py:425 msgid "Metadata property key greater than 255 characters" msgstr "Clave de propiedad metadatos de más de 255 caracteres " -#: nova/compute/api.py:437 +#: nova/compute/api.py:428 msgid "Metadata property value greater than 255 characters" msgstr "Valor de propiedad de metadatos de más de 255 caracteres " -#: nova/compute/api.py:574 +#: nova/compute/api.py:565 msgid "Failed to set instance name using multi_instance_display_name_template." msgstr "" "Se ha encontrado un error en la definición del nombre de instancia " "mediante multi_instance_display_name_template." -#: nova/compute/api.py:676 +#: nova/compute/api.py:667 msgid "Cannot attach one or more volumes to multiple instances" msgstr "No se pueden conectar uno o más volúmenes a varias instancias" -#: nova/compute/api.py:718 +#: nova/compute/api.py:709 msgid "The requested availability zone is not available" msgstr "La zona de disponibilidad solicitada no está disponible" -#: nova/compute/api.py:1119 +#: nova/compute/api.py:1110 msgid "" "Images with destination_type 'volume' need to have a non-zero size " "specified" @@ -4898,13 +4898,13 @@ msgstr "" "Las imágenes con destination_type 'colume? necesitan tener un tamaño " "especificado diferente a cero" -#: nova/compute/api.py:1150 +#: nova/compute/api.py:1141 msgid "More than one swap drive requested." msgstr "Más de un controlador de intercambio ha sido solicitado." -#: nova/compute/api.py:1299 -#: nova/tests/api/openstack/compute/test_servers.py:3122 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2460 +#: nova/compute/api.py:1290 +#: nova/tests/api/openstack/compute/test_servers.py:3145 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2484 msgid "" "Unable to launch multiple instances with a single configured port ID. 
" "Please launch your instance one by one with different ports." @@ -4913,23 +4913,27 @@ msgstr "" "puerto configurado. Por favor lanza tu instancia una por una con puertos " "diferentes." -#: nova/compute/api.py:1401 +#: nova/compute/api.py:1311 +msgid "max_count cannot be greater than 1 if an fixed_ip is specified." +msgstr "" + +#: nova/compute/api.py:1415 msgid "instance termination disabled" msgstr "terminación de instancia inhabilitada" -#: nova/compute/api.py:1416 +#: nova/compute/api.py:1430 #, python-format msgid "Working on deleting snapshot %s from shelved instance..." msgstr "" "Trabajando en la remoción de la instantánea %s de la instancia " "almacenada..." -#: nova/compute/api.py:1423 +#: nova/compute/api.py:1437 #, python-format msgid "Failed to delete snapshot from shelved instance (%s)." msgstr "Fallo al remover la instantánea de la instancia almacenada (%s)." -#: nova/compute/api.py:1427 +#: nova/compute/api.py:1441 msgid "" "Something wrong happened when trying to delete snapshot from shelved " "instance." @@ -4937,13 +4941,13 @@ msgstr "" "Algo malo ha pasado al intentar eliminar la instantánea de la imagen " "almacenada." 
-#: nova/compute/api.py:1492 +#: nova/compute/api.py:1506 msgid "Instance is already in deleting state, ignoring this request" msgstr "" "La instancia ya se encuentra en estado de remoción, ignorando esta " "solicitud" -#: nova/compute/api.py:1540 +#: nova/compute/api.py:1553 #, python-format msgid "" "Found an unconfirmed migration during delete, id: %(id)s, status: " @@ -4952,105 +4956,109 @@ msgstr "" "Se ha encontrado una migración no confirmada durante la remoción, " "identificador: %(id)s, estado: %(status)s" -#: nova/compute/api.py:1550 +#: nova/compute/api.py:1563 msgid "Instance may have been confirmed during delete" msgstr "la instanacia debe haber sido confirmada durante la remoción" -#: nova/compute/api.py:1567 +#: nova/compute/api.py:1580 #, python-format msgid "Migration %s may have been confirmed during delete" msgstr "La migración %s debe haber sido conifrmada durante la remoción" -#: nova/compute/api.py:1603 +#: nova/compute/api.py:1615 #, python-format msgid "Flavor %d not found" msgstr "El sabor %d no ha sido encontrado" -#: nova/compute/api.py:1621 +#: nova/compute/api.py:1633 #, python-format msgid "instance's host %s is down, deleting from database" msgstr "el host de la instancia %s está inactivos, se suprime de la base de datos" -#: nova/compute/api.py:1648 nova/compute/manager.py:2279 +#: nova/compute/api.py:1660 #, python-format msgid "Ignoring volume cleanup failure due to %s" msgstr "Ignorando la anomalía de limpieza de volumen debido a %s " -#: nova/compute/api.py:2043 +#: nova/compute/api.py:2061 #, python-format msgid "snapshot for %s" msgstr "instantánea para %s " -#: nova/compute/api.py:2415 +#: nova/compute/api.py:2399 +msgid "Resize to zero disk flavor is not allowed." +msgstr "" + +#: nova/compute/api.py:2438 #, python-format msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance." msgstr "" "%(overs)s cuota excedida para %(pid)s, se ha intentado redimensionar la " "instancia. 
" -#: nova/compute/api.py:2584 +#: nova/compute/api.py:2613 msgid "Cannot rescue a volume-backed instance" msgstr "No se puede rescatar una instancia de volume-backed" -#: nova/compute/api.py:2811 +#: nova/compute/api.py:2840 msgid "Volume must be attached in order to detach." msgstr "El volumen debe estar conectado para desconectarse." -#: nova/compute/api.py:2831 +#: nova/compute/api.py:2860 msgid "Old volume is attached to a different instance." msgstr "Volumen antigüo está ligado a una instancia diferente." -#: nova/compute/api.py:2834 +#: nova/compute/api.py:2863 msgid "New volume must be detached in order to swap." msgstr "" "El nuevo volumen debe ser desasociado para poder activar la memoria de " "intercambio." -#: nova/compute/api.py:2837 +#: nova/compute/api.py:2866 msgid "New volume must be the same size or larger." msgstr "El nuevo volumen debe ser del mismo o de mayor tamaño." -#: nova/compute/api.py:3032 +#: nova/compute/api.py:3067 #, python-format msgid "Instance compute service state on %s expected to be down, but it was up." msgstr "" "El estado de la instancia del servicio de cómputo en %s debería ser " "inactivo, pero se encontraba activo." 
-#: nova/compute/api.py:3335 +#: nova/compute/api.py:3369 msgid "Host aggregate is not empty" msgstr "El agregado de anfitrión no está vacío" -#: nova/compute/api.py:3368 +#: nova/compute/api.py:3402 #, python-format msgid "More than 1 AZ for host %s" msgstr "" -#: nova/compute/api.py:3403 +#: nova/compute/api.py:3437 #, python-format msgid "Host already in availability zone %s" msgstr "Anfitrión actualmente en zona de disponibilidad %s" -#: nova/compute/api.py:3491 nova/tests/compute/test_keypairs.py:135 +#: nova/compute/api.py:3525 nova/tests/compute/test_keypairs.py:135 msgid "Keypair name contains unsafe characters" msgstr "El nombre de par de claves contiene caracteres no seguros" -#: nova/compute/api.py:3495 nova/tests/compute/test_keypairs.py:127 +#: nova/compute/api.py:3529 nova/tests/compute/test_keypairs.py:127 #: nova/tests/compute/test_keypairs.py:131 msgid "Keypair name must be between 1 and 255 characters long" msgstr "El nombre de par de claves debe tener entre 1 y 255 caracteres de longitud" -#: nova/compute/api.py:3583 +#: nova/compute/api.py:3617 #, python-format msgid "Security group %s is not a string or unicode" msgstr "El grupo de seguridad %s no es una serie o Unicode " -#: nova/compute/api.py:3586 +#: nova/compute/api.py:3620 #, python-format msgid "Security group %s cannot be empty." msgstr "El grupo de seguridad %s no puede estar vacío." -#: nova/compute/api.py:3594 +#: nova/compute/api.py:3628 #, python-format msgid "" "Value (%(value)s) for parameter Group%(property)s is invalid. Content " @@ -5059,58 +5067,58 @@ msgstr "" "El valor (%(value)s) para el parámetro Group%(property)s es inválido. El " "contenido se limita a '%(allowed)s'." -#: nova/compute/api.py:3600 +#: nova/compute/api.py:3634 #, python-format msgid "Security group %s should not be greater than 255 characters." msgstr "El grupo de seguridad %s no debe tener más de 255 caracteres. 
" -#: nova/compute/api.py:3618 +#: nova/compute/api.py:3652 msgid "Quota exceeded, too many security groups." msgstr "Cuota superada, demasiados grupos de seguridad. " -#: nova/compute/api.py:3621 +#: nova/compute/api.py:3655 #, python-format msgid "Create Security Group %s" msgstr "Crear Grupo de Seguridad %s" -#: nova/compute/api.py:3633 +#: nova/compute/api.py:3667 #, python-format msgid "Security group %s already exists" msgstr "El grupo de seguridad %s ya existe" -#: nova/compute/api.py:3646 +#: nova/compute/api.py:3680 #, python-format msgid "Unable to update system group '%s'" msgstr "Incapaz de actualizar el grupo de sistema '%s'" -#: nova/compute/api.py:3708 +#: nova/compute/api.py:3742 #, python-format msgid "Unable to delete system group '%s'" msgstr "No se ha podido suprimir el grupo de sistemas '%s'" -#: nova/compute/api.py:3713 +#: nova/compute/api.py:3747 msgid "Security group is still in use" msgstr "El grupo de seguridad aún se está utilizando" -#: nova/compute/api.py:3723 +#: nova/compute/api.py:3757 msgid "Failed to update usages deallocating security group" msgstr "No se han podido actualizar los usos desasignando el grupo de seguridad " -#: nova/compute/api.py:3726 +#: nova/compute/api.py:3760 #, python-format msgid "Delete security group %s" msgstr "Borrar grupo de seguridad %s" -#: nova/compute/api.py:3802 nova/compute/api.py:3885 +#: nova/compute/api.py:3836 nova/compute/api.py:3919 #, python-format msgid "Rule (%s) not found" msgstr "No se ha encontrado la regla (%s)" -#: nova/compute/api.py:3818 +#: nova/compute/api.py:3852 msgid "Quota exceeded, too many security group rules." 
msgstr "Cuota superada, demasiadas reglas de grupo de seguridad " -#: nova/compute/api.py:3821 +#: nova/compute/api.py:3855 #, python-format msgid "" "Security group %(name)s added %(protocol)s ingress " @@ -5119,7 +5127,7 @@ msgstr "" "Grupo de seguridad %(name)s ha agregado %(protocol)s al ingreso " "(%(from_port)s:%(to_port)s)" -#: nova/compute/api.py:3836 +#: nova/compute/api.py:3870 #, python-format msgid "" "Security group %(name)s removed %(protocol)s ingress " @@ -5128,7 +5136,7 @@ msgstr "" "El grupo de seguridad %(name)s ha removido %(protocol)s del ingreso " "(%(from_port)s:%(to_port)s)" -#: nova/compute/api.py:3892 +#: nova/compute/api.py:3926 msgid "Security group id should be integer" msgstr "El id de grupo de seguridad debe ser un entero" @@ -5230,26 +5238,26 @@ msgstr "" "Los nombres de las claves solo pueden contener caracteres alfanuméricos, " "punto, guión, guión bajo, dos puntos y espacios." -#: nova/compute/manager.py:278 +#: nova/compute/manager.py:283 #, python-format msgid "Task possibly preempted: %s" msgstr "Tarea posiblemente preapropiada: %s" -#: nova/compute/manager.py:360 nova/compute/manager.py:2849 +#: nova/compute/manager.py:365 nova/compute/manager.py:2885 #, python-format msgid "Error while trying to clean up image %s" msgstr "Error al intentar limpiar imagen %s" -#: nova/compute/manager.py:501 +#: nova/compute/manager.py:506 msgid "Instance event failed" msgstr "El evento de instancia ha fallado" -#: nova/compute/manager.py:600 +#: nova/compute/manager.py:605 #, python-format msgid "%s is not a valid node managed by this compute host." msgstr "%s no es un nodo válido administrado por este anfitrión de cómputo." -#: nova/compute/manager.py:698 +#: nova/compute/manager.py:704 #, python-format msgid "" "Deleting instance as its host (%(instance_host)s) is not equal to our " @@ -5258,11 +5266,11 @@ msgstr "" "Suprimiendo instancia porque el host (%(instance_host)s) no es igual a " "nuestro host (%(our_host)s)." 
-#: nova/compute/manager.py:713 +#: nova/compute/manager.py:719 msgid "Instance has been marked deleted already, removing it from the hypervisor." msgstr "La instancia ya ha sido marcada como eliminada, removiendo del hipervisor." -#: nova/compute/manager.py:733 +#: nova/compute/manager.py:739 msgid "" "Hypervisor driver does not support instance shared storage check, " "assuming it's not on shared storage" @@ -5270,15 +5278,15 @@ msgstr "" "El hipervisor no soporta la validación de almacenamiento compartido entre" " instancias, asumiendo que no se encuentra en almacenamiento compartido." -#: nova/compute/manager.py:739 +#: nova/compute/manager.py:745 msgid "Failed to check if instance shared" msgstr "Fallo al verificar si la instancia se encuentra compartida" -#: nova/compute/manager.py:805 nova/compute/manager.py:856 +#: nova/compute/manager.py:811 nova/compute/manager.py:862 msgid "Failed to complete a deletion" msgstr "Fallo durante la compleción una remoción" -#: nova/compute/manager.py:838 +#: nova/compute/manager.py:844 msgid "" "Service started deleting the instance during the previous run, but did " "not finish. Restarting the deletion now." @@ -5286,7 +5294,7 @@ msgstr "" "El servicio ha iniciado la remoción de la instancia durante la ejecución " "previa, pero no ha finalizado. Reiniciando la remoción ahora." 
-#: nova/compute/manager.py:879 +#: nova/compute/manager.py:885 #, python-format msgid "" "Instance in transitional state (%(task_state)s) at start-up and power " @@ -5295,105 +5303,105 @@ msgstr "" "Instancia en estado transicional (%(task_state)s) al arranque y estado de" " energía es (%(power_state)s), limpiando el estado de la tarea" -#: nova/compute/manager.py:897 +#: nova/compute/manager.py:903 msgid "Failed to stop instance" msgstr "Fallo al detener instancia" -#: nova/compute/manager.py:909 +#: nova/compute/manager.py:915 msgid "Failed to start instance" msgstr "Fallo al iniciar instancia" -#: nova/compute/manager.py:934 +#: nova/compute/manager.py:940 msgid "Failed to revert crashed migration" msgstr "Se ha encontrado un error en al revertir la migración colgada" -#: nova/compute/manager.py:937 +#: nova/compute/manager.py:943 msgid "Instance found in migrating state during startup. Resetting task_state" msgstr "" "Se ha encontrado una instancia en estado de migración durante el inicio. " "Restableciendo task_state" -#: nova/compute/manager.py:954 +#: nova/compute/manager.py:960 msgid "Rebooting instance after nova-compute restart." msgstr "Rearrancando instancia después de reiniciar nova-compute. 
" -#: nova/compute/manager.py:964 +#: nova/compute/manager.py:970 msgid "Hypervisor driver does not support resume guests" msgstr "El controlador de hipervisor no soporta reanudar invitados " -#: nova/compute/manager.py:969 +#: nova/compute/manager.py:975 msgid "Failed to resume instance" msgstr "No se ha podido reanudar la instancia" -#: nova/compute/manager.py:978 +#: nova/compute/manager.py:984 msgid "Hypervisor driver does not support firewall rules" msgstr "El controlador de hipervisor no soporta reglas de cortafuegos " -#: nova/compute/manager.py:1003 +#: nova/compute/manager.py:1009 #, python-format -msgid "Lifecycle event %(state)d on VM %(uuid)s" -msgstr "Suceso de ciclo de vida %(state)d en máquina virtual %(uuid)s" +msgid "VM %(state)s (Lifecycle Event)" +msgstr "" -#: nova/compute/manager.py:1019 +#: nova/compute/manager.py:1025 #, python-format msgid "Unexpected power state %d" msgstr "Estado de alimentación inesperado %d" -#: nova/compute/manager.py:1124 +#: nova/compute/manager.py:1130 msgid "Hypervisor driver does not support security groups." msgstr "El controlador del hipervisor no soporta grupos de seguridad." -#: nova/compute/manager.py:1164 +#: nova/compute/manager.py:1168 #, python-format msgid "Volume id: %s finished being created but was not set as 'available'" msgstr "" "El volumen con id: %s ha finalizado su creación pero no ha sido marcado " "como 'disponible'" -#: nova/compute/manager.py:1222 nova/compute/manager.py:1978 +#: nova/compute/manager.py:1225 nova/compute/manager.py:1982 msgid "Success" msgstr "Éxito" -#: nova/compute/manager.py:1246 +#: nova/compute/manager.py:1249 msgid "Instance disappeared before we could start it" msgstr "La instancia ha desaparecido antes de poder iniciarla" -#: nova/compute/manager.py:1274 +#: nova/compute/manager.py:1276 msgid "Anti-affinity instance group policy was violated." msgstr "la política de grupo de anti-afinidad fue violada." 
-#: nova/compute/manager.py:1351 +#: nova/compute/manager.py:1353 msgid "Failed to dealloc network for deleted instance" msgstr "No se ha podido desasignar la red para la instancia suprimida" -#: nova/compute/manager.py:1356 +#: nova/compute/manager.py:1358 msgid "Instance disappeared during build" msgstr "La instancia despareció durante su construcción" -#: nova/compute/manager.py:1372 +#: nova/compute/manager.py:1374 msgid "Failed to dealloc network for failed instance" msgstr "Fallo al desasociar red para la instancia fallida" -#: nova/compute/manager.py:1399 +#: nova/compute/manager.py:1401 #, python-format msgid "Error: %s" msgstr "Error: %s" -#: nova/compute/manager.py:1445 nova/compute/manager.py:3473 +#: nova/compute/manager.py:1447 nova/compute/manager.py:3509 msgid "Error trying to reschedule" msgstr "Error al intentar volver a programar " -#: nova/compute/manager.py:1500 +#: nova/compute/manager.py:1503 msgid "Instance build timed out. Set to error state." msgstr "" "La compilación de instancia ha excedido el tiempo de espera. Se ha estado" " en estado erróneo. " -#: nova/compute/manager.py:1510 nova/compute/manager.py:1870 +#: nova/compute/manager.py:1513 nova/compute/manager.py:1873 msgid "Starting instance..." msgstr "Iniciando instancia..." -#: nova/compute/manager.py:1528 +#: nova/compute/manager.py:1531 #, python-format msgid "" "Treating negative config value (%(retries)s) for " @@ -5402,135 +5410,135 @@ msgstr "" "Tratando el valor negativo de configuración (%(retries)s) para " "'network_allocate_retries' como 0." 
-#: nova/compute/manager.py:1553 +#: nova/compute/manager.py:1556 #, python-format msgid "Instance failed network setup after %(attempts)d attempt(s)" msgstr "" "La configuración de red de la instancia falló después de %(attempts)d " "intento(s)" -#: nova/compute/manager.py:1557 +#: nova/compute/manager.py:1560 #, python-format msgid "Instance failed network setup (attempt %(attempt)d of %(attempts)d)" msgstr "" "Fallo de configuración de red de la instancia (intento %(attempt)d de " "%(attempts)d)" -#: nova/compute/manager.py:1738 +#: nova/compute/manager.py:1741 msgid "Instance failed block device setup" msgstr "Ha fallado la configuración de dispositivo de bloque en la instancia" -#: nova/compute/manager.py:1758 nova/compute/manager.py:2086 -#: nova/compute/manager.py:3985 +#: nova/compute/manager.py:1761 nova/compute/manager.py:2098 +#: nova/compute/manager.py:4041 msgid "Instance failed to spawn" msgstr "La instancia no se ha podido generar" -#: nova/compute/manager.py:1937 +#: nova/compute/manager.py:1941 msgid "Unexpected build failure, not rescheduling build." msgstr "Fallo de compilación inesperado, no se reprogramará la compilación." -#: nova/compute/manager.py:2002 +#: nova/compute/manager.py:2006 #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" -#: nova/compute/manager.py:2008 nova/compute/manager.py:2048 +#: nova/compute/manager.py:2012 nova/compute/manager.py:2060 msgid "Failed to allocate network(s)" msgstr "Fallo al asociar red(es)" -#: nova/compute/manager.py:2012 nova/compute/manager.py:2050 +#: nova/compute/manager.py:2016 nova/compute/manager.py:2062 msgid "Failed to allocate the network(s), not rescheduling." msgstr "Fallo al asociar la(s) red(es), no se reprogramará." 
-#: nova/compute/manager.py:2074 +#: nova/compute/manager.py:2086 msgid "Failure prepping block device" msgstr "Fallo al preparar el dispositivo de bloques" -#: nova/compute/manager.py:2076 +#: nova/compute/manager.py:2088 msgid "Failure prepping block device." msgstr "Fallo al preparar el dispositivo de bloque." -#: nova/compute/manager.py:2099 +#: nova/compute/manager.py:2111 msgid "Could not clean up failed build, not rescheduling" msgstr "No se puede limpiar la compilación fallida, no se reprogramará." -#: nova/compute/manager.py:2109 +#: nova/compute/manager.py:2121 msgid "Failed to deallocate networks" msgstr "Fallo al desasociar redes" -#: nova/compute/manager.py:2130 +#: nova/compute/manager.py:2142 msgid "Failed to cleanup volumes for failed build, not rescheduling" msgstr "" "Fallo al limpiar los volúmenes para la compilación fallida, no se " "reprogramará" -#: nova/compute/manager.py:2169 +#: nova/compute/manager.py:2181 msgid "Failed to deallocate network for instance." msgstr "Se ha encontrado un error al desasignar la red para la instancia" -#: nova/compute/manager.py:2178 +#: nova/compute/manager.py:2202 #, python-format msgid "%(action_str)s instance" msgstr "%(action_str)s instancia" -#: nova/compute/manager.py:2222 +#: nova/compute/manager.py:2246 #, python-format msgid "Ignoring DiskNotFound: %s" msgstr "Ignorando DiskNotFound: %s" -#: nova/compute/manager.py:2225 +#: nova/compute/manager.py:2249 #, python-format msgid "Ignoring VolumeNotFound: %s" msgstr "Ignorando VolumeNotFound: %s" -#: nova/compute/manager.py:2324 +#: nova/compute/manager.py:2353 msgid "Instance disappeared during terminate" msgstr "La instancia ha desaparecido durante la terminación" -#: nova/compute/manager.py:2330 nova/compute/manager.py:3653 -#: nova/compute/manager.py:5671 +#: nova/compute/manager.py:2359 nova/compute/manager.py:3689 +#: nova/compute/manager.py:5769 msgid "Setting instance vm_state to ERROR" msgstr "Estableciendo el vm_state de la instancia a ERROR" -#: 
nova/compute/manager.py:2503 +#: nova/compute/manager.py:2539 msgid "Rebuilding instance" msgstr "Volver a crear instancia" -#: nova/compute/manager.py:2516 +#: nova/compute/manager.py:2552 msgid "Invalid state of instance files on shared storage" msgstr "Estado no válido de archivos de instancia en almacenamiento compartido" -#: nova/compute/manager.py:2520 +#: nova/compute/manager.py:2556 msgid "disk on shared storage, recreating using existing disk" msgstr "" "disco en almacenamiento compartido, volviendo a crear utilizando disco " "existente" -#: nova/compute/manager.py:2524 +#: nova/compute/manager.py:2560 #, python-format msgid "disk not on shared storage, rebuilding from: '%s'" msgstr "El disco on está en almacenamiento compartido, reconstruyendo desde: '%s'" -#: nova/compute/manager.py:2535 nova/compute/manager.py:4790 +#: nova/compute/manager.py:2571 nova/compute/manager.py:4884 #, python-format msgid "Failed to get compute_info for %s" msgstr "Fallo al obtener compute_info para %s" -#: nova/compute/manager.py:2611 +#: nova/compute/manager.py:2647 #, python-format msgid "bringing vm to original state: '%s'" msgstr "poniendo vm en estado original: '%s'" -#: nova/compute/manager.py:2642 +#: nova/compute/manager.py:2678 #, python-format msgid "Detaching from volume api: %s" msgstr "Desconectando de la API del volumen: %s" -#: nova/compute/manager.py:2669 +#: nova/compute/manager.py:2705 msgid "Rebooting instance" msgstr "Rearrancando instancia" -#: nova/compute/manager.py:2686 +#: nova/compute/manager.py:2722 #, python-format msgid "" "trying to reboot a non-running instance: (state: %(state)s expected: " @@ -5539,24 +5547,24 @@ msgstr "" "intentando rearrancar una instancia que no se está ejecutando: (estado: " "%(state)s se esperaba: %(running)s)" -#: nova/compute/manager.py:2722 +#: nova/compute/manager.py:2758 msgid "Reboot failed but instance is running" msgstr "Ha fallado el reinicio pero la instancia se mantiene en ejecución" -#: 
nova/compute/manager.py:2730 +#: nova/compute/manager.py:2766 #, python-format msgid "Cannot reboot instance: %s" msgstr "No se puede reiniciar instancia: %s" -#: nova/compute/manager.py:2742 +#: nova/compute/manager.py:2778 msgid "Instance disappeared during reboot" msgstr "La instancia ha desaparecido durante el rearranque" -#: nova/compute/manager.py:2810 +#: nova/compute/manager.py:2846 msgid "instance snapshotting" msgstr "creación de instantánea de instancia" -#: nova/compute/manager.py:2816 +#: nova/compute/manager.py:2852 #, python-format msgid "" "trying to snapshot a non-running instance: (state: %(state)s expected: " @@ -5565,37 +5573,37 @@ msgstr "" "intentando hacer una instantánea de una instancia que no se está " "ejecutando: (estado: %(state)s se esperaba: %(running)s)" -#: nova/compute/manager.py:2854 +#: nova/compute/manager.py:2890 msgid "Image not found during snapshot" msgstr "No se ha encontrado la imagen durante la instantánea" -#: nova/compute/manager.py:2936 +#: nova/compute/manager.py:2972 #, python-format msgid "Failed to set admin password. Instance %s is not running" msgstr "" "No se ha podido establecer contraseña de administrador. La instancia %s " "no está ejecutando" -#: nova/compute/manager.py:2943 +#: nova/compute/manager.py:2979 msgid "Root password set" msgstr "Contraseña raíz establecida" -#: nova/compute/manager.py:2948 +#: nova/compute/manager.py:2984 msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "" "esta instancia de invitado o controlador no implementa set_admin_password" " ." 
-#: nova/compute/manager.py:2961 +#: nova/compute/manager.py:2997 #, python-format msgid "set_admin_password failed: %s" msgstr "set_admin_password ha fallado: %s" -#: nova/compute/manager.py:2967 +#: nova/compute/manager.py:3003 msgid "error setting admin password" msgstr "error al establecer contraseña de administrador" -#: nova/compute/manager.py:2983 +#: nova/compute/manager.py:3019 #, python-format msgid "" "trying to inject a file into a non-running (state: %(current_state)s " @@ -5604,12 +5612,12 @@ msgstr "" "intentando inyectar un archivo hacia un inactivo (estado: " "%(current_state)s esperado: %(expected_state)s)" -#: nova/compute/manager.py:2988 +#: nova/compute/manager.py:3024 #, python-format msgid "injecting file to %s" msgstr "inyectando archivo a %s" -#: nova/compute/manager.py:3006 +#: nova/compute/manager.py:3042 msgid "" "Unable to find a different image to use for rescue VM, using instance's " "current image" @@ -5617,34 +5625,34 @@ msgstr "" "No se ha podido encontrar una imagen diferente para utilizarla para VM de" " rescate, se utiliza la imagen actual de la instancia" -#: nova/compute/manager.py:3025 +#: nova/compute/manager.py:3061 msgid "Rescuing" msgstr "Rescatando" -#: nova/compute/manager.py:3046 +#: nova/compute/manager.py:3082 msgid "Error trying to Rescue Instance" msgstr "Error al intentar Rescatar Instancia" -#: nova/compute/manager.py:3050 +#: nova/compute/manager.py:3086 #, python-format msgid "Driver Error: %s" msgstr "Error de dispositivo: %s" -#: nova/compute/manager.py:3073 +#: nova/compute/manager.py:3109 msgid "Unrescuing" msgstr "Cancelando rescate" -#: nova/compute/manager.py:3144 +#: nova/compute/manager.py:3180 #, python-format msgid "Migration %s is not found during confirmation" msgstr "La migración %s no ha sido encontrada durante la confirmación" -#: nova/compute/manager.py:3149 +#: nova/compute/manager.py:3185 #, python-format msgid "Migration %s is already confirmed" msgstr "La migración %s ya ha sido confirmada" 
-#: nova/compute/manager.py:3153 +#: nova/compute/manager.py:3189 #, python-format msgid "" "Unexpected confirmation status '%(status)s' of migration %(id)s, exit " @@ -5653,118 +5661,118 @@ msgstr "" "Estado de confirmación inesperado '%(status)s' de la migración %(id)s, " "salir del proceso de confirmación" -#: nova/compute/manager.py:3167 +#: nova/compute/manager.py:3203 msgid "Instance is not found during confirmation" msgstr "La instancia no ha sido encontrada durante la confirmación" -#: nova/compute/manager.py:3348 +#: nova/compute/manager.py:3384 #, python-format msgid "Updating instance to original state: '%s'" msgstr "Actualizando el estado original de instancia hacia: '%s'" -#: nova/compute/manager.py:3371 +#: nova/compute/manager.py:3407 msgid "Instance has no source host" msgstr "La instancia no tiene ningún host de origen" -#: nova/compute/manager.py:3377 +#: nova/compute/manager.py:3413 msgid "destination same as source!" msgstr "destino igual que origen" -#: nova/compute/manager.py:3395 +#: nova/compute/manager.py:3431 msgid "Migrating" msgstr "Migrando" -#: nova/compute/manager.py:3659 +#: nova/compute/manager.py:3695 #, python-format msgid "Failed to rollback quota for failed finish_resize: %s" msgstr "Fallo al revertir las cuotas para un finish_resize fallido: %s" -#: nova/compute/manager.py:3719 +#: nova/compute/manager.py:3755 msgid "Pausing" msgstr "Poniéndose en pausa" -#: nova/compute/manager.py:3736 +#: nova/compute/manager.py:3772 msgid "Unpausing" msgstr "Cancelando la pausa" -#: nova/compute/manager.py:3777 +#: nova/compute/manager.py:3813 nova/compute/manager.py:3830 msgid "Retrieving diagnostics" msgstr "Recuperando diagnósticos" -#: nova/compute/manager.py:3812 +#: nova/compute/manager.py:3866 msgid "Resuming" msgstr "Reanudando" -#: nova/compute/manager.py:4028 +#: nova/compute/manager.py:4084 msgid "Get console output" msgstr "Obtener salida de consola " -#: nova/compute/manager.py:4227 +#: nova/compute/manager.py:4283 #, 
python-format msgid "Attaching volume %(volume_id)s to %(mountpoint)s" msgstr "Conectando el volumen %(volume_id)s a %(mountpoint)s" -#: nova/compute/manager.py:4236 +#: nova/compute/manager.py:4292 #, python-format msgid "Failed to attach %(volume_id)s at %(mountpoint)s" msgstr "Fallo al asociar %(volume_id)s en %(mountpoint)s" -#: nova/compute/manager.py:4252 +#: nova/compute/manager.py:4308 #, python-format msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" msgstr "Desconectar el volumen %(volume_id)s del punto de montaje %(mp)s" -#: nova/compute/manager.py:4263 +#: nova/compute/manager.py:4319 msgid "Detaching volume from unknown instance" msgstr "Desconectando volumen de instancia desconocida " -#: nova/compute/manager.py:4275 +#: nova/compute/manager.py:4331 #, python-format msgid "Failed to detach volume %(volume_id)s from %(mp)s" msgstr "No se ha podido desconectar el volumen %(volume_id)s de %(mp)s" -#: nova/compute/manager.py:4348 +#: nova/compute/manager.py:4404 #, python-format msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" msgstr "Fallo para intercambiar volúmen %(old_volume_id)s por %(new_volume_id)s" -#: nova/compute/manager.py:4355 +#: nova/compute/manager.py:4411 #, python-format msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" msgstr "" "Fallo al conectar hacia al volúmen %(volume_id)s con el volumen en " "%(mountpoint)s" -#: nova/compute/manager.py:4442 +#: nova/compute/manager.py:4504 #, python-format msgid "allocate_port_for_instance returned %(ports)s ports" msgstr "allocate_port_for_instance ha regresado %(ports)s puertos" -#: nova/compute/manager.py:4462 +#: nova/compute/manager.py:4524 #, python-format msgid "Port %s is not attached" msgstr "El puerto %s no se encuentra asignado" -#: nova/compute/manager.py:4474 nova/tests/compute/test_compute.py:10545 +#: nova/compute/manager.py:4536 nova/tests/compute/test_compute.py:10612 #, python-format msgid "Host %s not found" msgstr "No 
se ha encontrado el host %s" -#: nova/compute/manager.py:4628 +#: nova/compute/manager.py:4690 #, python-format msgid "Pre live migration failed at %s" msgstr "Previo a migración en vivo falló en %s" -#: nova/compute/manager.py:4658 +#: nova/compute/manager.py:4753 msgid "_post_live_migration() is started.." msgstr "Se ha iniciado _post_live_migration()." -#: nova/compute/manager.py:4731 +#: nova/compute/manager.py:4825 #, python-format msgid "Migrating instance to %s finished successfully." msgstr "La migración de la instancia hacia %s ha finalizado exitosamente." -#: nova/compute/manager.py:4733 +#: nova/compute/manager.py:4827 msgid "" "You may see the error \"libvirt: QEMU error: Domain not found: no domain " "with matching name.\" This error can be safely ignored." @@ -5773,15 +5781,15 @@ msgstr "" "encontrado: ningún dominio con un nombre coincidente.\" Este error se " "puede ignorar sin ningún riesgo." -#: nova/compute/manager.py:4758 +#: nova/compute/manager.py:4852 msgid "Post operation of migration started" msgstr "Se ha iniciado la operación posterior de migración" -#: nova/compute/manager.py:4967 +#: nova/compute/manager.py:5057 msgid "An error occurred while refreshing the network cache." msgstr "Ha ocurrido un error al actualizar el cache de red." 
-#: nova/compute/manager.py:5021 +#: nova/compute/manager.py:5110 #, python-format msgid "" "Found %(migration_count)d unconfirmed migrations older than " @@ -5790,12 +5798,12 @@ msgstr "" "Se han encontrado %(migration_count)d migraciones sin confirmar de más de" " %(confirm_window)d segundos" -#: nova/compute/manager.py:5026 +#: nova/compute/manager.py:5115 #, python-format msgid "Setting migration %(migration_id)s to error: %(reason)s" msgstr "Estableciendo la %(migration_id)s en error: %(reason)s" -#: nova/compute/manager.py:5035 +#: nova/compute/manager.py:5124 #, python-format msgid "" "Automatically confirming migration %(migration_id)s for instance " @@ -5804,32 +5812,32 @@ msgstr "" "Confirmando automáticamente la migración %(migration_id)s para la " "instancia %(instance_uuid)s" -#: nova/compute/manager.py:5045 +#: nova/compute/manager.py:5134 #, python-format msgid "Instance %s not found" msgstr "No se ha encontrado la instancia %s" -#: nova/compute/manager.py:5050 +#: nova/compute/manager.py:5139 msgid "In ERROR state" msgstr "En estado de ERROR " -#: nova/compute/manager.py:5057 +#: nova/compute/manager.py:5146 #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "En los estados %(vm_state)s/%(task_state)s, no REDIMENSIONADO/Ninguno" -#: nova/compute/manager.py:5068 +#: nova/compute/manager.py:5157 #, python-format msgid "Error auto-confirming resize: %s. Will retry later." msgstr "" "Error auto confirmando modificación de tamaño: %s. Se intentará " "posteriormente." -#: nova/compute/manager.py:5097 +#: nova/compute/manager.py:5186 msgid "Periodic task failed to offload instance." msgstr "Tarea periódica falló al descargar instancia." 
-#: nova/compute/manager.py:5117 +#: nova/compute/manager.py:5206 #, python-format msgid "" "Running instance usage audit for host %(host)s from %(begin_time)s to " @@ -5838,20 +5846,20 @@ msgstr "" "Ejecutando auditoría de uso de instancia para %(host)s desde " "%(begin_time)s hasta %(end_time)s. %(number_instances)s instancias." -#: nova/compute/manager.py:5137 +#: nova/compute/manager.py:5226 #, python-format msgid "Failed to generate usage audit for instance on host %s" msgstr "No se ha podido generar auditoría de uso para la instancia en el host %s " -#: nova/compute/manager.py:5166 +#: nova/compute/manager.py:5255 msgid "Updating bandwidth usage cache" msgstr "Actualizando memoria caché de uso de ancho de banda" -#: nova/compute/manager.py:5188 +#: nova/compute/manager.py:5277 msgid "Bandwidth usage not supported by hypervisor." msgstr "Uso de ancho de banda no soportado por el hipervisor." -#: nova/compute/manager.py:5311 +#: nova/compute/manager.py:5400 #, python-format msgid "" "Found %(num_db_instances)s in the database and %(num_vm_instances)s on " @@ -5860,20 +5868,7 @@ msgstr "" "Se han encontrado %(num_db_instances)s en la base de datos y " "%(num_vm_instances)s en el hipervisor." -#: nova/compute/manager.py:5318 nova/compute/manager.py:5381 -#, python-format -msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." -msgstr "" -"Durante sync_power_state la instancia ha dejado una tarea pendiente " -"(%(task)s). Omitir." - -#: nova/compute/manager.py:5342 -msgid "Periodic sync_power_state task had an error while processing an instance." -msgstr "" -"La tarea periódica sync_power_state ha tenido un error al procesar una " -"instancia." 
- -#: nova/compute/manager.py:5368 +#: nova/compute/manager.py:5466 #, python-format msgid "" "During the sync_power process the instance has moved from host %(src)s to" @@ -5882,63 +5877,70 @@ msgstr "" "Durante el proceso sync_power, la instancia se ha movido del host %(src)s" " al host %(dst)s" -#: nova/compute/manager.py:5406 +#: nova/compute/manager.py:5479 +#, python-format +msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." +msgstr "" +"Durante sync_power_state la instancia ha dejado una tarea pendiente " +"(%(task)s). Omitir." + +#: nova/compute/manager.py:5504 msgid "Instance shutdown by itself. Calling the stop API." msgstr "Conclusión de instancia por sí misma. Llamando a la API de detención." -#: nova/compute/manager.py:5418 nova/compute/manager.py:5427 -#: nova/compute/manager.py:5458 nova/compute/manager.py:5469 +#: nova/compute/manager.py:5516 nova/compute/manager.py:5525 +#: nova/compute/manager.py:5556 nova/compute/manager.py:5567 msgid "error during stop() in sync_power_state." msgstr "error durante stop() en sync_power_state." -#: nova/compute/manager.py:5422 +#: nova/compute/manager.py:5520 msgid "Instance is suspended unexpectedly. Calling the stop API." msgstr "" "La instancia se ha suspendido inesperadamente. Llamando a la API de " "detención." -#: nova/compute/manager.py:5438 +#: nova/compute/manager.py:5536 msgid "Instance is paused unexpectedly. Ignore." msgstr "La instancia se ha puesto en pausa inesperadamente. Ignorar. " -#: nova/compute/manager.py:5444 +#: nova/compute/manager.py:5542 msgid "Instance is unexpectedly not found. Ignore." msgstr "La instancia no se encuentra inesperadamente. Ignorar. " -#: nova/compute/manager.py:5450 +#: nova/compute/manager.py:5548 msgid "Instance is not stopped. Calling the stop API." msgstr "La instancia no se ha detenido. Llamando a la API de detención." -#: nova/compute/manager.py:5464 +#: nova/compute/manager.py:5562 msgid "Paused instance shutdown by itself. 
Calling the stop API." msgstr "" "La instancia pausada se ha apagado a si misma. Llamando la API de " "detención." -#: nova/compute/manager.py:5478 +#: nova/compute/manager.py:5576 msgid "Instance is not (soft-)deleted." msgstr "La instancia no se suprime (de forma no permanente). " -#: nova/compute/manager.py:5507 +#: nova/compute/manager.py:5605 msgid "Reclaiming deleted instance" msgstr "Reclamando instancia suprimida" -#: nova/compute/manager.py:5511 +#: nova/compute/manager.py:5609 #, python-format msgid "Periodic reclaim failed to delete instance: %s" msgstr "Reclamación periódica falló al eliminar instancia: %s" -#: nova/compute/manager.py:5536 +#: nova/compute/manager.py:5634 #, python-format msgid "Deleting orphan compute node %s" msgstr "Eliminando nodo de cómputo huérfano %s" -#: nova/compute/manager.py:5544 nova/compute/resource_tracker.py:392 +#: nova/compute/manager.py:5642 nova/compute/resource_tracker.py:391 #, python-format msgid "No service record for host %s" msgstr "Ningún registro de servicio para el host %s " -#: nova/compute/manager.py:5585 +#: nova/compute/manager.py:5682 #, python-format msgid "" "Detected instance with name label '%s' which is marked as DELETED but " @@ -5948,7 +5950,7 @@ msgstr "" " marcada como ELIMINADA pero todavía se encuentra presente en el " "anfitrión." -#: nova/compute/manager.py:5591 +#: nova/compute/manager.py:5688 #, python-format msgid "" "Powering off instance with name label '%s' which is marked as DELETED but" @@ -5957,15 +5959,15 @@ msgstr "" "Apagando la instancia con nombre '%s' que está marcada como ELIMINADA " "pero sigue presente en el anfitrión." 
-#: nova/compute/manager.py:5600 +#: nova/compute/manager.py:5697 msgid "set_bootable is not implemented for the current driver" msgstr "set_bootable no está implementado en el controlador actual" -#: nova/compute/manager.py:5605 +#: nova/compute/manager.py:5702 msgid "Failed to power off instance" msgstr "Fallo al apagar la instancia" -#: nova/compute/manager.py:5609 +#: nova/compute/manager.py:5706 #, python-format msgid "" "Destroying instance with name label '%s' which is marked as DELETED but " @@ -5974,27 +5976,27 @@ msgstr "" "Desrtuyendo instancia con etiqueta de nombre '%s' la cual ha sido marcada" " como ELIMINADA pero todavía se encuentra presente en el anfitrión." -#: nova/compute/manager.py:5619 +#: nova/compute/manager.py:5716 #, python-format msgid "Periodic cleanup failed to delete instance: %s" msgstr "Limpieza periódica falló al eliminar la instancia: %s" -#: nova/compute/manager.py:5623 +#: nova/compute/manager.py:5720 #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "Valor '%s' no reconocido para CONF.running_deleted_instance_action" -#: nova/compute/manager.py:5654 +#: nova/compute/manager.py:5752 #, python-format msgid "Setting instance back to %(state)s after: %(error)s" msgstr "Estableciendo la instancia de vuelta a %(state)s tras: %(error)s" -#: nova/compute/manager.py:5664 +#: nova/compute/manager.py:5762 #, python-format msgid "Setting instance back to ACTIVE after: %s" msgstr "Marcando la instancia de nuevo como ACTIVA después de: %s" -#: nova/compute/resource_tracker.py:106 +#: nova/compute/resource_tracker.py:105 msgid "" "Host field should not be set on the instance until resources have been " "claimed." @@ -6002,7 +6004,7 @@ msgstr "" "El campo de host no se debe establecer en la instancia hasta que los " "recursos se hayan reclamado." 
-#: nova/compute/resource_tracker.py:111 +#: nova/compute/resource_tracker.py:110 msgid "" "Node field should not be set on the instance until resources have been " "claimed." @@ -6010,16 +6012,16 @@ msgstr "" "El campo Nodo no debe ser establecido en la instancia hasta que los " "recursos han sido reclamados." -#: nova/compute/resource_tracker.py:273 +#: nova/compute/resource_tracker.py:272 #, python-format msgid "Cannot get the metrics from %s." msgstr "No se pueden obtener las métricas de %s." -#: nova/compute/resource_tracker.py:292 +#: nova/compute/resource_tracker.py:291 msgid "Auditing locally available compute resources" msgstr "Auditando recursos de cálculo disponibles localmente" -#: nova/compute/resource_tracker.py:297 +#: nova/compute/resource_tracker.py:296 msgid "" "Virt driver does not support 'get_available_resource' Compute tracking " "is disabled." @@ -6027,54 +6029,54 @@ msgstr "" "El controlador Virt no soporta 'get_available_resource'. El seguimiento " "de cálculo está inhabilitado." 
-#: nova/compute/resource_tracker.py:372 +#: nova/compute/resource_tracker.py:371 #, python-format msgid "Compute_service record created for %(host)s:%(node)s" msgstr "Registro compute_service creado para %(host)s:%(node)s" -#: nova/compute/resource_tracker.py:378 +#: nova/compute/resource_tracker.py:377 #, python-format msgid "Compute_service record updated for %(host)s:%(node)s" msgstr "El registro compute_service se ha actualizado para %(host)s:%(node)s" -#: nova/compute/resource_tracker.py:431 +#: nova/compute/resource_tracker.py:430 #, python-format msgid "Free ram (MB): %s" msgstr "RAM libre (MB): %s " -#: nova/compute/resource_tracker.py:432 +#: nova/compute/resource_tracker.py:431 #, python-format msgid "Free disk (GB): %s" msgstr "Disco libre (GB): %s " -#: nova/compute/resource_tracker.py:437 +#: nova/compute/resource_tracker.py:436 #, python-format msgid "Free VCPUS: %s" msgstr "VCPUS libres: %s" -#: nova/compute/resource_tracker.py:439 +#: nova/compute/resource_tracker.py:438 msgid "Free VCPU information unavailable" msgstr "Información de VCPU libre no disponible" -#: nova/compute/resource_tracker.py:442 +#: nova/compute/resource_tracker.py:441 #, python-format msgid "PCI stats: %s" msgstr "" -#: nova/compute/resource_tracker.py:478 +#: nova/compute/resource_tracker.py:486 #, python-format msgid "Updating from migration %s" msgstr "Actualizando desde la migración %s" -#: nova/compute/resource_tracker.py:545 +#: nova/compute/resource_tracker.py:553 msgid "Instance not resizing, skipping migration." msgstr "La instancia no se está redimensionando, se salta la migración." -#: nova/compute/resource_tracker.py:560 +#: nova/compute/resource_tracker.py:568 msgid "Flavor could not be found, skipping migration." msgstr "El sabor no puede ser encontrado, omitiendo migración." 
-#: nova/compute/resource_tracker.py:650 +#: nova/compute/resource_tracker.py:658 #, python-format msgid "" "Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB " @@ -6083,7 +6085,7 @@ msgstr "" "Se ha detectado una instancia huérfana en ejecución: %(uuid)s " "(consumiento %(memory_mb)s MB de memoria)" -#: nova/compute/resource_tracker.py:664 +#: nova/compute/resource_tracker.py:672 #, python-format msgid "Missing keys: %s" msgstr "Faltan claves: %s" @@ -6097,12 +6099,12 @@ msgstr "No se ha especificado ningún host de cálculo" msgid "Unable to find host for Instance %s" msgstr "No se puede encontrar el host para la instancia %s " -#: nova/compute/utils.py:209 +#: nova/compute/utils.py:204 #, python-format msgid "Can't access image %(image_id)s: %(error)s" msgstr "No se puede acceder a la imagen %(image_id)s: %(error)s" -#: nova/compute/utils.py:333 +#: nova/compute/utils.py:328 #, python-format msgid "" "No host name specified for the notification of HostAPI.%s and it will be " @@ -6111,7 +6113,7 @@ msgstr "" "No ha sido especificado un nombre de anfitrión para la notificación de " "HostAPI.%s y será ignorada" -#: nova/compute/utils.py:461 +#: nova/compute/utils.py:456 #, python-format msgid "" "Value of 0 or None specified for %s. This behaviour will change in " @@ -6123,7 +6125,7 @@ msgstr "" " tasa predeterminada' en lugar de 'no llamar'. Para mantener el " "comportamiento 'no llamar', utiliza un valor negativo." 
-#: nova/compute/monitors/__init__.py:177 +#: nova/compute/monitors/__init__.py:176 #, python-format msgid "" "Excluding monitor %(monitor_name)s due to metric name overlap; " @@ -6132,12 +6134,12 @@ msgstr "" "Excluyendo el monitor %(monitor_name)s debido a superposición de nombre " "de métrica; metricas superpuestas: %(overlap)s" -#: nova/compute/monitors/__init__.py:185 +#: nova/compute/monitors/__init__.py:184 #, python-format msgid "Monitor %(monitor_name)s cannot be used: %(ex)s" msgstr "El monitor %(monitor_name)s no puede ser utilizado: %(ex)s" -#: nova/compute/monitors/__init__.py:191 +#: nova/compute/monitors/__init__.py:190 #, python-format msgid "The following monitors have been disabled: %s" msgstr "Los siguientes monitores han sido deshabilitados: %s" @@ -6149,11 +6151,11 @@ msgstr "" "No todas las propiedades necesarias están implementadas en el controlador" " de cómputo: %s" -#: nova/conductor/api.py:300 +#: nova/conductor/api.py:318 msgid "nova-conductor connection established successfully" msgstr "" -#: nova/conductor/api.py:305 +#: nova/conductor/api.py:323 msgid "" "Timed out waiting for nova-conductor. Is it running? Or did this service" " start before nova-conductor? 
Reattempting establishment of nova-" @@ -6165,7 +6167,7 @@ msgstr "" msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s" msgstr "Se intentado actualizar instancia para '%(key)s' en %(instance_uuid)s" -#: nova/conductor/manager.py:522 +#: nova/conductor/manager.py:523 msgid "No valid host found for cold migrate" msgstr "No se ha encontrado anfitrión para migración en frío" @@ -6193,6 +6195,10 @@ msgstr "" "Se ha intentado desarchivar pero vm_state no se encuentra como SHELVED o " "SHELVED_OFFLOADED" +#: nova/conductor/manager.py:737 +msgid "No valid host found for rebuild" +msgstr "" + #: nova/conductor/tasks/live_migrate.py:113 #, python-format msgid "" @@ -6285,31 +6291,31 @@ msgstr "El parámetro model o base_model debe ser una subclase de NovaBase" msgid "Unrecognized read_deleted value '%s'" msgstr "Valor de read_deleted no reconocido '%s'" -#: nova/db/sqlalchemy/api.py:745 +#: nova/db/sqlalchemy/api.py:750 #, python-format msgid "Invalid floating ip id %s in request" msgstr "Identificador de dirección IP flotante inválida %s en solicitud" -#: nova/db/sqlalchemy/api.py:850 +#: nova/db/sqlalchemy/api.py:855 msgid "Failed to update usages bulk deallocating floating IP" msgstr "Fallo al actualizar uso de desasignación masiva de IP fotante" -#: nova/db/sqlalchemy/api.py:1006 +#: nova/db/sqlalchemy/api.py:1011 #, python-format msgid "Invalid floating IP %s in request" msgstr "Dirección IP flotante inválida %s en la solicitud" -#: nova/db/sqlalchemy/api.py:1308 nova/db/sqlalchemy/api.py:1347 +#: nova/db/sqlalchemy/api.py:1313 nova/db/sqlalchemy/api.py:1352 #, python-format msgid "Invalid fixed IP Address %s in request" msgstr "Dirección IP fija inválida %s en la solicitud" -#: nova/db/sqlalchemy/api.py:1482 +#: nova/db/sqlalchemy/api.py:1487 #, python-format msgid "Invalid virtual interface address %s in request" msgstr "Dirección de interfaz virtual inválida %s en la solicitud" -#: nova/db/sqlalchemy/api.py:1576 +#: nova/db/sqlalchemy/api.py:1581 #, 
python-format msgid "" "Unknown osapi_compute_unique_server_name_scope value: %s Flag must be " @@ -6318,22 +6324,22 @@ msgstr "" "Valor de osapi_compute_unique_server_name_scope desconocido: %s El " "distintivo debe ser vacío, \"global\" o \"project\"" -#: nova/db/sqlalchemy/api.py:1735 +#: nova/db/sqlalchemy/api.py:1741 #, python-format msgid "Invalid instance id %s in request" msgstr "ID de instancia %s no válido en la solicitud." -#: nova/db/sqlalchemy/api.py:2013 +#: nova/db/sqlalchemy/api.py:2019 #, python-format msgid "Invalid field name: %s" msgstr "Campo de nombre inválido: %s" -#: nova/db/sqlalchemy/api.py:3242 +#: nova/db/sqlalchemy/api.py:3248 #, python-format msgid "Change will make usage less than 0 for the following resources: %s" msgstr "El cambio hará el uso menos de 0 para los siguientes recursos: %s" -#: nova/db/sqlalchemy/api.py:4892 +#: nova/db/sqlalchemy/api.py:4899 #, python-format msgid "" "Volume(%s) has lower stats then what is in the database. Instance must " @@ -6343,14 +6349,14 @@ msgstr "" "datos. la instancia debió haber reiniciado o colapsado. Actualizando los " "totales." -#: nova/db/sqlalchemy/api.py:5249 +#: nova/db/sqlalchemy/api.py:5256 #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" "Fallo en adición de metadata para el agregado %(id)s después de " "%(retries)s intentos" -#: nova/db/sqlalchemy/api.py:5639 +#: nova/db/sqlalchemy/api.py:5646 #, python-format msgid "IntegrityError detected when archiving table %s" msgstr "Se ha detectado un IntegrityError al archivar la tabla %s" @@ -6403,7 +6409,7 @@ msgstr "Excepción al crear la tabla." 
msgid "Exception while seeding instance_types table" msgstr "Excepción al iniciar la tabla instance_types" -#: nova/image/glance.py:231 +#: nova/image/glance.py:236 #, python-format msgid "" "Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " @@ -6412,7 +6418,7 @@ msgstr "" "Error al contactar con el servidor de glance '%(host)s:%(port)s' para " "'%(method)s', %(extra)s." -#: nova/image/glance.py:265 +#: nova/image/glance.py:268 #, python-format msgid "" "When loading the module %(module_str)s the following error occurred: " @@ -6421,12 +6427,12 @@ msgstr "" "Al cargar el módulo %(module_str)s se ha presentado el siguiente error: " "%(ex)s" -#: nova/image/glance.py:303 +#: nova/image/glance.py:306 #, python-format msgid "Failed to instantiate the download handler for %(scheme)s" msgstr "Fallo al instanciar el manejador de descargas para %(scheme)s" -#: nova/image/glance.py:319 +#: nova/image/glance.py:322 #, python-format msgid "Successfully transferred using %s" msgstr "Exitosamente transferido utilizando %s" @@ -6587,16 +6593,16 @@ msgstr "" msgid "Not deleting key %s" msgstr "Sin eliminar la clave %s" -#: nova/network/api.py:198 nova/network/neutronv2/api.py:797 +#: nova/network/api.py:195 nova/network/neutronv2/api.py:797 #, python-format msgid "re-assign floating IP %(address)s from instance %(instance_id)s" msgstr "volver a asignar IP flotante %(address)s desde instancia %(instance_id)s" -#: nova/network/base_api.py:49 +#: nova/network/base_api.py:48 msgid "Failed storing info cache" msgstr "Ha fallado el almacenamiento de memoria caché de información" -#: nova/network/base_api.py:68 +#: nova/network/base_api.py:67 msgid "instance is a required argument to use @refresh_cache" msgstr "la instancia es un argumento necesario para utilizar @refresh_cache " @@ -6609,51 +6615,51 @@ msgstr "La opción de controlador de red es necesaria, pero no se ha especificad msgid "Loading network driver '%s'" msgstr "Cargando controlador de red '%s'" -#: 
nova/network/floating_ips.py:90 +#: nova/network/floating_ips.py:85 #, python-format msgid "Fixed ip %s not found" msgstr "Direción IP fija %s no encontrada" -#: nova/network/floating_ips.py:180 +#: nova/network/floating_ips.py:175 #, python-format msgid "Floating IP %s is not associated. Ignore." msgstr "La IP flotante %s no está asociada. Ignorar." -#: nova/network/floating_ips.py:199 +#: nova/network/floating_ips.py:194 #, python-format msgid "Address |%(address)s| is not allocated" msgstr "La dirección |%(address)s| no está asignada" -#: nova/network/floating_ips.py:203 +#: nova/network/floating_ips.py:198 #, python-format msgid "Address |%(address)s| is not allocated to your project |%(project)s|" msgstr "La dirección |%(address)s| no está asignada al proyecto |%(project)s|" -#: nova/network/floating_ips.py:223 +#: nova/network/floating_ips.py:218 #, python-format msgid "Quota exceeded for %s, tried to allocate floating IP" msgstr "Cuota excedida para %s, intentando asignar dirección IP flotante" -#: nova/network/floating_ips.py:283 +#: nova/network/floating_ips.py:277 msgid "Failed to update usages deallocating floating IP" msgstr "No se han podido actualizar los usos desasignando IP flotante " -#: nova/network/floating_ips.py:385 +#: nova/network/floating_ips.py:375 #, python-format msgid "Failed to disassociated floating address: %s" msgstr "Fallo al desasociar la dirección IP flotante: %s" -#: nova/network/floating_ips.py:390 +#: nova/network/floating_ips.py:380 #, python-format msgid "Interface %s not found" msgstr "Interfaz %s no encontrada" -#: nova/network/floating_ips.py:553 +#: nova/network/floating_ips.py:539 #, python-format msgid "Starting migration network for instance %s" msgstr "Comenzando migración de red para la instancia %s" -#: nova/network/floating_ips.py:560 +#: nova/network/floating_ips.py:545 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " @@ -6662,12 +6668,12 @@ msgstr "" "La dirección IP 
flotante | %(address)s | ya no pertentece a la instancia " "%(instance_uuid)s. No será migrada" -#: nova/network/floating_ips.py:593 +#: nova/network/floating_ips.py:574 #, python-format msgid "Finishing migration network for instance %s" msgstr "Finalizando la migración de red para la instancia %s" -#: nova/network/floating_ips.py:601 +#: nova/network/floating_ips.py:581 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " @@ -6676,7 +6682,7 @@ msgstr "" "La dirección IP flotante |%(address)s| ya no pertenece a la instancia " "%(instance_uuid)s. No se configurará." -#: nova/network/floating_ips.py:644 +#: nova/network/floating_ips.py:624 #, python-format msgid "" "Database inconsistency: DNS domain |%s| is registered in the Nova db but " @@ -6687,12 +6693,12 @@ msgstr "" "base de datos Nova pero no es visible para el controlador DNS de " "instancia o flotante. Se ignorará." -#: nova/network/floating_ips.py:684 +#: nova/network/floating_ips.py:664 #, python-format msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." msgstr "El dominio |%(domain)s| ya existe, cambiando zona a |%(av_zone)s|." -#: nova/network/floating_ips.py:693 +#: nova/network/floating_ips.py:673 #, python-format msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." msgstr "El dominio |%(domain)s| ya existe, cambiando el proyecto a |%(project)s." @@ -6723,17 +6729,17 @@ msgstr "Este controlador sólo soporta entradas de tipo 'a'." msgid "This shouldn't be getting called except during testing." msgstr "Esto no se debe llamar excepto durante las pruebas. 
" -#: nova/network/linux_net.py:227 +#: nova/network/linux_net.py:232 #, python-format msgid "Attempted to remove chain %s which does not exist" msgstr "Se ha intentado eliminar la cadena %s que no existe" -#: nova/network/linux_net.py:263 +#: nova/network/linux_net.py:268 #, python-format msgid "Unknown chain: %r" msgstr "Cadena desconocida: %r" -#: nova/network/linux_net.py:294 +#: nova/network/linux_net.py:301 #, python-format msgid "" "Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " @@ -6742,52 +6748,52 @@ msgstr "" "Se ha intentado eliminar una regla que no estaba allí: %(chain)r %(rule)r" " %(wrap)r %(top)r" -#: nova/network/linux_net.py:762 +#: nova/network/linux_net.py:769 #, python-format msgid "Removed %(num)d duplicate rules for floating ip %(float)s" msgstr "Se han eliminado %(num)d reglas duplicadas para la IP flotante %(float)s" -#: nova/network/linux_net.py:810 +#: nova/network/linux_net.py:817 #, python-format msgid "Error deleting conntrack entries for %s" msgstr "Error al eliminar las entradas conntrack para %s" -#: nova/network/linux_net.py:1068 +#: nova/network/linux_net.py:1072 #, python-format msgid "Hupping dnsmasq threw %s" msgstr "Excepción al recargar la configuración de dnsmasq: %s" -#: nova/network/linux_net.py:1150 +#: nova/network/linux_net.py:1154 #, python-format msgid "killing radvd threw %s" msgstr "Matando radvd lanzado %s" -#: nova/network/linux_net.py:1302 +#: nova/network/linux_net.py:1308 #, python-format msgid "Unable to execute %(cmd)s. Exception: %(exception)s" msgstr "No se puede ejecutar %(cmd)s. 
Excepción: %(exception)s" -#: nova/network/linux_net.py:1360 +#: nova/network/linux_net.py:1366 #, python-format msgid "Failed removing net device: '%s'" msgstr "Fallo al remover dispositivo de red: '%s'" -#: nova/network/linux_net.py:1532 +#: nova/network/linux_net.py:1543 #, python-format msgid "Adding interface %(interface)s to bridge %(bridge)s" msgstr "Añadiendo la interfaz %(interface)s al puente %(bridge)s" -#: nova/network/linux_net.py:1538 +#: nova/network/linux_net.py:1549 #, python-format msgid "Failed to add interface: %s" msgstr "No se ha podido añadir interfaz: %s " -#: nova/network/manager.py:836 +#: nova/network/manager.py:828 #, python-format msgid "instance-dns-zone not found |%s|." msgstr "instance-dns-zone no encontrada |%s|" -#: nova/network/manager.py:843 +#: nova/network/manager.py:835 #, python-format msgid "" "instance-dns-zone is |%(domain)s|, which is in availability zone " @@ -6798,56 +6804,56 @@ msgstr "" "|%(zone)s|. La instancia está en la zona |%(zone2)s|. No se creará ningún" " registro de DNS." -#: nova/network/manager.py:882 +#: nova/network/manager.py:874 #, python-format msgid "Quota exceeded for %s, tried to allocate fixed IP" msgstr "Cuota excedida para %s, intentando asignar dirección IP flotante" -#: nova/network/manager.py:942 +#: nova/network/manager.py:934 msgid "Error cleaning up fixed ip allocation. Manual cleanup may be required." msgstr "" -#: nova/network/manager.py:972 +#: nova/network/manager.py:964 msgid "Failed to update usages deallocating fixed IP" msgstr "" "Se ha encontrado un error en la actualización de los usos desasignando IP" " flotante" -#: nova/network/manager.py:996 +#: nova/network/manager.py:988 #, python-format msgid "Unable to release %s because vif doesn't exist." msgstr "No se puede liberar %s porque vif no existe." 
-#: nova/network/manager.py:1037 +#: nova/network/manager.py:1029 #, python-format msgid "IP %s leased that is not associated" msgstr "La IP %s alquilada que no está asociada " -#: nova/network/manager.py:1043 +#: nova/network/manager.py:1035 #, python-format msgid "IP |%s| leased that isn't allocated" msgstr "IP |%s| alquilada que no está asignada" -#: nova/network/manager.py:1052 +#: nova/network/manager.py:1044 #, python-format msgid "IP %s released that is not associated" msgstr "IP %s liberada que no está asociada" -#: nova/network/manager.py:1056 +#: nova/network/manager.py:1048 #, python-format msgid "IP %s released that was not leased" msgstr "IP %s liberada que no está alquilada" -#: nova/network/manager.py:1074 +#: nova/network/manager.py:1066 #, python-format msgid "%s must be an integer" msgstr "%s debe ser un entero " -#: nova/network/manager.py:1106 +#: nova/network/manager.py:1098 msgid "Maximum allowed length for 'label' is 255." msgstr "La longitud máxima permitida para 'label' es 255." -#: nova/network/manager.py:1126 +#: nova/network/manager.py:1118 #, python-format msgid "" "Subnet(s) too large, defaulting to /%s. To override, specify " @@ -6856,18 +6862,18 @@ msgstr "" "Subred(es) demasiado grande(s), se usará el valor predeterminado /%s. " "Para sustituirlo, especifique el distintivo network_size." 
-#: nova/network/manager.py:1211 +#: nova/network/manager.py:1203 msgid "cidr already in use" msgstr "cidr ya se está utilizando" -#: nova/network/manager.py:1214 +#: nova/network/manager.py:1206 #, python-format msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" msgstr "" "cidr solicitado (%(cidr)s) está en conflicto con superred existente " "(%(super)s)" -#: nova/network/manager.py:1225 +#: nova/network/manager.py:1217 #, python-format msgid "" "requested cidr (%(cidr)s) conflicts with existing smaller cidr " @@ -6876,12 +6882,12 @@ msgstr "" "el cidr solicitado (%(cidr)s) está en conflicto con el cidr más pequeño " "existente (%(smaller)s)" -#: nova/network/manager.py:1320 +#: nova/network/manager.py:1311 #, python-format msgid "Network must be disassociated from project %s before delete" msgstr "La red se debe desasociar el proyecto %s antes de la supresión" -#: nova/network/manager.py:1949 +#: nova/network/manager.py:1937 msgid "" "The sum between the number of networks and the vlan start cannot be " "greater than 4094" @@ -6889,7 +6895,7 @@ msgstr "" "La suma entre el número de redes y el inicio de vlan no puede ser mayor " "que 4094" -#: nova/network/manager.py:1956 +#: nova/network/manager.py:1944 #, python-format msgid "" "The network range is not big enough to fit %(num_networks)s networks. 
" @@ -7134,22 +7140,22 @@ msgstr "Esta regla ya existe en el grupo %s" msgid "Error setting %(attr)s" msgstr "Error al establecer %(attr)s" -#: nova/objects/base.py:247 +#: nova/objects/base.py:256 #, python-format msgid "Unable to instantiate unregistered object type %(objtype)s" msgstr "Incapaz de instanciar tipo de objeto no registrado %(objtype)s" -#: nova/objects/base.py:366 +#: nova/objects/base.py:375 #, python-format msgid "Cannot load '%s' in the base class" msgstr "No se puede cargar '%s' en la clase base" -#: nova/objects/base.py:412 +#: nova/objects/base.py:421 #, python-format msgid "%(objname)s object has no attribute '%(attrname)s'" msgstr "El objeto %(objname)s no tiene atributo '%(attrname)s'" -#: nova/objects/block_device.py:136 +#: nova/objects/block_device.py:149 msgid "Volume does not belong to the requested instance." msgstr "El volumen no pertenece a la instancia solicitada." @@ -7163,44 +7169,44 @@ msgstr "La clave %(key)s debe ser de tipo %(expected)s y no del tipo %(actual)s" msgid "Element %(key)s:%(val)s must be of type %(expected)s not %(actual)s" msgstr "El elemento %(key)s:%(val)s debe ser de tipo %(expected)s y no %(actual)s" -#: nova/objects/fields.py:157 +#: nova/objects/fields.py:165 #, python-format msgid "Field `%s' cannot be None" msgstr "El campo `%s' no puede ser Ninguno" -#: nova/objects/fields.py:232 +#: nova/objects/fields.py:246 #, python-format msgid "A string is required here, not %s" msgstr "Se requiere una cadena aqui, no %s" -#: nova/objects/fields.py:268 +#: nova/objects/fields.py:286 msgid "A datetime.datetime is required here" msgstr "Se requiere un datetime.datetime aquí" -#: nova/objects/fields.py:306 nova/objects/fields.py:315 -#: nova/objects/fields.py:324 +#: nova/objects/fields.py:328 nova/objects/fields.py:337 +#: nova/objects/fields.py:346 #, python-format msgid "Network \"%s\" is not valid" msgstr "La red \"%s\" no es válida" -#: nova/objects/fields.py:363 +#: nova/objects/fields.py:385 msgid "A list is 
required here" msgstr "Aquí se requiere una lista" -#: nova/objects/fields.py:379 +#: nova/objects/fields.py:405 msgid "A dict is required here" msgstr "Aquí se requiere un diccionario" -#: nova/objects/fields.py:418 +#: nova/objects/fields.py:449 #, python-format msgid "An object of type %s is required here" msgstr "Aquí se requiere un objeto del tipo %s" -#: nova/objects/fields.py:445 +#: nova/objects/fields.py:488 msgid "A NetworkModel is required here" msgstr "aquí se requiere un NetworkModel" -#: nova/objects/instance.py:432 +#: nova/objects/instance.py:431 #, python-format msgid "No save handler for %s" msgstr "No hay manejador de guardado para %s" @@ -7235,7 +7241,7 @@ msgstr "" "Se ha encontrado la lista de instantáneas pero no se ha encontrado " "ninguna cabecera." -#: nova/openstack/common/lockutils.py:102 +#: nova/openstack/common/lockutils.py:101 #, python-format msgid "Unable to acquire lock on `%(filename)s` due to %(exception)s" msgstr "" @@ -7260,7 +7266,7 @@ msgstr "El recurso syslog debe ser uno de: %s" msgid "Fatal call to deprecated config: %(msg)s" msgstr "Llamada muy grave a configuración en desuso: %(msg)s" -#: nova/openstack/common/periodic_task.py:39 +#: nova/openstack/common/periodic_task.py:40 #, python-format msgid "Unexpected argument for periodic task creation: %(arg)s." msgstr "Argumento inesperado para la creación de tarea periódica: %(arg)s." 
@@ -7333,12 +7339,12 @@ msgstr "Versión SSL inválida : %s" msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "Valor '%(val)s' no reconocido, los valores aceptables son: %(acceptable)s" -#: nova/openstack/common/strutils.py:202 +#: nova/openstack/common/strutils.py:197 #, python-format msgid "Invalid unit system: \"%s\"" msgstr "Unidad del sistema no valida: \"%s\"" -#: nova/openstack/common/strutils.py:211 +#: nova/openstack/common/strutils.py:206 #, python-format msgid "Invalid string format: %s" msgstr "Formato inválido de cadena: %s" @@ -7470,16 +7476,16 @@ msgstr "" msgid "Choosing host %(weighed_host)s for instance %(instance_uuid)s" msgstr "Eligiendo anfitrión %(weighed_host)s para la instancia %(instance_uuid)s" -#: nova/scheduler/filter_scheduler.py:170 +#: nova/scheduler/filter_scheduler.py:169 msgid "Instance disappeared during scheduling" msgstr "La instancia ha desaparecido durante la programación" -#: nova/scheduler/host_manager.py:173 +#: nova/scheduler/host_manager.py:169 #, python-format msgid "Metric name unknown of %r" msgstr "Nombre de métrica desconocido para %r" -#: nova/scheduler/host_manager.py:188 +#: nova/scheduler/host_manager.py:184 #, python-format msgid "" "Host has more disk space than database expected (%(physical)sgb > " @@ -7488,42 +7494,42 @@ msgstr "" "El anfitrión tiene más espacio en disco que lo esperado por la base de " "datos (%(physical)sgb > %(database)sgb)" -#: nova/scheduler/host_manager.py:365 +#: nova/scheduler/host_manager.py:311 #, python-format msgid "Host filter ignoring hosts: %s" msgstr "Filtro de anfitrión ignorando huéspedes: %s" -#: nova/scheduler/host_manager.py:377 +#: nova/scheduler/host_manager.py:323 #, python-format msgid "Host filter forcing available hosts to %s" msgstr "Filtro de anfitrión forzando a los huéspedes disponibles a %s" -#: nova/scheduler/host_manager.py:380 +#: nova/scheduler/host_manager.py:326 #, python-format msgid "No hosts matched due to not matching 
'force_hosts' value of '%s'" msgstr "" "No se han relacionado anfitriones debido a que no hay valores " "relacionados de '%s' a 'force_hosts'" -#: nova/scheduler/host_manager.py:393 +#: nova/scheduler/host_manager.py:339 #, python-format msgid "Host filter forcing available nodes to %s" msgstr "Filtro de anfitriones forzando nodos disponibles a %s" -#: nova/scheduler/host_manager.py:396 +#: nova/scheduler/host_manager.py:342 #, python-format msgid "No nodes matched due to not matching 'force_nodes' value of '%s'" msgstr "" "No se han relacionado nodos debido a que no hay valores relacionados de " "'%s' a 'force_nodes'" -#: nova/scheduler/host_manager.py:444 +#: nova/scheduler/host_manager.py:390 #: nova/scheduler/filters/trusted_filter.py:208 #, python-format msgid "No service for compute ID %s" msgstr "No hay servicio para el ID de cálculo %s " -#: nova/scheduler/host_manager.py:462 +#: nova/scheduler/host_manager.py:408 #, python-format msgid "Removing dead compute node %(host)s:%(node)s from scheduler" msgstr "Eliminando nodo de cálculo inactivo %(host)s:%(node)s del planificador" @@ -7701,12 +7707,16 @@ msgstr "La instancia y el volumen no están en la misma availability_zone" msgid "already detached" msgstr "ya está desconectado" -#: nova/tests/api/test_auth.py:97 +#: nova/tests/api/test_auth.py:98 msgid "unexpected role header" msgstr "cabecera de rol inesperada" -#: nova/tests/api/openstack/compute/test_servers.py:3202 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2425 +#: nova/tests/api/openstack/test_faults.py:46 +msgid "Should be translated." 
+msgstr "" + +#: nova/tests/api/openstack/compute/test_servers.py:3225 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2434 msgid "" "Quota exceeded for instances: Requested 1, but already used 10 of 10 " "instances" @@ -7714,15 +7724,15 @@ msgstr "" "Se ha superado la cuota para las instancias: solicitada 1, pero ya se han" " utilizado 10 de 10 instancias" -#: nova/tests/api/openstack/compute/test_servers.py:3207 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2430 +#: nova/tests/api/openstack/compute/test_servers.py:3230 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2439 msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram" msgstr "" "Se ha superado la cuota para ram: Solicitadas 4096, ya utilizadas 8192 de" " 10240 ram" -#: nova/tests/api/openstack/compute/test_servers.py:3212 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2435 +#: nova/tests/api/openstack/compute/test_servers.py:3235 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2444 msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores" msgstr "" "Se ha superado la cuota para núcleos: Solicitados 2, pero ya utilizados 9" @@ -7732,7 +7742,7 @@ msgstr "" #: nova/tests/compute/test_compute.py:1707 #: nova/tests/compute/test_compute.py:1785 #: nova/tests/compute/test_compute.py:1825 -#: nova/tests/compute/test_compute.py:5546 +#: nova/tests/compute/test_compute.py:5603 #, python-format msgid "Running instances: %s" msgstr "Ejecutando instancias: %s" @@ -7744,16 +7754,16 @@ msgstr "Ejecutando instancias: %s" msgid "After terminating instances: %s" msgstr "Después de terminar las instancias: %s" -#: nova/tests/compute/test_compute.py:5557 +#: nova/tests/compute/test_compute.py:5614 #, python-format msgid "After force-killing instances: %s" msgstr "Después de finalizar de forma forzada las instancias: %s" -#: nova/tests/compute/test_compute.py:6173 +#: 
nova/tests/compute/test_compute.py:6229 msgid "wrong host/node" msgstr "host/nodo incorrecto" -#: nova/tests/compute/test_compute.py:10753 +#: nova/tests/compute/test_compute.py:10820 msgid "spawn error" msgstr "error de generación" @@ -7853,27 +7863,27 @@ msgstr "Cuerpo: %s" msgid "Unexpected status code" msgstr "Código de estado inesperado" -#: nova/tests/virt/hyperv/test_hypervapi.py:512 +#: nova/tests/virt/hyperv/test_hypervapi.py:517 msgid "fake vswitch not found" msgstr "vswitch falso no encontrado" -#: nova/tests/virt/hyperv/test_hypervapi.py:965 +#: nova/tests/virt/hyperv/test_hypervapi.py:970 msgid "Simulated failure" msgstr "Falla simulada" -#: nova/tests/virt/libvirt/fakelibvirt.py:1019 +#: nova/tests/virt/libvirt/fakelibvirt.py:1041 msgid "Expected a list for 'auth' parameter" msgstr "Se esperaba una lista para el parámetro 'auth'" -#: nova/tests/virt/libvirt/fakelibvirt.py:1023 +#: nova/tests/virt/libvirt/fakelibvirt.py:1045 msgid "Expected a function in 'auth[0]' parameter" msgstr "Se esperaba una función en el parámetro 'auth[0]' " -#: nova/tests/virt/libvirt/fakelibvirt.py:1027 +#: nova/tests/virt/libvirt/fakelibvirt.py:1049 msgid "Expected a function in 'auth[1]' parameter" msgstr "Se esperaba una función en el parámetro 'auth[1]' " -#: nova/tests/virt/libvirt/fakelibvirt.py:1038 +#: nova/tests/virt/libvirt/fakelibvirt.py:1060 msgid "" "virEventRegisterDefaultImpl() must be called before " "connection is used." @@ -7881,8 +7891,32 @@ msgstr "" "virEventRegisterDefaultImpl() debe ser invocado antes de que la conexión " "sea utilizada." 
-#: nova/tests/virt/vmwareapi/test_vm_util.py:196 -#: nova/virt/vmwareapi/vm_util.py:1087 +#: nova/tests/virt/vmwareapi/fake.py:244 +#, python-format +msgid "Property %(attr)s not set for the managed object %(name)s" +msgstr "" +"La propiedad %(attr)s no se ha establecido para el objeto gestionado " +"%(name)s" + +#: nova/tests/virt/vmwareapi/fake.py:969 +msgid "There is no VM registered" +msgstr "No hay ninguna VM registrada" + +#: nova/tests/virt/vmwareapi/fake.py:971 nova/tests/virt/vmwareapi/fake.py:1307 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "La máquina virtual con la referencia %s no está allí" + +#: nova/tests/virt/vmwareapi/fake.py:1096 +msgid "Session Invalid" +msgstr "Sesión no válida" + +#: nova/tests/virt/vmwareapi/fake.py:1304 +msgid "No Virtual Machine has been registered yet" +msgstr "No se ha registrado aún ninguna máquina virtual " + +#: nova/tests/virt/vmwareapi/test_ds_util.py:221 +#: nova/virt/vmwareapi/ds_util.py:265 #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" @@ -7898,8 +7932,8 @@ msgstr "" "Copia dispersa en progreso, %(complete_pct).2f%% completado. %(left)s " "bytes restantes para copiar." -#: nova/tests/virt/xenapi/image/test_bittorrent.py:126 -#: nova/virt/xenapi/image/bittorrent.py:81 +#: nova/tests/virt/xenapi/image/test_bittorrent.py:125 +#: nova/virt/xenapi/image/bittorrent.py:80 msgid "" "Cannot create default bittorrent URL without torrent_base_url set or " "torrent URL fetcher extension" @@ -7907,8 +7941,8 @@ msgstr "" "No se puede crear la URL predeterminada de bittorrent sin establecer " "torrent_base_url la extensión de búsqueda de URL torrent" -#: nova/tests/virt/xenapi/image/test_bittorrent.py:160 -#: nova/virt/xenapi/image/bittorrent.py:85 +#: nova/tests/virt/xenapi/image/test_bittorrent.py:159 +#: nova/virt/xenapi/image/bittorrent.py:84 msgid "Multiple torrent URL fetcher extensions found. Failing." 
msgstr "Se han encontrado múltiples URL de buscadores torrent. Fallando." @@ -7924,69 +7958,89 @@ msgstr "" msgid "Booting with volume %(volume_id)s at %(mountpoint)s" msgstr "Arrancando con el volumen %(volume_id)s en %(mountpoint)s" -#: nova/virt/cpu.py:56 nova/virt/cpu.py:60 -#, python-format -msgid "Invalid range expression %r" -msgstr "Expresión de intérvalo inválida %" - -#: nova/virt/cpu.py:69 -#, python-format -msgid "Invalid exclusion expression %r" -msgstr "Expresión de exclusión inválida %r" - -#: nova/virt/cpu.py:76 -#, python-format -msgid "Invalid inclusion expression %r" -msgstr "Expresión de inclusión inválida %" - -#: nova/virt/cpu.py:81 -#, python-format -msgid "No CPUs available after parsing %r" -msgstr "CPU's no disponibles después de analizar %r" - -#: nova/virt/driver.py:1207 +#: nova/virt/driver.py:1242 msgid "Event must be an instance of nova.virt.event.Event" msgstr "El suceso debe ser una instancia de un nova.virt.event.Event" -#: nova/virt/driver.py:1213 +#: nova/virt/driver.py:1248 #, python-format msgid "Exception dispatching event %(event)s: %(ex)s" msgstr "Excepción al asignar el suceso %(event)s: %(ex)s" -#: nova/virt/driver.py:1295 +#: nova/virt/driver.py:1330 msgid "Compute driver option required, but not specified" msgstr "" "La opción de controlador de cálculo es necesaria, pero no se ha " "especificado" -#: nova/virt/driver.py:1298 +#: nova/virt/driver.py:1333 #, python-format msgid "Loading compute driver '%s'" msgstr "Cargando controlador de cálculo '%s' " -#: nova/virt/driver.py:1305 +#: nova/virt/driver.py:1340 msgid "Unable to load the virtualization driver" msgstr "Incapaz de cargar el controlador de virtualización" -#: nova/virt/fake.py:216 +#: nova/virt/event.py:33 +msgid "Started" +msgstr "Arrancado" + +#: nova/virt/event.py:34 +msgid "Stopped" +msgstr "" + +#: nova/virt/event.py:35 +msgid "Paused" +msgstr "Pausada" + +#: nova/virt/event.py:36 +msgid "Resumed" +msgstr "Reanudada" + +#: nova/virt/event.py:108 +msgid 
"Unknown" +msgstr "Desconocido" + +#: nova/virt/fake.py:217 #, python-format msgid "Key '%(key)s' not in instances '%(inst)s'" msgstr "La clave '%(key)s' no está en las instancias '%(inst)s'" -#: nova/virt/firewall.py:178 +#: nova/virt/firewall.py:176 msgid "Attempted to unfilter instance which is not filtered" msgstr "Se ha intentado eliminar filtro de instancia que no está filtrada" -#: nova/virt/images.py:86 +#: nova/virt/hardware.py:45 +#, python-format +msgid "No CPUs available after parsing %r" +msgstr "CPU's no disponibles después de analizar %r" + +#: nova/virt/hardware.py:77 nova/virt/hardware.py:81 +#, python-format +msgid "Invalid range expression %r" +msgstr "Expresión de intérvalo inválida %" + +#: nova/virt/hardware.py:90 +#, python-format +msgid "Invalid exclusion expression %r" +msgstr "Expresión de exclusión inválida %r" + +#: nova/virt/hardware.py:97 +#, python-format +msgid "Invalid inclusion expression %r" +msgstr "Expresión de inclusión inválida %" + +#: nova/virt/images.py:81 msgid "'qemu-img info' parsing failed." msgstr "Se ha encontrado un error en el análisis de 'qemu-img info'." 
-#: nova/virt/images.py:92 +#: nova/virt/images.py:87 #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s respaldado por: %(backing_file)s" -#: nova/virt/images.py:105 +#: nova/virt/images.py:100 #, python-format msgid "" "%(base)s virtual size %(disk_size)s larger than flavor root disk size " @@ -7995,12 +8049,12 @@ msgstr "" "El tamaño virtual %(disk_size)s de %(base)s es más grande que el tamaño " "del disco raíz del sabor %(size)s" -#: nova/virt/images.py:122 +#: nova/virt/images.py:117 #, python-format msgid "Converted to raw, but format is now %s" msgstr "Convertido a sin formato, pero el formato es ahora %s" -#: nova/virt/storage_users.py:63 nova/virt/storage_users.py:101 +#: nova/virt/storage_users.py:64 nova/virt/storage_users.py:102 #, python-format msgid "Cannot decode JSON from %(id_path)s" msgstr "No se puede decodificar el JSOON de %(id_path)s" @@ -8049,31 +8103,31 @@ msgstr "" "El gestor de alimentación de máquina vacía no ha podido reiniciar el nodo" " para la instancia %r" -#: nova/virt/baremetal/driver.py:375 +#: nova/virt/baremetal/driver.py:376 #, python-format msgid "Destroy called on non-existing instance %s" msgstr "Se ha llamado una destrucción en una instancia no existente %s" -#: nova/virt/baremetal/driver.py:393 +#: nova/virt/baremetal/driver.py:394 #, python-format msgid "Error from baremetal driver during destroy: %s" msgstr "Error del controlador de máquina vacía durante la destrucción: %s" -#: nova/virt/baremetal/driver.py:398 +#: nova/virt/baremetal/driver.py:399 #, python-format msgid "Error while recording destroy failure in baremetal database: %s" msgstr "" "Error al registrar la anomalía de destrcción en la base de datos de " "máquina vacía: %s" -#: nova/virt/baremetal/driver.py:413 +#: nova/virt/baremetal/driver.py:414 #, python-format msgid "Baremetal power manager failed to stop node for instance %r" msgstr "" "El gestor de alimentación de máquina vacía no ha podido detener el nodo " "para la 
instancia %r" -#: nova/virt/baremetal/driver.py:426 +#: nova/virt/baremetal/driver.py:427 #, python-format msgid "Baremetal power manager failed to start node for instance %r" msgstr "" @@ -8330,16 +8384,16 @@ msgstr "Error al ejecutar comando: %s" msgid "baremetal driver was unable to delete tid %s" msgstr "el controlador de máquina vacía no ha podido suprimir el tid %s" -#: nova/virt/baremetal/volume_driver.py:195 nova/virt/hyperv/volumeops.py:189 +#: nova/virt/baremetal/volume_driver.py:195 nova/virt/hyperv/volumeops.py:196 msgid "Could not determine iscsi initiator name" msgstr "No se ha podido determinar el nombre de iniciador iscsi " -#: nova/virt/baremetal/volume_driver.py:234 +#: nova/virt/baremetal/volume_driver.py:225 #, python-format msgid "No fixed PXE IP is associated to %s" msgstr "No hay ninguna IP PXE fija asociada a %s" -#: nova/virt/baremetal/volume_driver.py:288 +#: nova/virt/baremetal/volume_driver.py:283 #, python-format msgid "detach volume could not find tid for %s" msgstr "el volumen de desconexión no ha podido encontrar tid para %s" @@ -8371,16 +8425,16 @@ msgstr "La interfaz de máquina vacía %s ya se está utilizando" msgid "Baremetal virtual interface %s not found" msgstr "No se ha encontrado la interfaz virtual de máquina vacía %s" -#: nova/virt/disk/api.py:285 +#: nova/virt/disk/api.py:280 msgid "image already mounted" msgstr "imagen ya montada" -#: nova/virt/disk/api.py:359 +#: nova/virt/disk/api.py:354 #, python-format msgid "Ignoring error injecting data into image (%(e)s)" msgstr "Ignorando el error al inyectar datos en la imagen (%(e)s)" -#: nova/virt/disk/api.py:381 +#: nova/virt/disk/api.py:376 #, python-format msgid "" "Failed to mount container filesystem '%(image)s' on '%(target)s': " @@ -8389,31 +8443,31 @@ msgstr "" "Se ha encontrado un error en el montaje del sistema de archivos de " "contenedor '%(image)s' en '%(target)s': : %(errors)s" -#: nova/virt/disk/api.py:411 +#: nova/virt/disk/api.py:406 #, python-format msgid 
"Failed to teardown container filesystem: %s" msgstr "Fallo al desarmar el contenedor de sistema de archivo: %s" -#: nova/virt/disk/api.py:424 +#: nova/virt/disk/api.py:419 #, python-format msgid "Failed to umount container filesystem: %s" msgstr "No se ha podido desmontar el sistema de archivos de contenedor: %s" -#: nova/virt/disk/api.py:449 +#: nova/virt/disk/api.py:444 #, python-format msgid "Ignoring error injecting %(inject)s into image (%(e)s)" msgstr "Ignorando el error al inyectar %(inject)s en la imagen (%(e)s)" -#: nova/virt/disk/api.py:609 +#: nova/virt/disk/api.py:604 msgid "Not implemented on Windows" msgstr "No implementado en Windows" -#: nova/virt/disk/api.py:636 +#: nova/virt/disk/api.py:631 #, python-format msgid "User %(username)s not found in password file." msgstr "El usuario %(username)s no se ha encontrado en el archivo de contraseña." -#: nova/virt/disk/api.py:652 +#: nova/virt/disk/api.py:647 #, python-format msgid "User %(username)s not found in shadow file." msgstr "El usuario %(username)s no se ha encontrado en el archivo de duplicación. " @@ -8550,11 +8604,11 @@ msgstr "" "No se puede encontrar el nombre de iniciador ISCSI. Eligiendo el " "predeterminado" -#: nova/virt/hyperv/driver.py:165 +#: nova/virt/hyperv/driver.py:169 msgid "VIF plugging is not supported by the Hyper-V driver." msgstr "Conexión de VIF no está soportado por el driver de Hyper-V." -#: nova/virt/hyperv/driver.py:170 +#: nova/virt/hyperv/driver.py:174 msgid "VIF unplugging is not supported by the Hyper-V driver." msgstr "Desconexión de VIF no está soportado por el driver de Hyper-V." 
@@ -8643,7 +8697,7 @@ msgstr "" msgid "No external vswitch found" msgstr "No se ha encontrado vswitch externo" -#: nova/virt/hyperv/pathutils.py:71 +#: nova/virt/hyperv/pathutils.py:72 #, python-format msgid "The file copy from %(src)s to %(dest)s failed" msgstr "Se ha encontrado un error en la copia del archivo de %(src)s a %(dest)s" @@ -8653,30 +8707,30 @@ msgstr "Se ha encontrado un error en la copia del archivo de %(src)s a %(dest)s" msgid "Failed to remove snapshot for VM %s" msgstr "No se ha podido eliminar la instantánea para VM %s" -#: nova/virt/hyperv/vhdutils.py:65 nova/virt/hyperv/vhdutilsv2.py:63 +#: nova/virt/hyperv/vhdutils.py:66 nova/virt/hyperv/vhdutilsv2.py:64 #, python-format msgid "Unsupported disk format: %s" msgstr "Formato de disco no soportado: %s" -#: nova/virt/hyperv/vhdutils.py:150 +#: nova/virt/hyperv/vhdutils.py:151 #, python-format msgid "The %(vhd_type)s type VHD is not supported" msgstr "El VHD de tipo %(vhd_type)s no está soportado" -#: nova/virt/hyperv/vhdutils.py:161 +#: nova/virt/hyperv/vhdutils.py:162 #, python-format msgid "Unable to obtain block size from VHD %(vhd_path)s" msgstr "Incapaz de obtener el tamaño de bloque del VHD %(vhd_path)s" -#: nova/virt/hyperv/vhdutils.py:208 +#: nova/virt/hyperv/vhdutils.py:209 msgid "Unsupported virtual disk format" msgstr "Formato de disco virtual no soportado." -#: nova/virt/hyperv/vhdutilsv2.py:134 +#: nova/virt/hyperv/vhdutilsv2.py:135 msgid "Differencing VHDX images are not supported" msgstr "La diferenciación de imágenes VHDX no está soportada" -#: nova/virt/hyperv/vhdutilsv2.py:157 +#: nova/virt/hyperv/vhdutilsv2.py:158 #, python-format msgid "Unable to obtain internal size from VHDX: %(vhd_path)s. 
Exception: %(ex)s" msgstr "" @@ -8701,12 +8755,12 @@ msgstr "" msgid "Spawning new instance" msgstr "Generando nueva instancia" -#: nova/virt/hyperv/vmops.py:280 nova/virt/vmwareapi/vmops.py:520 +#: nova/virt/hyperv/vmops.py:280 nova/virt/vmwareapi/vmops.py:567 #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "config_drive_format \"%s\" no válido" -#: nova/virt/hyperv/vmops.py:283 nova/virt/vmwareapi/vmops.py:524 +#: nova/virt/hyperv/vmops.py:283 nova/virt/vmwareapi/vmops.py:571 msgid "Using config drive for instance" msgstr "Utilizando dispositivo de configuración para instancia" @@ -8715,7 +8769,7 @@ msgstr "Utilizando dispositivo de configuración para instancia" msgid "Creating config drive at %(path)s" msgstr "Creando unidad de configuración en %(path)s" -#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:549 +#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:596 #, python-format msgid "Creating config drive failed with error: %s" msgstr "La creación de unidad de configuración ha fallado con el error: %s" @@ -8778,12 +8832,12 @@ msgstr "Tarea WMI fallida con estado %d. 
No hay descripción de error disponible msgid "Metrics collection is not supported on this version of Hyper-V" msgstr "La recolección de métricas no está soportada en esta versión de Hyper-V" -#: nova/virt/hyperv/volumeops.py:146 +#: nova/virt/hyperv/volumeops.py:148 #, python-format msgid "Unable to attach volume to instance %s" msgstr "Imposible adjuntar volumen a la instancia %s" -#: nova/virt/hyperv/volumeops.py:215 nova/virt/hyperv/volumeops.py:229 +#: nova/virt/hyperv/volumeops.py:222 nova/virt/hyperv/volumeops.py:236 #, python-format msgid "Unable to find a mounted disk for target_iqn: %s" msgstr "No se ha podido encontrar un disco montado para target_iqn: %s " @@ -8813,21 +8867,21 @@ msgstr "No hay nombres de dispositivo de disco libres para el prefijo '%s'" msgid "Unable to determine disk bus for '%s'" msgstr "No se puede determinar el bus de disco para '%s'" -#: nova/virt/libvirt/driver.py:542 +#: nova/virt/libvirt/driver.py:556 #, python-format msgid "Connection to libvirt lost: %s" msgstr "Conexión hacia libvirt perdida: %s" -#: nova/virt/libvirt/driver.py:724 +#: nova/virt/libvirt/driver.py:739 #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "No se puede manejar la solicitud de autenticación para las credenciales %d" -#: nova/virt/libvirt/driver.py:868 +#: nova/virt/libvirt/driver.py:932 msgid "operation time out" msgstr "Tiempo de espera agotado para la operación" -#: nova/virt/libvirt/driver.py:1187 +#: nova/virt/libvirt/driver.py:1257 #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " @@ -8836,61 +8890,61 @@ msgstr "" "El volúmen establece el tamaño de bloque, pero el hipervisor libvirt " "actual '%s' no soporta tamaño de bloque personalizado." -#: nova/virt/libvirt/driver.py:1194 +#: nova/virt/libvirt/driver.py:1264 #, python-format msgid "Volume sets block size, but libvirt '%s' or later is required." 
msgstr "" "El volúmen establece el tamaño de bloque, pero se requiere libvirt '%s' o" " mayor." -#: nova/virt/libvirt/driver.py:1292 +#: nova/virt/libvirt/driver.py:1352 msgid "Swap only supports host devices" msgstr "El espacio de intercambio solamente soporta dispositivos de anfitrión " -#: nova/virt/libvirt/driver.py:1579 +#: nova/virt/libvirt/driver.py:1635 msgid "libvirt error while requesting blockjob info." msgstr "error de libvirt al solicitar información de blockjob." -#: nova/virt/libvirt/driver.py:1712 +#: nova/virt/libvirt/driver.py:1776 msgid "Found no disk to snapshot." msgstr "No se ha encontrado disco relacionado a instantánea." -#: nova/virt/libvirt/driver.py:1790 +#: nova/virt/libvirt/driver.py:1868 #, python-format msgid "Unknown type: %s" msgstr "Tipo desconocido: %s" -#: nova/virt/libvirt/driver.py:1795 +#: nova/virt/libvirt/driver.py:1873 msgid "snapshot_id required in create_info" msgstr "snapshot_id es requerido en create_info" -#: nova/virt/libvirt/driver.py:1853 +#: nova/virt/libvirt/driver.py:1931 #, python-format msgid "Libvirt '%s' or later is required for online deletion of volume snapshots." msgstr "" "Libvirt '%s' o mayor se requiere para remoción en línea de instantáneas " "de volumen." -#: nova/virt/libvirt/driver.py:1860 +#: nova/virt/libvirt/driver.py:1938 #, python-format msgid "Unknown delete_info type %s" msgstr "Tipo delete_info %s desconocido" -#: nova/virt/libvirt/driver.py:1890 +#: nova/virt/libvirt/driver.py:1966 #, python-format -msgid "Unable to locate disk matching id: %s" -msgstr "Incapaz de localizar identificador de disco coincidente: %s" +msgid "Disk with id: %s not found attached to instance." 
+msgstr "" -#: nova/virt/libvirt/driver.py:2330 nova/virt/xenapi/vmops.py:1552 +#: nova/virt/libvirt/driver.py:2407 nova/virt/xenapi/vmops.py:1552 msgid "Guest does not have a console available" msgstr "El invitado no tiene una consola disponible" -#: nova/virt/libvirt/driver.py:2746 +#: nova/virt/libvirt/driver.py:2823 #, python-format msgid "Detaching PCI devices with libvirt < %(ver)s is not permitted" msgstr "La remoción de dispositivos PCI con libvirt < %(ver)s no está permitida" -#: nova/virt/libvirt/driver.py:2912 +#: nova/virt/libvirt/driver.py:2989 #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt " @@ -8899,19 +8953,19 @@ msgstr "" "La configuración ha solicitado un modelo CPU explícito, pero el " "hipervisor libvirt actual '%s' no soporta la selección de modelos de CPU" -#: nova/virt/libvirt/driver.py:2918 +#: nova/virt/libvirt/driver.py:2995 msgid "Config requested a custom CPU model, but no model name was provided" msgstr "" "La configuración ha solicitado un modelo de CPU personalizado, pero no se" " ha proporcionado ningún nombre de modelo" -#: nova/virt/libvirt/driver.py:2922 +#: nova/virt/libvirt/driver.py:2999 msgid "A CPU model name should not be set when a host CPU model is requested" msgstr "" "No se debe establecer un nombre de modelo de CPU cuando se solicita un " "modelo de CPU de host" -#: nova/virt/libvirt/driver.py:2942 +#: nova/virt/libvirt/driver.py:3019 msgid "" "Passthrough of the host CPU was requested but this libvirt version does " "not support this feature" @@ -8919,7 +8973,7 @@ msgstr "" "Se ha solicitado el paso a través de la CPU de host pero esta versión de " "libvirt no soporta esta función" -#: nova/virt/libvirt/driver.py:3475 +#: nova/virt/libvirt/driver.py:3567 #, python-format msgid "" "Error from libvirt while looking up %(instance_id)s: [Error Code " @@ -8928,7 +8982,7 @@ msgstr "" "Error de libvirt durante la búsqueda de %(instance_id)s: [Código de Error" " %(error_code)s] 
%(ex)s" -#: nova/virt/libvirt/driver.py:3496 +#: nova/virt/libvirt/driver.py:3588 #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " @@ -8937,27 +8991,27 @@ msgstr "" "Error de libvirt al buscar %(instance_name)s: [Código de error " "%(error_code)s] %(ex)s" -#: nova/virt/libvirt/driver.py:3760 +#: nova/virt/libvirt/driver.py:3851 msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range." msgstr "Configuración vcpu_pin_set inválida, fuera de rango de cpu de hipervisor." -#: nova/virt/libvirt/driver.py:3890 +#: nova/virt/libvirt/driver.py:3974 msgid "libvirt version is too old (does not support getVersion)" msgstr "La versión libvirt es demasiado antigua (no soporta getVersion)" -#: nova/virt/libvirt/driver.py:4251 +#: nova/virt/libvirt/driver.py:4335 msgid "Block migration can not be used with shared storage." msgstr "" "No se puede utilizar la migración de bloque con almacenamiento " "compartido. " -#: nova/virt/libvirt/driver.py:4259 +#: nova/virt/libvirt/driver.py:4344 msgid "Live migration can not be used without shared storage." msgstr "" "No se puede utilizar la migración en directo con almacenamiento " "compartido." 
-#: nova/virt/libvirt/driver.py:4303 +#: nova/virt/libvirt/driver.py:4414 #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too " @@ -8967,7 +9021,7 @@ msgstr "" "demasiado grande (disponible en host de destino: %(available)s < " "necesario: %(necessary)s)" -#: nova/virt/libvirt/driver.py:4342 +#: nova/virt/libvirt/driver.py:4453 #, python-format msgid "" "CPU doesn't have compatibility.\n" @@ -8982,44 +9036,72 @@ msgstr "" "\n" "Consulte %(u)s" -#: nova/virt/libvirt/driver.py:4409 +#: nova/virt/libvirt/driver.py:4516 #, python-format msgid "The firewall filter for %s does not exist" msgstr "El filtro de cortafuegos para %s no existe " -#: nova/virt/libvirt/driver.py:4900 +#: nova/virt/libvirt/driver.py:4579 +msgid "" +"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag " +"or your destination node does not support retrieving listen addresses. " +"In order for live migration to work properly, you must configure the " +"graphics (VNC and/or SPICE) listen addresses to be either the catch-all " +"address (0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)." +msgstr "" + +#: nova/virt/libvirt/driver.py:4596 +msgid "" +"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag," +" and the graphics (VNC and/or SPICE) listen addresses on the destination" +" node do not match the addresses on the source node. Since the source " +"node has listen addresses set to either the catch-all address (0.0.0.0 or" +" ::) or the local address (127.0.0.1 or ::1), the live migration will " +"succeed, but the VM will continue to listen on the current addresses." 
+msgstr "" + +#: nova/virt/libvirt/driver.py:4964 +#, python-format +msgid "" +"Error from libvirt while getting description of %(instance_name)s: [Error" +" Code %(error_code)s] %(ex)s" +msgstr "" +"Error de libvirt al obtener la descripción de %(instance_name)s: [Código " +"de error %(error_code)s] %(ex)s" + +#: nova/virt/libvirt/driver.py:5090 msgid "Unable to resize disk down." msgstr "Incapaz de reducir el tamaño del disco." -#: nova/virt/libvirt/imagebackend.py:258 +#: nova/virt/libvirt/imagebackend.py:257 #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "No se puede cargar la linea %(line)s, se ha obtenido el error %(error)s" -#: nova/virt/libvirt/imagebackend.py:273 +#: nova/virt/libvirt/imagebackend.py:272 msgid "Attempted overwrite of an existing value." msgstr "Se ha intentado sobreescribir un valor ya existente." -#: nova/virt/libvirt/imagebackend.py:429 +#: nova/virt/libvirt/imagebackend.py:433 msgid "You should specify images_volume_group flag to use LVM images." msgstr "" "Debes especificar la bandera images_volue_group para utilizar imagenes " "LVM." -#: nova/virt/libvirt/imagebackend.py:544 +#: nova/virt/libvirt/imagebackend.py:548 msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "Debes especificar la bandera images_rbd_pool para utilizar imagenes rbd." -#: nova/virt/libvirt/imagebackend.py:658 +#: nova/virt/libvirt/imagebackend.py:660 msgid "rbd python libraries not found" msgstr "Las librerías rbd python no han sido encontradas" -#: nova/virt/libvirt/imagebackend.py:697 +#: nova/virt/libvirt/imagebackend.py:703 #, python-format msgid "Unknown image_type=%s" msgstr "image_type=%s desconocido " -#: nova/virt/libvirt/lvm.py:55 +#: nova/virt/libvirt/lvm.py:54 #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db " @@ -9028,17 +9110,17 @@ msgstr "" "Espacio insuficiente en grupo de volumen %(vg)s. 
Sólo %(free_space)db " "disponibles, pero se necesitan %(size)db para el volumen %(lv)s." -#: nova/virt/libvirt/lvm.py:103 +#: nova/virt/libvirt/lvm.py:102 #, python-format msgid "vg %s must be LVM volume group" msgstr "El grupo de volúmenes %s debe ser el grupo de volúmenes LVM" -#: nova/virt/libvirt/lvm.py:146 +#: nova/virt/libvirt/lvm.py:145 #, python-format msgid "Path %s must be LVM logical volume" msgstr "La vía de acceso %s debe ser el volumen lógico LVM" -#: nova/virt/libvirt/lvm.py:222 +#: nova/virt/libvirt/lvm.py:221 #, python-format msgid "volume_clear='%s' is not handled" msgstr "volume_clear='%s' no está manejado" @@ -9047,44 +9129,44 @@ msgstr "volume_clear='%s' no está manejado" msgid "Cannot find any Fibre Channel HBAs" msgstr "No se puede encontrar ningún HBA de canal de fibra" -#: nova/virt/libvirt/utils.py:431 +#: nova/virt/libvirt/utils.py:437 msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "" "No se puede recuperar la vía de acceso ed dispositivo raíz de la " "configuración de libvirt de instancia" -#: nova/virt/libvirt/vif.py:353 nova/virt/libvirt/vif.py:608 -#: nova/virt/libvirt/vif.py:797 +#: nova/virt/libvirt/vif.py:356 nova/virt/libvirt/vif.py:574 +#: nova/virt/libvirt/vif.py:750 msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" "El parámetro vif_type debe estar presente para esta implementación de " "vif_driver" -#: nova/virt/libvirt/vif.py:397 nova/virt/libvirt/vif.py:628 -#: nova/virt/libvirt/vif.py:817 +#: nova/virt/libvirt/vif.py:362 nova/virt/libvirt/vif.py:580 +#: nova/virt/libvirt/vif.py:756 #, python-format msgid "Unexpected vif_type=%s" msgstr "vif_type=%s inesperado" -#: nova/virt/libvirt/volume.py:291 +#: nova/virt/libvirt/volume.py:294 #, python-format msgid "iSCSI device not found at %s" msgstr "No se ha encontrado el dispositivo iSCSI en %s" -#: nova/virt/libvirt/volume.py:737 +#: nova/virt/libvirt/volume.py:740 #, python-format msgid "AoE device not 
found at %s" msgstr "No se ha encontrado el dispositivo AoE en %s" -#: nova/virt/libvirt/volume.py:909 +#: nova/virt/libvirt/volume.py:912 msgid "We are unable to locate any Fibre Channel devices" msgstr "No se puede localizar ningún dispositivo de canal de fibra" -#: nova/virt/libvirt/volume.py:928 +#: nova/virt/libvirt/volume.py:931 msgid "Fibre Channel device not found." msgstr "No se ha encontrado el dispositivo de canal de fibra." -#: nova/virt/vmwareapi/driver.py:103 +#: nova/virt/vmwareapi/driver.py:104 msgid "" "The VMware ESX driver is now deprecated and will be removed in the Juno " "release. The VC driver will remain and continue to be supported." @@ -9093,7 +9175,7 @@ msgstr "" "liberación Juno. El controlador CV se mantendrá y seguirá siendo " "soportado." -#: nova/virt/vmwareapi/driver.py:115 +#: nova/virt/vmwareapi/driver.py:116 msgid "" "Must specify host_ip, host_username and host_password to use " "compute_driver=vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver" @@ -9101,38 +9183,38 @@ msgstr "" "Se debe especificar host_ip, host_username y host_password para usar " "compute_driver=vmwareapi.VMwareESXDriver o vmwareapi.VMwareVCDriver" -#: nova/virt/vmwareapi/driver.py:127 +#: nova/virt/vmwareapi/driver.py:128 #, python-format msgid "Invalid Regular Expression %s" msgstr "La expresión regular %s es inválida" -#: nova/virt/vmwareapi/driver.py:242 +#: nova/virt/vmwareapi/driver.py:243 msgid "Instance cannot be found in host, or in an unknownstate." 
msgstr "" "La instancia no se puede encontrar en el anfitrión o en un estado " "desconocido" -#: nova/virt/vmwareapi/driver.py:398 +#: nova/virt/vmwareapi/driver.py:403 #, python-format msgid "All clusters specified %s were not found in the vCenter" msgstr "Todos los clusters especificados %s no fueron encontrados en vCenter" -#: nova/virt/vmwareapi/driver.py:407 +#: nova/virt/vmwareapi/driver.py:412 #, python-format msgid "The following clusters could not be found in the vCenter %s" msgstr "Los siguientes clusters no pueden ser encontrados en el vcenter %s" -#: nova/virt/vmwareapi/driver.py:544 +#: nova/virt/vmwareapi/driver.py:551 #, python-format msgid "The resource %s does not exist" msgstr "El recurso %s no existe" -#: nova/virt/vmwareapi/driver.py:590 +#: nova/virt/vmwareapi/driver.py:597 #, python-format msgid "Invalid cluster or resource pool name : %s" msgstr "Cluster o nombre de pool de recursos inválido: %s" -#: nova/virt/vmwareapi/driver.py:757 +#: nova/virt/vmwareapi/driver.py:771 msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we " "do not return uptime for just one host." @@ -9141,197 +9223,154 @@ msgstr "" "vCenter de VMware; por lo tanto no se puede regresar tiempo de ejecución " "solamente para un huésped." -#: nova/virt/vmwareapi/driver.py:845 -#, python-format -msgid "" -"Unable to connect to server at %(server)s, sleeping for %(seconds)s " -"seconds" -msgstr "" -"Incapaz de conectar al servidor en %(server)s, esperando durante " -"%(seconds)s segundos" - -#: nova/virt/vmwareapi/driver.py:865 +#: nova/virt/vmwareapi/driver.py:884 #, python-format msgid "Unable to validate session %s!" msgstr "Incapaz de validar sesión %s!" -#: nova/virt/vmwareapi/driver.py:906 +#: nova/virt/vmwareapi/driver.py:926 #, python-format msgid "Session %s is inactive!" msgstr "La sesión %s se encuentra inactiva!" 
-#: nova/virt/vmwareapi/driver.py:954 -#, python-format -msgid "In vmwareapi: _call_method (session=%s)" -msgstr "En vmwareapi: _call_method (session=%s)" - -#: nova/virt/vmwareapi/driver.py:998 +#: nova/virt/vmwareapi/driver.py:1017 #, python-format msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" msgstr "Tarea [%(task_name)s] %(task_ref)s estado: error %(error_info)s" -#: nova/virt/vmwareapi/driver.py:1008 +#: nova/virt/vmwareapi/driver.py:1027 #, python-format msgid "In vmwareapi:_poll_task, Got this error %s" msgstr "En vmwareapi:_poll_task, se ha obtenido este error %s" -#: nova/virt/vmwareapi/ds_util.py:38 +#: nova/virt/vmwareapi/ds_util.py:41 msgid "Datastore name cannot be None" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:40 +#: nova/virt/vmwareapi/ds_util.py:43 msgid "Datastore reference cannot be None" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:42 +#: nova/virt/vmwareapi/ds_util.py:45 msgid "Invalid capacity" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:45 +#: nova/virt/vmwareapi/ds_util.py:48 msgid "Capacity is smaller than free space" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:106 +#: nova/virt/vmwareapi/ds_util.py:109 msgid "datastore name empty" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:111 +#: nova/virt/vmwareapi/ds_util.py:114 nova/virt/vmwareapi/ds_util.py:146 msgid "path component cannot be None" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:144 +#: nova/virt/vmwareapi/ds_util.py:160 msgid "datastore path empty" msgstr "" -#: nova/virt/vmwareapi/error_util.py:46 +#: nova/virt/vmwareapi/error_util.py:45 msgid "exception_summary must not be a list" msgstr "exception_summary no debe ser una lista" -#: nova/virt/vmwareapi/error_util.py:76 +#: nova/virt/vmwareapi/error_util.py:75 msgid "fault_list must be a list" msgstr "fault_list debe ser una lista" -#: nova/virt/vmwareapi/error_util.py:122 +#: nova/virt/vmwareapi/error_util.py:121 #, python-format msgid "Error(s) %s occurred in the call to RetrievePropertiesEx" msgstr 
"El(los) Error(es) %s han ocurrido en la llamada de RetrievePropertiesEx" -#: nova/virt/vmwareapi/error_util.py:136 +#: nova/virt/vmwareapi/error_util.py:135 msgid "VMware Driver fault." msgstr "Falla de controlador de VMware" -#: nova/virt/vmwareapi/error_util.py:142 +#: nova/virt/vmwareapi/error_util.py:141 msgid "VMware Driver configuration fault." msgstr "Falla de configuración de de controlador de VMware" -#: nova/virt/vmwareapi/error_util.py:146 +#: nova/virt/vmwareapi/error_util.py:145 msgid "No default value for use_linked_clone found." msgstr "No se ha encontrado un valor predeterminado para used_linked_clone" -#: nova/virt/vmwareapi/error_util.py:150 +#: nova/virt/vmwareapi/error_util.py:149 #, python-format msgid "Missing parameter : %(param)s" msgstr "Parámetro omitido : %(param)s" -#: nova/virt/vmwareapi/error_util.py:154 +#: nova/virt/vmwareapi/error_util.py:153 msgid "No root disk defined." msgstr "No se ha definido un disco raíz." -#: nova/virt/vmwareapi/error_util.py:158 +#: nova/virt/vmwareapi/error_util.py:157 msgid "Resource already exists." msgstr "El recurso ya existe." -#: nova/virt/vmwareapi/error_util.py:163 +#: nova/virt/vmwareapi/error_util.py:162 msgid "Cannot delete file." msgstr "No se puede eliminar el archivo." -#: nova/virt/vmwareapi/error_util.py:168 +#: nova/virt/vmwareapi/error_util.py:167 msgid "File already exists." msgstr "El archivo ya existe." -#: nova/virt/vmwareapi/error_util.py:173 +#: nova/virt/vmwareapi/error_util.py:172 msgid "File fault." msgstr "Fallo de archivo." -#: nova/virt/vmwareapi/error_util.py:178 +#: nova/virt/vmwareapi/error_util.py:177 msgid "File locked." msgstr "Archivo bloqueado." -#: nova/virt/vmwareapi/error_util.py:183 +#: nova/virt/vmwareapi/error_util.py:182 msgid "File not found." msgstr "Archivo no encontrado." -#: nova/virt/vmwareapi/error_util.py:188 +#: nova/virt/vmwareapi/error_util.py:187 msgid "Invalid property." msgstr "Propiedad inválida." 
-#: nova/virt/vmwareapi/error_util.py:193 +#: nova/virt/vmwareapi/error_util.py:192 msgid "No Permission." msgstr "Sin permiso." -#: nova/virt/vmwareapi/error_util.py:198 +#: nova/virt/vmwareapi/error_util.py:197 msgid "Not Authenticated." msgstr "No autenticado." -#: nova/virt/vmwareapi/error_util.py:203 +#: nova/virt/vmwareapi/error_util.py:202 msgid "Invalid Power State." msgstr "Estado de energia inválido." -#: nova/virt/vmwareapi/error_util.py:228 +#: nova/virt/vmwareapi/error_util.py:227 #, python-format msgid "Fault %s not matched." msgstr "El fallo %s no ha coincidido." -#: nova/virt/vmwareapi/fake.py:243 -#, python-format -msgid "Property %(attr)s not set for the managed object %(name)s" -msgstr "" -"La propiedad %(attr)s no se ha establecido para el objeto gestionado " -"%(name)s" - -#: nova/virt/vmwareapi/fake.py:967 -msgid "There is no VM registered" -msgstr "No hay ninguna VM registrada" - -#: nova/virt/vmwareapi/fake.py:969 nova/virt/vmwareapi/fake.py:1290 -#, python-format -msgid "Virtual Machine with ref %s is not there" -msgstr "La máquina virtual con la referencia %s no está allí" - -#: nova/virt/vmwareapi/fake.py:1052 -#, python-format -msgid "Logging out a session that is invalid or already logged out: %s" -msgstr "Finalizando sesión que no es válida o que ya ha finalizado: %s" - -#: nova/virt/vmwareapi/fake.py:1070 -msgid "Session Invalid" -msgstr "Sesión no válida" - -#: nova/virt/vmwareapi/fake.py:1287 -msgid "No Virtual Machine has been registered yet" -msgstr "No se ha registrado aún ninguna máquina virtual " - #: nova/virt/vmwareapi/imagecache.py:74 #, python-format msgid "Unable to delete %(file)s. Exception: %(ex)s" msgstr "Incapaz de remover %(file)s. Excepción: %(ex)s" -#: nova/virt/vmwareapi/imagecache.py:148 +#: nova/virt/vmwareapi/imagecache.py:147 #, python-format msgid "Image %s is no longer used by this node. Pending deletion!" msgstr "La imagen %s ya no está en uso por este nodo. Remoción pendiente!" 
-#: nova/virt/vmwareapi/imagecache.py:153 +#: nova/virt/vmwareapi/imagecache.py:152 #, python-format msgid "Image %s is no longer used. Deleting!" msgstr "La imagen %s ya no está en uso. Eliminando!" -#: nova/virt/vmwareapi/io_util.py:121 +#: nova/virt/vmwareapi/io_util.py:122 #, python-format msgid "Glance image %s is in killed state" msgstr "La imagen Glance %s está en estado de matado" -#: nova/virt/vmwareapi/io_util.py:129 +#: nova/virt/vmwareapi/io_util.py:130 #, python-format msgid "Glance image %(image_id)s is in unknown state - %(state)s" msgstr "La imagen Glance %(image_id)s está en estado desconocido - %(state)s" @@ -9392,50 +9431,49 @@ msgstr "Excepción en %s " msgid "Unable to retrieve value for %(path)s Reason: %(reason)s" msgstr "Incapaz de obtener valor de %(path)s Razón: %(reason)s" -#: nova/virt/vmwareapi/vm_util.py:195 +#: nova/virt/vmwareapi/vm_util.py:196 #, python-format msgid "%s is not supported." msgstr "%s no está soportada." -#: nova/virt/vmwareapi/vm_util.py:980 +#: nova/virt/vmwareapi/vm_util.py:989 msgid "No host available on cluster" msgstr "No hay anfitrión disponible en cluster." 
-#: nova/virt/vmwareapi/vm_util.py:1210 +#: nova/virt/vmwareapi/vm_util.py:1083 #, python-format msgid "Failed to get cluster references %s" msgstr "Fallo al obtener las referencias del cluster %s" -#: nova/virt/vmwareapi/vm_util.py:1222 +#: nova/virt/vmwareapi/vm_util.py:1095 #, python-format msgid "Failed to get resource pool references %s" msgstr "Fallo al obtener las referencias del pool de recursos %s" -#: nova/virt/vmwareapi/vm_util.py:1404 +#: nova/virt/vmwareapi/vm_util.py:1285 msgid "vmwareapi:vm_util:clone_vmref_for_instance, called with vm_ref=None" msgstr "" "vmwareapi:vm_util:clone_vmref_for_instance, ha sido llamada con " "vm_ref=None" -#: nova/virt/vmwareapi/vmops.py:131 +#: nova/virt/vmwareapi/vmops.py:132 #, python-format msgid "Extending virtual disk failed with error: %s" msgstr "La extensión del disco virtual ha fallado con el error: %s" -#: nova/virt/vmwareapi/vmops.py:246 +#: nova/virt/vmwareapi/vmops.py:249 msgid "Image disk size greater than requested disk size" msgstr "La imagen de disco es más grande que el tamaño del disco solicitado" -#: nova/virt/vmwareapi/vmops.py:471 -#, python-format -msgid "Root disk file creation failed - %s" -msgstr "Fallo al crear el archivo del disco raíz - %s" - -#: nova/virt/vmwareapi/vmops.py:813 +#: nova/virt/vmwareapi/vmops.py:856 msgid "instance is not powered on" msgstr "instancia no activada" -#: nova/virt/vmwareapi/vmops.py:869 +#: nova/virt/vmwareapi/vmops.py:884 +msgid "Instance does not exist on backend" +msgstr "" + +#: nova/virt/vmwareapi/vmops.py:916 #, python-format msgid "" "In vmwareapi:vmops:_destroy_instance, got this exception while un-" @@ -9444,36 +9482,33 @@ msgstr "" "En vmwareapi:vmops:_destroy_instance, se obtuvo esta excepción mientras " "se removía el registro de VM: %s" -#: nova/virt/vmwareapi/vmops.py:892 -#, python-format +#: nova/virt/vmwareapi/vmops.py:939 msgid "" -"In vmwareapi:vmops:_destroy_instance, got this exception while deleting " -"the VM contents from the disk: %s" 
+"In vmwareapi:vmops:_destroy_instance, exception while deleting the VM " +"contents from the disk" msgstr "" -"En vmwareapi:vmops:_destroy_instance, se obtuvo esta excepción mientras " -"se removía el contenido de la VM del disco: %s" -#: nova/virt/vmwareapi/vmops.py:926 +#: nova/virt/vmwareapi/vmops.py:972 msgid "pause not supported for vmwareapi" msgstr "pausa no soportada para vmwareapi" -#: nova/virt/vmwareapi/vmops.py:930 +#: nova/virt/vmwareapi/vmops.py:976 msgid "unpause not supported for vmwareapi" msgstr "cancelación de pausa no soportada para vmwareapi" -#: nova/virt/vmwareapi/vmops.py:948 +#: nova/virt/vmwareapi/vmops.py:994 msgid "instance is powered off and cannot be suspended." msgstr "instancia está desactivada y no se puede suspender. " -#: nova/virt/vmwareapi/vmops.py:968 +#: nova/virt/vmwareapi/vmops.py:1014 msgid "instance is not in a suspended state" msgstr "la instancia no está en un estado suspendido" -#: nova/virt/vmwareapi/vmops.py:1056 +#: nova/virt/vmwareapi/vmops.py:1102 msgid "instance is suspended and cannot be powered off." 
msgstr "la instancia está suspendida y no se puede desactivar " -#: nova/virt/vmwareapi/vmops.py:1147 +#: nova/virt/vmwareapi/vmops.py:1193 #, python-format msgid "" "In vmwareapi:vmops:confirm_migration, got this exception while destroying" @@ -9482,29 +9517,29 @@ msgstr "" "En vmwareapi:vmops:confirm_migration, se ha obtenido esta excepción al " "destruir la máquina virtual: %s" -#: nova/virt/vmwareapi/vmops.py:1213 nova/virt/xenapi/vmops.py:1497 +#: nova/virt/vmwareapi/vmops.py:1255 nova/virt/xenapi/vmops.py:1497 #, python-format msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" msgstr "" "Se han encontrado %(instance_count)d rearranques colgados de más de " "%(timeout)d segundos" -#: nova/virt/vmwareapi/vmops.py:1217 nova/virt/xenapi/vmops.py:1501 +#: nova/virt/vmwareapi/vmops.py:1259 nova/virt/xenapi/vmops.py:1501 msgid "Automatically hard rebooting" msgstr "Rearrancando automáticamente de forma permanente" -#: nova/virt/vmwareapi/volumeops.py:217 nova/virt/vmwareapi/volumeops.py:251 +#: nova/virt/vmwareapi/volumeops.py:340 nova/virt/vmwareapi/volumeops.py:375 #, python-format msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" msgstr "" "El punto de montaje %(mountpoint)s esta unido a la instancia " "%(instance_name)s" -#: nova/virt/vmwareapi/volumeops.py:239 nova/virt/vmwareapi/volumeops.py:414 +#: nova/virt/vmwareapi/volumeops.py:363 nova/virt/vmwareapi/volumeops.py:538 msgid "Unable to find iSCSI Target" msgstr "No se puede encontrar el destino iSCSI " -#: nova/virt/vmwareapi/volumeops.py:337 +#: nova/virt/vmwareapi/volumeops.py:461 #, python-format msgid "" "The volume's backing has been relocated to %s. Need to consolidate " @@ -9513,11 +9548,11 @@ msgstr "" "El volúmen de apoyo ha sido reubicado a %s. Se necesita consolidar el " "archivo de disco de apoyo." 
-#: nova/virt/vmwareapi/volumeops.py:375 nova/virt/vmwareapi/volumeops.py:422 +#: nova/virt/vmwareapi/volumeops.py:499 nova/virt/vmwareapi/volumeops.py:546 msgid "Unable to find volume" msgstr "No se puede encontrar volumen" -#: nova/virt/vmwareapi/volumeops.py:395 nova/virt/vmwareapi/volumeops.py:424 +#: nova/virt/vmwareapi/volumeops.py:519 nova/virt/vmwareapi/volumeops.py:548 #: nova/virt/xenapi/volumeops.py:148 #, python-format msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" @@ -9620,16 +9655,16 @@ msgstr "Error al limpiar VDI conectados " msgid "Could not determine key: %s" msgstr "No se ha podido determinar la clave: %s" -#: nova/virt/xenapi/driver.py:632 +#: nova/virt/xenapi/driver.py:636 msgid "Host startup on XenServer is not supported." msgstr "No se soporta el arranque de host en XenServer." -#: nova/virt/xenapi/fake.py:812 +#: nova/virt/xenapi/fake.py:811 #, python-format msgid "xenapi.fake does not have an implementation for %s" msgstr "xenapi.fake no tiene una implementación para %s" -#: nova/virt/xenapi/fake.py:920 +#: nova/virt/xenapi/fake.py:919 #, python-format msgid "" "xenapi.fake does not have an implementation for %s or it has been called " @@ -9767,16 +9802,16 @@ msgstr "" "El dispositivo con identificador %(id)s especificado no está soportado " "por la versión del hipervisor %(version)s" -#: nova/virt/xenapi/vm_utils.py:325 nova/virt/xenapi/vm_utils.py:340 +#: nova/virt/xenapi/vm_utils.py:326 nova/virt/xenapi/vm_utils.py:341 msgid "VM already halted, skipping shutdown..." msgstr "VM ya se ha detenido, omitiendo la conclusión... 
" -#: nova/virt/xenapi/vm_utils.py:392 +#: nova/virt/xenapi/vm_utils.py:393 #, python-format msgid "VBD %s already detached" msgstr "VBD %s ya se ha desconectado" -#: nova/virt/xenapi/vm_utils.py:395 +#: nova/virt/xenapi/vm_utils.py:396 #, python-format msgid "" "VBD %(vbd_ref)s uplug failed with \"%(err)s\", attempt " @@ -9785,36 +9820,36 @@ msgstr "" "La desconexión del VBD %(vbd_ref)s ha fallado con \"%(err)s\", intento " "%(num_attempt)d/%(max_attempts)d" -#: nova/virt/xenapi/vm_utils.py:402 +#: nova/virt/xenapi/vm_utils.py:403 #, python-format msgid "Unable to unplug VBD %s" msgstr "Imposible desconectar VBD %s" -#: nova/virt/xenapi/vm_utils.py:405 +#: nova/virt/xenapi/vm_utils.py:406 #, python-format msgid "Reached maximum number of retries trying to unplug VBD %s" msgstr "Se ha alcanzado el número máximo de reintentos de desconectar VBD %s " -#: nova/virt/xenapi/vm_utils.py:417 +#: nova/virt/xenapi/vm_utils.py:418 #, python-format msgid "Unable to destroy VBD %s" msgstr "Imposible destruir VBD %s" -#: nova/virt/xenapi/vm_utils.py:470 +#: nova/virt/xenapi/vm_utils.py:471 #, python-format msgid "Unable to destroy VDI %s" msgstr "No se puede destruir VDI %s" -#: nova/virt/xenapi/vm_utils.py:516 +#: nova/virt/xenapi/vm_utils.py:517 msgid "SR not present and could not be introduced" msgstr "SR no está presente y no se ha podido introducir" -#: nova/virt/xenapi/vm_utils.py:700 +#: nova/virt/xenapi/vm_utils.py:701 #, python-format msgid "No primary VDI found for %s" msgstr "No se ha encontrado VDI primario para %s" -#: nova/virt/xenapi/vm_utils.py:792 +#: nova/virt/xenapi/vm_utils.py:793 #, python-format msgid "" "Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s" @@ -9823,12 +9858,12 @@ msgstr "" "Solo los SRs basados en archivo (ext/NFS) están soportados por esta " "característica. 
SR %(uuid)s es del tipo %(type)s" -#: nova/virt/xenapi/vm_utils.py:871 +#: nova/virt/xenapi/vm_utils.py:872 #, python-format msgid "Multiple base images for image: %s" msgstr "Múltiple imágenes base para la imagen: %s" -#: nova/virt/xenapi/vm_utils.py:926 +#: nova/virt/xenapi/vm_utils.py:927 #, python-format msgid "" "VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor " @@ -9837,31 +9872,31 @@ msgstr "" "El VDI %(vdi_ref)s es de %(virtual_size)d bytes lo que es mayor que el " "tamaño del sabor de %(new_disk_size)d bytes." -#: nova/virt/xenapi/vm_utils.py:937 nova/virt/xenapi/vmops.py:1037 +#: nova/virt/xenapi/vm_utils.py:938 nova/virt/xenapi/vmops.py:1037 msgid "Can't resize a disk to 0 GB." msgstr "No se puede cambiar el tamaño de archivo a 0 GB." -#: nova/virt/xenapi/vm_utils.py:989 +#: nova/virt/xenapi/vm_utils.py:990 msgid "Disk must have only one partition." msgstr "el disco debe tener una sola partición." -#: nova/virt/xenapi/vm_utils.py:994 +#: nova/virt/xenapi/vm_utils.py:995 #, python-format msgid "Disk contains a filesystem we are unable to resize: %s" msgstr "" "El disco contiene un sistema de archivos incapaz de modificar su tamaño: " "%s" -#: nova/virt/xenapi/vm_utils.py:999 +#: nova/virt/xenapi/vm_utils.py:1000 msgid "The only partition should be partition 1." msgstr "La unica partición debe ser la partición 1." -#: nova/virt/xenapi/vm_utils.py:1010 +#: nova/virt/xenapi/vm_utils.py:1011 #, python-format msgid "Attempted auto_configure_disk failed because: %s" msgstr "El intento de auto_configure_disk ha fallado por: %s" -#: nova/virt/xenapi/vm_utils.py:1261 +#: nova/virt/xenapi/vm_utils.py:1262 #, python-format msgid "" "Fast cloning is only supported on default local SR of type ext. SR on " @@ -9871,24 +9906,24 @@ msgstr "" "ext. Se ha encontrado que los SR de este sistema son de tipo %s. " "Ignorando el identificador cow." 
-#: nova/virt/xenapi/vm_utils.py:1336 +#: nova/virt/xenapi/vm_utils.py:1337 #, python-format msgid "Unrecognized cache_images value '%s', defaulting to True" msgstr "" "Valor cache_images no reconocido '%s', se toma True como valor " "predeterminado" -#: nova/virt/xenapi/vm_utils.py:1412 +#: nova/virt/xenapi/vm_utils.py:1413 #, python-format msgid "Invalid value '%s' for torrent_images" msgstr "valor inválido '%s' para torrent_images" -#: nova/virt/xenapi/vm_utils.py:1435 +#: nova/virt/xenapi/vm_utils.py:1436 #, python-format msgid "Invalid value '%d' for image_compression_level" msgstr "Valor inválido '%d' para image_compression_level" -#: nova/virt/xenapi/vm_utils.py:1461 +#: nova/virt/xenapi/vm_utils.py:1462 #, python-format msgid "" "Download handler '%(handler)s' raised an exception, falling back to " @@ -9897,14 +9932,14 @@ msgstr "" "La descarga del manejador '%(handler)s' ha arrojado una excepción, " "restaurando hacia el manejador predeterminado '%(default_handler)s" -#: nova/virt/xenapi/vm_utils.py:1517 +#: nova/virt/xenapi/vm_utils.py:1518 #, python-format msgid "Image size %(size)d exceeded flavor allowed size %(allowed_size)d" msgstr "" "El tamaño de la imagen %(size)d excede el tamaño permitido por el sabor " "%(allowed_size)d" -#: nova/virt/xenapi/vm_utils.py:1568 +#: nova/virt/xenapi/vm_utils.py:1569 #, python-format msgid "" "Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " @@ -9913,26 +9948,26 @@ msgstr "" "La imagen de kernel/disco RAM es demasiado grande: %(vdi_size)d bytes, " "máx. 
%(max_size)d bytes" -#: nova/virt/xenapi/vm_utils.py:1610 +#: nova/virt/xenapi/vm_utils.py:1611 msgid "Failed to fetch glance image" msgstr "No se ha podido captar la imagen glance" -#: nova/virt/xenapi/vm_utils.py:1818 +#: nova/virt/xenapi/vm_utils.py:1819 #, python-format msgid "Unable to parse rrd of %s" msgstr "Incapaz de analizar rrd de %s" -#: nova/virt/xenapi/vm_utils.py:1848 +#: nova/virt/xenapi/vm_utils.py:1849 #, python-format msgid "Retry SR scan due to error: %s" msgstr "Reintentando escaneo de SR debido a error: %s" -#: nova/virt/xenapi/vm_utils.py:1881 +#: nova/virt/xenapi/vm_utils.py:1882 #, python-format msgid "Flag sr_matching_filter '%s' does not respect formatting convention" msgstr "El distintivo sr_matching_filter '%s' no respeta el convenio de formato" -#: nova/virt/xenapi/vm_utils.py:1902 +#: nova/virt/xenapi/vm_utils.py:1903 msgid "" "XenAPI is unable to find a Storage Repository to install guest instances " "on. Please check your configuration (e.g. set a default SR for the pool) " @@ -9943,11 +9978,11 @@ msgstr "" "establece un SR predeterminado en el conjunto) y/o ocnfigura el " "identificador 'sr_matching_filter'." -#: nova/virt/xenapi/vm_utils.py:1915 +#: nova/virt/xenapi/vm_utils.py:1916 msgid "Cannot find SR of content-type ISO" msgstr "No se puede encontrar SR de content-type ISO" -#: nova/virt/xenapi/vm_utils.py:1968 +#: nova/virt/xenapi/vm_utils.py:1969 #, python-format msgid "" "Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " @@ -9956,22 +9991,22 @@ msgstr "" "No se ha podido obtener XML RRD para la máquina virtual %(vm_uuid)s con " "los detalles de servidor: %(server)s." -#: nova/virt/xenapi/vm_utils.py:2096 +#: nova/virt/xenapi/vm_utils.py:2097 #, python-format msgid "VHD coalesce attempts exceeded (%d), giving up..." msgstr "Intentos de incorporación de VHD excedidos (%d), dejando de intentar..." 
-#: nova/virt/xenapi/vm_utils.py:2131 +#: nova/virt/xenapi/vm_utils.py:2132 #, python-format msgid "Timeout waiting for device %s to be created" msgstr "Se ha excedido el tiempo esperando a que se creara el dispositivo %s" -#: nova/virt/xenapi/vm_utils.py:2151 +#: nova/virt/xenapi/vm_utils.py:2152 #, python-format msgid "Disconnecting stale VDI %s from compute domU" msgstr "Desconectando VDI obsoleto %s de domU de cálculo " -#: nova/virt/xenapi/vm_utils.py:2309 +#: nova/virt/xenapi/vm_utils.py:2310 msgid "" "Shrinking the filesystem down with resize2fs has failed, please check if " "you have enough free space on your disk." @@ -9979,40 +10014,40 @@ msgstr "" "La reducción del sistema de archivos con resize2fs ha fallado, por favor " "verifica si tienes espacio libre suficiente en tu disco." -#: nova/virt/xenapi/vm_utils.py:2444 +#: nova/virt/xenapi/vm_utils.py:2445 msgid "Manipulating interface files directly" msgstr "Manipulando archivos de interfaz directamente " -#: nova/virt/xenapi/vm_utils.py:2453 +#: nova/virt/xenapi/vm_utils.py:2454 #, python-format msgid "Failed to mount filesystem (expected for non-linux instances): %s" msgstr "" "No se ha podido montar sistema de archivos (se espera para instancias no " "Linux): %s " -#: nova/virt/xenapi/vm_utils.py:2564 +#: nova/virt/xenapi/vm_utils.py:2566 msgid "This domU must be running on the host specified by connection_url" msgstr "" "Este domU debe estar en ejecución en el anfitrión especificado por " "connection_url" -#: nova/virt/xenapi/vm_utils.py:2633 +#: nova/virt/xenapi/vm_utils.py:2635 msgid "Failed to transfer vhd to new host" msgstr "No se ha podido transferir vhd al nuevo host" -#: nova/virt/xenapi/vm_utils.py:2659 +#: nova/virt/xenapi/vm_utils.py:2661 msgid "ipxe_boot_menu_url not set, user will have to enter URL manually..." msgstr "" "ipxe_boot_menu_url no establecido, el usuario debe ingresar la URL " "manualmente..." 
-#: nova/virt/xenapi/vm_utils.py:2665 +#: nova/virt/xenapi/vm_utils.py:2667 msgid "ipxe_network_name not set, user will have to enter IP manually..." msgstr "" "ipxe_network_name no establecido, el usuario debe ingresar la dirección " "IP manualmente..." -#: nova/virt/xenapi/vm_utils.py:2676 +#: nova/virt/xenapi/vm_utils.py:2678 #, python-format msgid "" "Unable to find network matching '%(network_name)s', user will have to " @@ -10021,7 +10056,7 @@ msgstr "" "Incapaz de encontrar red coincidente '%(network_name)s', el usuario " "deberá introducir una dirección IP manualmente..." -#: nova/virt/xenapi/vm_utils.py:2700 +#: nova/virt/xenapi/vm_utils.py:2702 #, python-format msgid "ISO creation tool '%s' does not exist." msgstr "La herramienta de creación de ISO '%s' no esiste." diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-critical.po b/nova/locale/fr/LC_MESSAGES/nova-log-critical.po index cf97a0c368..2efca429e6 100644 --- a/nova/locale/fr/LC_MESSAGES/nova-log-critical.po +++ b/nova/locale/fr/LC_MESSAGES/nova-log-critical.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-24 06:06+0000\n" +"POT-Creation-Date: 2014-07-21 06:04+0000\n" "PO-Revision-Date: 2014-05-30 06:26+0000\n" "Last-Translator: FULL NAME \n" "Language-Team: French (http://www.transifex.com/projects/p/nova/language/" @@ -19,5 +19,16 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" +#: nova/virt/vmwareapi/driver.py:864 +#, python-format +msgid "" +"Unable to connect to server at %(server)s, sleeping for %(seconds)s seconds" +msgstr "" + +#: nova/virt/vmwareapi/driver.py:973 +#, python-format +msgid "In vmwareapi: _call_method (session=%s)" +msgstr "" + #~ msgid "Dummy message for transifex setup." 
#~ msgstr "message fictif pour la configuration transifex" diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-error.po b/nova/locale/fr/LC_MESSAGES/nova-log-error.po index dbe999d332..3031dc40da 100644 --- a/nova/locale/fr/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/fr/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:08+0000\n" +"POT-Creation-Date: 2014-07-21 06:04+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: French (http://www.transifex.com/projects/p/nova/language/" @@ -44,6 +44,11 @@ msgstr "" msgid "Keystone failure: %s" msgstr "" +#: nova/compute/manager.py:5416 +msgid "" +"Periodic sync_power_state task had an error while processing an instance." +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "Impossible d'avertir les cellules de l'erreur d'instance" @@ -58,7 +63,7 @@ msgstr "Exception d'origine en cours de suppression : %s" msgid "Unexpected exception occurred %d time(s)... retrying." msgstr "Exception inattendue survenue %d fois... Nouvel essai." 
-#: nova/openstack/common/lockutils.py:120 +#: nova/openstack/common/lockutils.py:119 #, python-format msgid "Could not release the acquired lock `%s`" msgstr "" @@ -71,22 +76,22 @@ msgstr "dans l'appel en boucle de durée fixe" msgid "in dynamic looping call" msgstr "dans l'appel en boucle dynamique" -#: nova/openstack/common/periodic_task.py:179 +#: nova/openstack/common/periodic_task.py:202 #, python-format msgid "Error during %(full_task_name)s: %(e)s" msgstr "Erreur pendant %(full_task_name)s : %(e)s" -#: nova/openstack/common/policy.py:511 +#: nova/openstack/common/policy.py:507 #, python-format msgid "Failed to understand rule %s" msgstr "Règle %s incompréhensible" -#: nova/openstack/common/policy.py:521 +#: nova/openstack/common/policy.py:517 #, python-format msgid "No handler for matches of kind %s" msgstr "Aucun gestionnaire pour les correspondances de type %s" -#: nova/openstack/common/policy.py:791 +#: nova/openstack/common/policy.py:787 #, python-format msgid "Failed to understand rule %r" msgstr "Règle %r incompréhensible" @@ -116,137 +121,133 @@ msgstr "Exception BD encapsulée." msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:625 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:749 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:873 +#: nova/virt/libvirt/driver.py:937 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:889 -msgid "During wait destroy, instance disappeared." -msgstr "" - -#: nova/virt/libvirt/driver.py:951 +#: nova/virt/libvirt/driver.py:1015 #, python-format msgid "Error from libvirt during undefine. 
Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:977 +#: nova/virt/libvirt/driver.py:1041 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1389 +#: nova/virt/libvirt/driver.py:1445 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1414 +#: nova/virt/libvirt/driver.py:1470 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1663 +#: nova/virt/libvirt/driver.py:1719 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1749 +#: nova/virt/libvirt/driver.py:1827 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1755 +#: nova/virt/libvirt/driver.py:1833 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1804 +#: nova/virt/libvirt/driver.py:1882 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:1951 +#: nova/virt/libvirt/driver.py:2028 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421 +#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2542 +#: nova/virt/libvirt/driver.py:2619 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2693 +#: nova/virt/libvirt/driver.py:2770 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2786 +#: nova/virt/libvirt/driver.py:2863 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." 
msgstr "" -#: nova/virt/libvirt/driver.py:3553 +#: nova/virt/libvirt/driver.py:3645 #, python-format msgid "An error occurred while trying to define a domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3562 +#: nova/virt/libvirt/driver.py:3654 #, python-format msgid "An error occurred while trying to launch a defined domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3571 +#: nova/virt/libvirt/driver.py:3663 #, python-format msgid "An error occurred while enabling hairpin mode on domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3589 +#: nova/virt/libvirt/driver.py:3681 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3904 +#: nova/virt/libvirt/driver.py:3988 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." msgstr "" -#: nova/virt/libvirt/driver.py:4481 +#: nova/virt/libvirt/driver.py:4667 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5231 +#: nova/virt/libvirt/driver.py:5419 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:202 +#: nova/virt/libvirt/imagebackend.py:201 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:230 +#: nova/virt/libvirt/imagebackend.py:229 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:501 +#: nova/virt/libvirt/imagebackend.py:505 #, python-format msgid "error opening rbd image %s" msgstr "" @@ -266,20 +267,20 @@ msgstr "" msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "" -#: nova/virt/libvirt/lvm.py:201 +#: nova/virt/libvirt/lvm.py:200 #, python-format msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: 
nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572 -#: nova/virt/libvirt/vif.py:596 +#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 +#: nova/virt/libvirt/vif.py:562 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676 -#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717 -#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762 -#: nova/virt/libvirt/vif.py:784 +#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 +#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 +#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 +#: nova/virt/libvirt/vif.py:737 msgid "Failed while unplugging vif" msgstr "" @@ -288,12 +289,18 @@ msgstr "" msgid "Unknown content in connection_info/access_mode: %s" msgstr "" -#: nova/virt/libvirt/volume.py:666 +#: nova/virt/libvirt/volume.py:669 #, python-format msgid "Couldn't unmount the NFS share %s" msgstr "" -#: nova/virt/libvirt/volume.py:815 +#: nova/virt/libvirt/volume.py:818 #, python-format msgid "Couldn't unmount the GlusterFS share %s" msgstr "" + +#: nova/virt/vmwareapi/vmops.py:500 +#, python-format +msgid "" +"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s" +msgstr "" diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-info.po b/nova/locale/fr/LC_MESSAGES/nova-log-info.po index c1685db4d1..08f0723004 100644 --- a/nova/locale/fr/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/fr/LC_MESSAGES/nova-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" -"PO-Revision-Date: 2014-06-30 05:01+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"PO-Revision-Date: 2014-07-16 14:42+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: French (http://www.transifex.com/projects/p/nova/language/" "fr/)\n" @@ -19,27 +19,33 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; 
plural=(n > 1);\n" +#: nova/compute/manager.py:5422 +#, python-format +msgid "" +"During sync_power_state the instance has a pending task (%(task)s). Skip." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "Eventlet backdoor en écoute sur le port %(port)s for process %(pid)d" -#: nova/openstack/common/lockutils.py:83 +#: nova/openstack/common/lockutils.py:82 #, python-format msgid "Created lock path: %s" msgstr "Chemin de verrou créé: %s" -#: nova/openstack/common/lockutils.py:250 +#: nova/openstack/common/lockutils.py:251 #, python-format msgid "Failed to remove file %(file)s" msgstr "Echec de la suppression du fichier %(file)s" -#: nova/openstack/common/periodic_task.py:125 +#: nova/openstack/common/periodic_task.py:126 #, python-format msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "Tâche périodique %(task)s ignorée car son intervalle est négatif" -#: nova/openstack/common/periodic_task.py:130 +#: nova/openstack/common/periodic_task.py:131 #, python-format msgid "Skipping periodic task %(task)s because it is disabled" msgstr "Tâche périodique %(task)s car elle est désactivée" @@ -84,7 +90,7 @@ msgstr "%s interceptée, arrêt de l'enfant" #: nova/openstack/common/service.py:403 msgid "Wait called after thread killed. Cleaning up." -msgstr "" +msgstr "Pause demandée après suppression de thread. Nettoyage." #: nova/openstack/common/service.py:414 #, python-format @@ -101,88 +107,93 @@ msgstr "Suppression ligne en double avec l'ID : %(id)s de la table : %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." 
msgstr "" -#: nova/virt/libvirt/driver.py:894 +#: nova/virt/firewall.py:446 +#, python-format +msgid "instance chain %s disappeared during refresh, skipping" +msgstr "" + +#: nova/virt/libvirt/driver.py:839 +#, python-format +msgid "" +"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:958 msgid "Instance destroyed successfully." msgstr "Instance détruite avec succès." -#: nova/virt/libvirt/driver.py:904 +#: nova/virt/libvirt/driver.py:968 msgid "Instance may be started again." msgstr "L'instance peut être redémarrée." -#: nova/virt/libvirt/driver.py:914 +#: nova/virt/libvirt/driver.py:978 msgid "Going to destroy instance again." msgstr "Tentative de redestruction de l'instance." -#: nova/virt/libvirt/driver.py:1518 +#: nova/virt/libvirt/driver.py:1574 msgid "Beginning live snapshot process" msgstr "Démarrage du processus d'instantané en temps réel" -#: nova/virt/libvirt/driver.py:1521 +#: nova/virt/libvirt/driver.py:1577 msgid "Beginning cold snapshot process" msgstr "Démarrage du processus d'instantané à froid" -#: nova/virt/libvirt/driver.py:1550 +#: nova/virt/libvirt/driver.py:1606 msgid "Snapshot extracted, beginning image upload" msgstr "Instantané extrait, démarrage du téléchargement d'image" -#: nova/virt/libvirt/driver.py:1562 +#: nova/virt/libvirt/driver.py:1618 msgid "Snapshot image upload complete" msgstr "Téléchargement d'image instantanée terminé" -#: nova/virt/libvirt/driver.py:1972 +#: nova/virt/libvirt/driver.py:2049 msgid "Instance soft rebooted successfully." msgstr "Instance redémarrée par logiciel avec succès." -#: nova/virt/libvirt/driver.py:2015 +#: nova/virt/libvirt/driver.py:2092 msgid "Instance shutdown successfully." msgstr "L'instance s'est arrêtée avec succès." -#: nova/virt/libvirt/driver.py:2023 +#: nova/virt/libvirt/driver.py:2100 msgid "Instance may have been rebooted during soft reboot, so return now." 
msgstr "L'instance a sans doute été redémarrée par logiciel ; retour en cours." -#: nova/virt/libvirt/driver.py:2091 +#: nova/virt/libvirt/driver.py:2168 msgid "Instance rebooted successfully." msgstr "L'instance a redémarré avec succès." -#: nova/virt/libvirt/driver.py:2259 +#: nova/virt/libvirt/driver.py:2336 msgid "Instance spawned successfully." msgstr "Instance générée avec succès." -#: nova/virt/libvirt/driver.py:2275 +#: nova/virt/libvirt/driver.py:2352 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "data: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341 +#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Journal de console tronqué retourné, %d octets ignorés" -#: nova/virt/libvirt/driver.py:2568 +#: nova/virt/libvirt/driver.py:2645 msgid "Creating image" msgstr "Création de l'image" -#: nova/virt/libvirt/driver.py:2677 +#: nova/virt/libvirt/driver.py:2754 msgid "Using config drive" msgstr "Utilisation de l'unité de config" -#: nova/virt/libvirt/driver.py:2686 +#: nova/virt/libvirt/driver.py:2763 #, python-format msgid "Creating config drive at %(path)s" msgstr "Création de l'unité de config à %(path)s" -#: nova/virt/libvirt/driver.py:3223 +#: nova/virt/libvirt/driver.py:3315 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821 -#: nova/virt/libvirt/driver.py:3849 -#, python-format -msgid "libvirt can't find a domain with id: %s" -msgstr "" - -#: nova/virt/libvirt/driver.py:4109 +#: nova/virt/libvirt/driver.py:4193 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" @@ -192,7 +203,7 @@ msgstr "" "être détaché. 
Instance=%(instance_name)s Disk=%(disk)s Code=%(errcode)s " "Erreur=%(e)s" -#: nova/virt/libvirt/driver.py:4115 +#: nova/virt/libvirt/driver.py:4199 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -201,26 +212,26 @@ msgstr "" "Domaine introuvable dans libvirt pour l'instance %s. Impossible d'obtenir " "les stats de bloc pour l'unité" -#: nova/virt/libvirt/driver.py:4330 +#: nova/virt/libvirt/driver.py:4441 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:4986 +#: nova/virt/libvirt/driver.py:5174 msgid "Instance running successfully." msgstr "L'instance s'exécute avec succès." -#: nova/virt/libvirt/driver.py:5226 +#: nova/virt/libvirt/driver.py:5414 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5238 +#: nova/virt/libvirt/driver.py:5426 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5241 +#: nova/virt/libvirt/driver.py:5429 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -233,7 +244,7 @@ msgstr "setup_basic_filtering appelé dans nwfilter" msgid "Ensuring static filters" msgstr "Garantie des filtres statiques" -#: nova/virt/libvirt/firewall.py:306 +#: nova/virt/libvirt/firewall.py:304 msgid "Attempted to unfilter instance which is not filtered" msgstr "" "Vous avez essayé d'annuler le filtre d'une instance qui n'est pas filtrée" @@ -296,11 +307,11 @@ msgstr "Fichiers de base endommagés : %s" msgid "Removable base files: %s" msgstr "Fichiers de base pouvant être retirés : %s" -#: nova/virt/libvirt/utils.py:530 +#: nova/virt/libvirt/utils.py:536 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1352 +#: nova/virt/xenapi/vm_utils.py:1353 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/it/LC_MESSAGES/nova-log-info.po 
b/nova/locale/it/LC_MESSAGES/nova-log-info.po index 97d107968f..65f0db86ff 100644 --- a/nova/locale/it/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/it/LC_MESSAGES/nova-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" -"PO-Revision-Date: 2014-06-18 19:31+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"PO-Revision-Date: 2014-07-16 14:42+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Italian (http://www.transifex.com/projects/p/nova/language/" "it/)\n" @@ -19,28 +19,34 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" +#: nova/compute/manager.py:5422 +#, python-format +msgid "" +"During sync_power_state the instance has a pending task (%(task)s). Skip." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: nova/openstack/common/lockutils.py:83 +#: nova/openstack/common/lockutils.py:82 #, python-format msgid "Created lock path: %s" -msgstr "" +msgstr "Preato percorso di blocco : %s" -#: nova/openstack/common/lockutils.py:250 +#: nova/openstack/common/lockutils.py:251 #, python-format msgid "Failed to remove file %(file)s" -msgstr "" +msgstr "Tentativo fallito nella rimozione di %(file)s" -#: nova/openstack/common/periodic_task.py:125 +#: nova/openstack/common/periodic_task.py:126 #, python-format msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" "Abbadono dell'attività periodica %(task)s perché l'intervalo è negativo" -#: nova/openstack/common/periodic_task.py:130 +#: nova/openstack/common/periodic_task.py:131 #, python-format msgid "Skipping periodic task %(task)s because it is disabled" msgstr "Abbadono dell'attività periodica %(task)s perché è disabilitata" @@ -85,7 +91,7 @@ msgstr "Intercettato %s, arresto in corso dei children" #: 
nova/openstack/common/service.py:403 msgid "Wait called after thread killed. Cleaning up." -msgstr "" +msgstr "Attendere la chiamata dopo l'uccisione de filo. Bonifica." #: nova/openstack/common/service.py:414 #, python-format @@ -95,143 +101,152 @@ msgstr "In attesa %d degli elementi secondari per uscire" #: nova/openstack/common/db/sqlalchemy/utils.py:387 #, python-format msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" -msgstr "" +msgstr "Cancellata riga duplicata con id: %(id)s dalla tablella: %(table)s" #: nova/scheduler/filters/utils.py:50 #, python-format msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/libvirt/driver.py:894 -msgid "Instance destroyed successfully." +#: nova/virt/firewall.py:446 +#, python-format +msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/libvirt/driver.py:904 -msgid "Instance may be started again." +#: nova/virt/libvirt/driver.py:839 +#, python-format +msgid "" +"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:914 +#: nova/virt/libvirt/driver.py:958 +msgid "Instance destroyed successfully." +msgstr "Istanza distrutta correttamente." + +#: nova/virt/libvirt/driver.py:968 +msgid "Instance may be started again." +msgstr "L'istanza può essere avviata di nuovo." + +#: nova/virt/libvirt/driver.py:978 msgid "Going to destroy instance again." -msgstr "" +msgstr "L'istanza verrà nuovamente distrutta." 
-#: nova/virt/libvirt/driver.py:1518 +#: nova/virt/libvirt/driver.py:1574 msgid "Beginning live snapshot process" -msgstr "" +msgstr "Inizio processo attivo istantanea" -#: nova/virt/libvirt/driver.py:1521 +#: nova/virt/libvirt/driver.py:1577 msgid "Beginning cold snapshot process" -msgstr "" +msgstr "Inizio processo di istantanea a freddo" -#: nova/virt/libvirt/driver.py:1550 +#: nova/virt/libvirt/driver.py:1606 msgid "Snapshot extracted, beginning image upload" -msgstr "" +msgstr "Istantanea estratta, inizio caricamento immagine" -#: nova/virt/libvirt/driver.py:1562 +#: nova/virt/libvirt/driver.py:1618 msgid "Snapshot image upload complete" -msgstr "" +msgstr "Caricamento immagine istantanea completato" -#: nova/virt/libvirt/driver.py:1972 +#: nova/virt/libvirt/driver.py:2049 msgid "Instance soft rebooted successfully." -msgstr "" +msgstr "Avvio a caldo dell'istanza eseguito correttamente." -#: nova/virt/libvirt/driver.py:2015 +#: nova/virt/libvirt/driver.py:2092 msgid "Instance shutdown successfully." -msgstr "" +msgstr "Chiusura dell'istanza eseguita correttamente." -#: nova/virt/libvirt/driver.py:2023 +#: nova/virt/libvirt/driver.py:2100 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" +"L'istanza potrebbe essere stat riavviata durante l'avvio a caldo, quindi " +"ritornare adesso." -#: nova/virt/libvirt/driver.py:2091 +#: nova/virt/libvirt/driver.py:2168 msgid "Instance rebooted successfully." -msgstr "" +msgstr "Istanza riavviata correttamente." -#: nova/virt/libvirt/driver.py:2259 +#: nova/virt/libvirt/driver.py:2336 msgid "Instance spawned successfully." -msgstr "" +msgstr "Istanza generata correttamente." 
-#: nova/virt/libvirt/driver.py:2275 +#: nova/virt/libvirt/driver.py:2352 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" -msgstr "" +msgstr "dati: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341 +#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 #, python-format msgid "Truncated console log returned, %d bytes ignored" -msgstr "" +msgstr "Restituito log della console troncato, %d byte ignorati" -#: nova/virt/libvirt/driver.py:2568 +#: nova/virt/libvirt/driver.py:2645 msgid "Creating image" -msgstr "" +msgstr "Creazione immagine" -#: nova/virt/libvirt/driver.py:2677 +#: nova/virt/libvirt/driver.py:2754 msgid "Using config drive" -msgstr "" +msgstr "Utilizzo unità di config" -#: nova/virt/libvirt/driver.py:2686 +#: nova/virt/libvirt/driver.py:2763 #, python-format msgid "Creating config drive at %(path)s" -msgstr "" +msgstr "Creazione unità config in %(path)s" -#: nova/virt/libvirt/driver.py:3223 +#: nova/virt/libvirt/driver.py:3315 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821 -#: nova/virt/libvirt/driver.py:3849 -#, python-format -msgid "libvirt can't find a domain with id: %s" -msgstr "" - -#: nova/virt/libvirt/driver.py:4109 +#: nova/virt/libvirt/driver.py:4193 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4115 +#: nova/virt/libvirt/driver.py:4199 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" msgstr "" +"Impossibile trovare il dominio in libvirt per l'istanza %s. 
Impossibile " +"ottenere le statistiche del blocco per l'unità" -#: nova/virt/libvirt/driver.py:4330 +#: nova/virt/libvirt/driver.py:4441 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:4986 +#: nova/virt/libvirt/driver.py:5174 msgid "Instance running successfully." -msgstr "" +msgstr "Istanza in esecuzione correttamente." -#: nova/virt/libvirt/driver.py:5226 +#: nova/virt/libvirt/driver.py:5414 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5238 +#: nova/virt/libvirt/driver.py:5426 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5241 +#: nova/virt/libvirt/driver.py:5429 #, python-format msgid "Deletion of %s complete" msgstr "" #: nova/virt/libvirt/firewall.py:105 msgid "Called setup_basic_filtering in nwfilter" -msgstr "" +msgstr "Chiamato setup_basic_filtering in nwfilter" #: nova/virt/libvirt/firewall.py:113 msgid "Ensuring static filters" -msgstr "" +msgstr "Controllo dei filtri statici" -#: nova/virt/libvirt/firewall.py:306 +#: nova/virt/libvirt/firewall.py:304 msgid "Attempted to unfilter instance which is not filtered" -msgstr "" +msgstr "Si è tentato di rimuovere il filtro da un'istanza senza filtro" #: nova/virt/libvirt/imagecache.py:191 #, python-format @@ -290,11 +305,11 @@ msgstr "File di base danneggiato: %s" msgid "Removable base files: %s" msgstr "File di base rimovibili: %s" -#: nova/virt/libvirt/utils.py:530 +#: nova/virt/libvirt/utils.py:536 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1352 +#: nova/virt/xenapi/vm_utils.py:1353 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/ja/LC_MESSAGES/nova-log-error.po b/nova/locale/ja/LC_MESSAGES/nova-log-error.po index ec595ace0f..ea9903b1fb 100644 --- a/nova/locale/ja/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/ja/LC_MESSAGES/nova-log-error.po @@ -7,7 
+7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:08+0000\n" +"POT-Creation-Date: 2014-07-21 06:04+0000\n" "PO-Revision-Date: 2014-06-20 16:41+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Japanese (http://www.transifex.com/projects/p/nova/language/" @@ -44,6 +44,11 @@ msgstr "" msgid "Keystone failure: %s" msgstr "" +#: nova/compute/manager.py:5416 +msgid "" +"Periodic sync_power_state task had an error while processing an instance." +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "インスタンスの障害をセルに通知できませんでした" @@ -58,7 +63,7 @@ msgstr "除去される元の例外: %s" msgid "Unexpected exception occurred %d time(s)... retrying." msgstr "予期せぬ例外が、%d回()発生しました。再試行中。" -#: nova/openstack/common/lockutils.py:120 +#: nova/openstack/common/lockutils.py:119 #, python-format msgid "Could not release the acquired lock `%s`" msgstr "" @@ -71,22 +76,22 @@ msgstr "一定期間の呼び出しループ" msgid "in dynamic looping call" msgstr "動的呼び出しループ" -#: nova/openstack/common/periodic_task.py:179 +#: nova/openstack/common/periodic_task.py:202 #, python-format msgid "Error during %(full_task_name)s: %(e)s" msgstr "%(full_task_name)s 中のエラー: %(e)s" -#: nova/openstack/common/policy.py:511 +#: nova/openstack/common/policy.py:507 #, python-format msgid "Failed to understand rule %s" msgstr "ルール %s を解釈できませんでした" -#: nova/openstack/common/policy.py:521 +#: nova/openstack/common/policy.py:517 #, python-format msgid "No handler for matches of kind %s" msgstr "種類 %s の一致向けのハンドラーがありません" -#: nova/openstack/common/policy.py:791 +#: nova/openstack/common/policy.py:787 #, python-format msgid "Failed to understand rule %r" msgstr "ルール %r を解釈できませんでした" @@ -116,137 +121,133 @@ msgstr "DB 例外がラップされました。" msgid "Failed to migrate to version %s on engine %s" msgstr "バージョン%sをエンジン%sへの移行が失敗しました。" -#: nova/virt/libvirt/driver.py:625 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova 
requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:749 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:873 +#: nova/virt/libvirt/driver.py:937 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:889 -msgid "During wait destroy, instance disappeared." -msgstr "" - -#: nova/virt/libvirt/driver.py:951 +#: nova/virt/libvirt/driver.py:1015 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:977 +#: nova/virt/libvirt/driver.py:1041 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1389 +#: nova/virt/libvirt/driver.py:1445 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1414 +#: nova/virt/libvirt/driver.py:1470 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1663 +#: nova/virt/libvirt/driver.py:1719 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1749 +#: nova/virt/libvirt/driver.py:1827 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1755 +#: nova/virt/libvirt/driver.py:1833 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1804 +#: nova/virt/libvirt/driver.py:1882 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:1951 +#: nova/virt/libvirt/driver.py:2028 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421 +#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2542 +#: nova/virt/libvirt/driver.py:2619 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2693 +#: nova/virt/libvirt/driver.py:2770 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2786 +#: nova/virt/libvirt/driver.py:2863 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3553 +#: nova/virt/libvirt/driver.py:3645 #, python-format msgid "An error occurred while trying to define a domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3562 +#: nova/virt/libvirt/driver.py:3654 #, python-format msgid "An error occurred while trying to launch a defined domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3571 +#: nova/virt/libvirt/driver.py:3663 #, python-format msgid "An error occurred while enabling hairpin mode on domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3589 +#: nova/virt/libvirt/driver.py:3681 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3904 +#: nova/virt/libvirt/driver.py:3988 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4481 +#: nova/virt/libvirt/driver.py:4667 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5231 +#: nova/virt/libvirt/driver.py:5419 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:202 +#: nova/virt/libvirt/imagebackend.py:201 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:230 +#: nova/virt/libvirt/imagebackend.py:229 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:501 +#: nova/virt/libvirt/imagebackend.py:505 #, python-format msgid "error opening rbd image %s" msgstr "" @@ -268,20 +269,20 @@ msgstr "(%(base_file)s) にあるイメージ %(id)s: イメージの検査が msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "%(base_file)s の削除に失敗しました。エラーは %(error)s" -#: nova/virt/libvirt/lvm.py:201 +#: nova/virt/libvirt/lvm.py:200 #, python-format msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572 -#: nova/virt/libvirt/vif.py:596 +#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 +#: nova/virt/libvirt/vif.py:562 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676 -#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717 -#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762 -#: nova/virt/libvirt/vif.py:784 +#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 +#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 +#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 +#: nova/virt/libvirt/vif.py:737 msgid "Failed while unplugging vif" msgstr "" @@ -290,12 +291,18 @@ msgstr "" msgid "Unknown content in connection_info/access_mode: %s" msgstr "" -#: 
nova/virt/libvirt/volume.py:666 +#: nova/virt/libvirt/volume.py:669 #, python-format msgid "Couldn't unmount the NFS share %s" msgstr "" -#: nova/virt/libvirt/volume.py:815 +#: nova/virt/libvirt/volume.py:818 #, python-format msgid "Couldn't unmount the GlusterFS share %s" msgstr "" + +#: nova/virt/vmwareapi/vmops.py:500 +#, python-format +msgid "" +"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s" +msgstr "" diff --git a/nova/locale/ja/LC_MESSAGES/nova-log-info.po b/nova/locale/ja/LC_MESSAGES/nova-log-info.po index 8dbd4c8f9d..2f1a79f4ee 100644 --- a/nova/locale/ja/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/ja/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" "PO-Revision-Date: 2014-06-30 04:40+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Japanese (http://www.transifex.com/projects/p/nova/language/" @@ -19,27 +19,33 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=1; plural=0;\n" +#: nova/compute/manager.py:5422 +#, python-format +msgid "" +"During sync_power_state the instance has a pending task (%(task)s). Skip." 
+msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "Eventlet backdoorは、プロセス%(pid)dの%(port)sをリスニングしています。" -#: nova/openstack/common/lockutils.py:83 +#: nova/openstack/common/lockutils.py:82 #, python-format msgid "Created lock path: %s" msgstr "" -#: nova/openstack/common/lockutils.py:250 +#: nova/openstack/common/lockutils.py:251 #, python-format msgid "Failed to remove file %(file)s" msgstr "" -#: nova/openstack/common/periodic_task.py:125 +#: nova/openstack/common/periodic_task.py:126 #, python-format msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "タスクの間隔が負であるため、定期タスク %(task)s をスキップしています" -#: nova/openstack/common/periodic_task.py:130 +#: nova/openstack/common/periodic_task.py:131 #, python-format msgid "Skipping periodic task %(task)s because it is disabled" msgstr "タスクが使用不可であるため、定期タスク %(task)s をスキップしています" @@ -101,99 +107,104 @@ msgstr "" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/libvirt/driver.py:894 +#: nova/virt/firewall.py:446 +#, python-format +msgid "instance chain %s disappeared during refresh, skipping" +msgstr "" + +#: nova/virt/libvirt/driver.py:839 +#, python-format +msgid "" +"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:958 msgid "Instance destroyed successfully." msgstr "インスタンスが正常に破棄されました。" -#: nova/virt/libvirt/driver.py:904 +#: nova/virt/libvirt/driver.py:968 msgid "Instance may be started again." msgstr "インスタンスを再び開始できます。" -#: nova/virt/libvirt/driver.py:914 +#: nova/virt/libvirt/driver.py:978 msgid "Going to destroy instance again." 
msgstr "インスタンスの破棄を再び行います。" -#: nova/virt/libvirt/driver.py:1518 +#: nova/virt/libvirt/driver.py:1574 msgid "Beginning live snapshot process" msgstr "ライブ・スナップショット・プロセスを開始しています" -#: nova/virt/libvirt/driver.py:1521 +#: nova/virt/libvirt/driver.py:1577 msgid "Beginning cold snapshot process" msgstr "コールド・スナップショット・プロセスを開始しています" -#: nova/virt/libvirt/driver.py:1550 +#: nova/virt/libvirt/driver.py:1606 msgid "Snapshot extracted, beginning image upload" msgstr "" "スナップショットが抽出されました。イメージのアップロードを開始しています" -#: nova/virt/libvirt/driver.py:1562 +#: nova/virt/libvirt/driver.py:1618 msgid "Snapshot image upload complete" msgstr "スナップショット・イメージのアップロードが完了しました" -#: nova/virt/libvirt/driver.py:1972 +#: nova/virt/libvirt/driver.py:2049 msgid "Instance soft rebooted successfully." msgstr "インスタンスが正常にソフト・リブートされました。" -#: nova/virt/libvirt/driver.py:2015 +#: nova/virt/libvirt/driver.py:2092 msgid "Instance shutdown successfully." msgstr "インスタンスが正常にシャットダウンされました。" -#: nova/virt/libvirt/driver.py:2023 +#: nova/virt/libvirt/driver.py:2100 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "インスタンスはソフト・リブート時にリブートされた可能性があるため、ここで返し" "ます。" -#: nova/virt/libvirt/driver.py:2091 +#: nova/virt/libvirt/driver.py:2168 msgid "Instance rebooted successfully." msgstr "インスタンスが正常にリブートされました。" -#: nova/virt/libvirt/driver.py:2259 +#: nova/virt/libvirt/driver.py:2336 msgid "Instance spawned successfully." 
msgstr "インスタンスが正常に作成されました。" -#: nova/virt/libvirt/driver.py:2275 +#: nova/virt/libvirt/driver.py:2352 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "データ: %(data)r, ファイルパス: %(fpath)r" -#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341 +#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" "切り捨てられたコンソール・ログが返されました。%d バイトが無視されました" -#: nova/virt/libvirt/driver.py:2568 +#: nova/virt/libvirt/driver.py:2645 msgid "Creating image" msgstr "イメージの作成中" -#: nova/virt/libvirt/driver.py:2677 +#: nova/virt/libvirt/driver.py:2754 msgid "Using config drive" msgstr "構成ドライブを使用中" -#: nova/virt/libvirt/driver.py:2686 +#: nova/virt/libvirt/driver.py:2763 #, python-format msgid "Creating config drive at %(path)s" msgstr "構成ドライブを %(path)s に作成しています" -#: nova/virt/libvirt/driver.py:3223 +#: nova/virt/libvirt/driver.py:3315 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821 -#: nova/virt/libvirt/driver.py:3849 -#, python-format -msgid "libvirt can't find a domain with id: %s" -msgstr "" - -#: nova/virt/libvirt/driver.py:4109 +#: nova/virt/libvirt/driver.py:4193 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4115 +#: nova/virt/libvirt/driver.py:4199 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -202,26 +213,26 @@ msgstr "" "インスタンス %s 用のドメインが Libvirt 内で見つかりませんでした。デバイスのブ" "ロックの統計を取得できません" -#: nova/virt/libvirt/driver.py:4330 +#: nova/virt/libvirt/driver.py:4441 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:4986 +#: nova/virt/libvirt/driver.py:5174 msgid "Instance running successfully." 
msgstr "インスタンスが正常に実行されています。" -#: nova/virt/libvirt/driver.py:5226 +#: nova/virt/libvirt/driver.py:5414 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5238 +#: nova/virt/libvirt/driver.py:5426 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5241 +#: nova/virt/libvirt/driver.py:5429 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -234,7 +245,7 @@ msgstr "nwfilter で setup_basic_filtering を呼び出しました" msgid "Ensuring static filters" msgstr "静的フィルターの確認中" -#: nova/virt/libvirt/firewall.py:306 +#: nova/virt/libvirt/firewall.py:304 msgid "Attempted to unfilter instance which is not filtered" msgstr "" "フィルター処理されていないインスタンスに対してフィルター処理の取り消しが試み" @@ -297,11 +308,11 @@ msgstr "破損した基本ファイル: %s" msgid "Removable base files: %s" msgstr "削除可能な基本ファイル: %s" -#: nova/virt/libvirt/utils.py:530 +#: nova/virt/libvirt/utils.py:536 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1352 +#: nova/virt/xenapi/vm_utils.py:1353 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po b/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po index 898ffd2a71..a565b529c5 100644 --- a/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:08+0000\n" +"POT-Creation-Date: 2014-07-21 06:04+0000\n" "PO-Revision-Date: 2014-06-16 04:10+0000\n" "Last-Translator: jaekwon.park \n" "Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/nova/" @@ -45,6 +45,11 @@ msgstr "" msgid "Keystone failure: %s" msgstr "" +#: nova/compute/manager.py:5416 +msgid "" +"Periodic sync_power_state task had an error while processing an instance." 
+msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "셀에 인스턴스 결함을 알리지 못했음" @@ -59,7 +64,7 @@ msgstr "기존 예외가 삭제됨: %s" msgid "Unexpected exception occurred %d time(s)... retrying." msgstr "예기치 않은 예외 %d 번 발생하였습니다... 다시 시도중." -#: nova/openstack/common/lockutils.py:120 +#: nova/openstack/common/lockutils.py:119 #, python-format msgid "Could not release the acquired lock `%s`" msgstr "" @@ -72,22 +77,22 @@ msgstr "고정 기간 루프 호출에서" msgid "in dynamic looping call" msgstr "동적 루프 호출에서" -#: nova/openstack/common/periodic_task.py:179 +#: nova/openstack/common/periodic_task.py:202 #, python-format msgid "Error during %(full_task_name)s: %(e)s" msgstr "%(full_task_name)s 중 오류: %(e)s" -#: nova/openstack/common/policy.py:511 +#: nova/openstack/common/policy.py:507 #, python-format msgid "Failed to understand rule %s" msgstr "%s 규칙을 이해하는데 실패했습니다" -#: nova/openstack/common/policy.py:521 +#: nova/openstack/common/policy.py:517 #, python-format msgid "No handler for matches of kind %s" msgstr "%s 유형의 일치에 대한 핸들러가 없음" -#: nova/openstack/common/policy.py:791 +#: nova/openstack/common/policy.py:787 #, python-format msgid "Failed to understand rule %r" msgstr "룰 %r를 이해하지 못했습니다." @@ -117,137 +122,133 @@ msgstr "DB 예외가 랩핑되었습니다." msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:625 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:749 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:873 +#: nova/virt/libvirt/driver.py:937 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:889 -msgid "During wait destroy, instance disappeared." 
-msgstr "" - -#: nova/virt/libvirt/driver.py:951 +#: nova/virt/libvirt/driver.py:1015 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:977 +#: nova/virt/libvirt/driver.py:1041 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1389 +#: nova/virt/libvirt/driver.py:1445 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1414 +#: nova/virt/libvirt/driver.py:1470 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1663 +#: nova/virt/libvirt/driver.py:1719 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1749 +#: nova/virt/libvirt/driver.py:1827 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1755 +#: nova/virt/libvirt/driver.py:1833 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1804 +#: nova/virt/libvirt/driver.py:1882 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:1951 +#: nova/virt/libvirt/driver.py:2028 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421 +#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2542 +#: nova/virt/libvirt/driver.py:2619 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2693 +#: nova/virt/libvirt/driver.py:2770 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2786 +#: nova/virt/libvirt/driver.py:2863 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3553 +#: nova/virt/libvirt/driver.py:3645 #, python-format msgid "An error occurred while trying to define a domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3562 +#: nova/virt/libvirt/driver.py:3654 #, python-format msgid "An error occurred while trying to launch a defined domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3571 +#: nova/virt/libvirt/driver.py:3663 #, python-format msgid "An error occurred while enabling hairpin mode on domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3589 +#: nova/virt/libvirt/driver.py:3681 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3904 +#: nova/virt/libvirt/driver.py:3988 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4481 +#: nova/virt/libvirt/driver.py:4667 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5231 +#: nova/virt/libvirt/driver.py:5419 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:202 +#: nova/virt/libvirt/imagebackend.py:201 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:230 +#: nova/virt/libvirt/imagebackend.py:229 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:501 +#: nova/virt/libvirt/imagebackend.py:505 #, python-format msgid "error opening rbd image %s" msgstr "" @@ -267,20 +268,20 @@ msgstr "" msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "" -#: nova/virt/libvirt/lvm.py:201 +#: nova/virt/libvirt/lvm.py:200 #, python-format msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572 -#: nova/virt/libvirt/vif.py:596 +#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 +#: nova/virt/libvirt/vif.py:562 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676 -#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717 -#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762 -#: nova/virt/libvirt/vif.py:784 +#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 +#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 +#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 +#: nova/virt/libvirt/vif.py:737 msgid "Failed while unplugging vif" msgstr "" @@ -289,12 +290,18 @@ msgstr "" msgid "Unknown content in connection_info/access_mode: %s" msgstr "" -#: nova/virt/libvirt/volume.py:666 +#: nova/virt/libvirt/volume.py:669 #, python-format msgid "Couldn't 
unmount the NFS share %s" msgstr "" -#: nova/virt/libvirt/volume.py:815 +#: nova/virt/libvirt/volume.py:818 #, python-format msgid "Couldn't unmount the GlusterFS share %s" msgstr "" + +#: nova/virt/vmwareapi/vmops.py:500 +#, python-format +msgid "" +"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s" +msgstr "" diff --git a/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po b/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po index 53e0163716..20799e4858 100644 --- a/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" "PO-Revision-Date: 2014-06-30 04:40+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/nova/" @@ -19,27 +19,33 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=1; plural=0;\n" +#: nova/compute/manager.py:5422 +#, python-format +msgid "" +"During sync_power_state the instance has a pending task (%(task)s). Skip." 
+msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "Eventlet 백도어는 프로세스 %(pid)d 일 동안 %(port)s에서 수신" -#: nova/openstack/common/lockutils.py:83 +#: nova/openstack/common/lockutils.py:82 #, python-format msgid "Created lock path: %s" msgstr "" -#: nova/openstack/common/lockutils.py:250 +#: nova/openstack/common/lockutils.py:251 #, python-format msgid "Failed to remove file %(file)s" msgstr "" -#: nova/openstack/common/periodic_task.py:125 +#: nova/openstack/common/periodic_task.py:126 #, python-format msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "간격이 음수이기 때문에 주기적 태스크 %(task)s을(를) 건너뜀" -#: nova/openstack/common/periodic_task.py:130 +#: nova/openstack/common/periodic_task.py:131 #, python-format msgid "Skipping periodic task %(task)s because it is disabled" msgstr "사용 안하기 때문에 주기적 태스크 %(task)s을(를) 건너뜀" @@ -101,96 +107,101 @@ msgstr "" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/libvirt/driver.py:894 +#: nova/virt/firewall.py:446 +#, python-format +msgid "instance chain %s disappeared during refresh, skipping" +msgstr "" + +#: nova/virt/libvirt/driver.py:839 +#, python-format +msgid "" +"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:958 msgid "Instance destroyed successfully." msgstr "인스턴스가 영구 삭제되었습니다. " -#: nova/virt/libvirt/driver.py:904 +#: nova/virt/libvirt/driver.py:968 msgid "Instance may be started again." msgstr "인스턴스가 다시 시작됩니다." -#: nova/virt/libvirt/driver.py:914 +#: nova/virt/libvirt/driver.py:978 msgid "Going to destroy instance again." msgstr "인스턴스를 다시 영구 삭제하려 합니다." 
-#: nova/virt/libvirt/driver.py:1518 +#: nova/virt/libvirt/driver.py:1574 msgid "Beginning live snapshot process" msgstr "라이브 스냅샷 프로세스 시작 중" -#: nova/virt/libvirt/driver.py:1521 +#: nova/virt/libvirt/driver.py:1577 msgid "Beginning cold snapshot process" msgstr "콜드 스냅샷 프로세스 시작 중" -#: nova/virt/libvirt/driver.py:1550 +#: nova/virt/libvirt/driver.py:1606 msgid "Snapshot extracted, beginning image upload" msgstr "스냅샷 추출, 이미지 업로드 시작 중" -#: nova/virt/libvirt/driver.py:1562 +#: nova/virt/libvirt/driver.py:1618 msgid "Snapshot image upload complete" msgstr "스냅샷 이미지 업로드 완료" -#: nova/virt/libvirt/driver.py:1972 +#: nova/virt/libvirt/driver.py:2049 msgid "Instance soft rebooted successfully." msgstr "인스턴스가 소프트 리부트되었습니다. " -#: nova/virt/libvirt/driver.py:2015 +#: nova/virt/libvirt/driver.py:2092 msgid "Instance shutdown successfully." msgstr "인스턴스가 시스템 종료되었습니다. " -#: nova/virt/libvirt/driver.py:2023 +#: nova/virt/libvirt/driver.py:2100 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "인스턴스가 소프트 리부트 중에 다시 부팅되었을 수 있으므로, 지금 리턴합니다. " -#: nova/virt/libvirt/driver.py:2091 +#: nova/virt/libvirt/driver.py:2168 msgid "Instance rebooted successfully." msgstr "인스턴스가 다시 부트되었습니다. " -#: nova/virt/libvirt/driver.py:2259 +#: nova/virt/libvirt/driver.py:2336 msgid "Instance spawned successfully." msgstr "인스턴스가 파생되었습니다. 
" -#: nova/virt/libvirt/driver.py:2275 +#: nova/virt/libvirt/driver.py:2352 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "데이터: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341 +#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "잘린 콘솔 로그가 리턴되었으며, %d 바이트는 무시됨" -#: nova/virt/libvirt/driver.py:2568 +#: nova/virt/libvirt/driver.py:2645 msgid "Creating image" msgstr "이미지 작성 중" -#: nova/virt/libvirt/driver.py:2677 +#: nova/virt/libvirt/driver.py:2754 msgid "Using config drive" msgstr "구성 드라이브 사용 중" -#: nova/virt/libvirt/driver.py:2686 +#: nova/virt/libvirt/driver.py:2763 #, python-format msgid "Creating config drive at %(path)s" msgstr "%(path)s에 구성 드라이브 작성 중" -#: nova/virt/libvirt/driver.py:3223 +#: nova/virt/libvirt/driver.py:3315 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821 -#: nova/virt/libvirt/driver.py:3849 -#, python-format -msgid "libvirt can't find a domain with id: %s" -msgstr "" - -#: nova/virt/libvirt/driver.py:4109 +#: nova/virt/libvirt/driver.py:4193 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4115 +#: nova/virt/libvirt/driver.py:4199 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -199,26 +210,26 @@ msgstr "" "%s 인스턴스에 대한 libvirt에서 도메인을 찾을 수 없습니다. 디바이스의 블록 통" "계를 가져올 수 없음" -#: nova/virt/libvirt/driver.py:4330 +#: nova/virt/libvirt/driver.py:4441 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:4986 +#: nova/virt/libvirt/driver.py:5174 msgid "Instance running successfully." msgstr "인스턴스가 정상적으로 실행 중입니다. 
" -#: nova/virt/libvirt/driver.py:5226 +#: nova/virt/libvirt/driver.py:5414 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5238 +#: nova/virt/libvirt/driver.py:5426 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5241 +#: nova/virt/libvirt/driver.py:5429 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -231,7 +242,7 @@ msgstr "nwfilter에서 setup_basic_filtering을 호출했음" msgid "Ensuring static filters" msgstr "정적 필터 확인 중" -#: nova/virt/libvirt/firewall.py:306 +#: nova/virt/libvirt/firewall.py:304 msgid "Attempted to unfilter instance which is not filtered" msgstr "필터링되지 않는 인스턴스를 필터링 해제하려고 했음" @@ -291,11 +302,11 @@ msgstr "손상된 기본 파일: %s" msgid "Removable base files: %s" msgstr "제거 가능한 기본 파일: %s" -#: nova/virt/libvirt/utils.py:530 +#: nova/virt/libvirt/utils.py:536 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1352 +#: nova/virt/xenapi/vm_utils.py:1353 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/nova-log-critical.pot b/nova/locale/nova-log-critical.pot index 83a24b3e1b..f2c4fd2733 100644 --- a/nova/locale/nova-log-critical.pot +++ b/nova/locale/nova-log-critical.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev88.ged965df\n" +"Project-Id-Version: nova 2014.2.dev763.g740fa02\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-17 06:07+0000\n" +"POT-Creation-Date: 2014-07-21 06:04+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,3 +17,15 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" +#: nova/virt/vmwareapi/driver.py:864 +#, python-format +msgid "" +"Unable to connect to server at %(server)s, sleeping for %(seconds)s " +"seconds" +msgstr "" + +#: nova/virt/vmwareapi/driver.py:973 +#, python-format +msgid "In 
vmwareapi: _call_method (session=%s)" +msgstr "" + diff --git a/nova/locale/nova-log-error.pot b/nova/locale/nova-log-error.pot index 49ae51960d..336dbaa455 100644 --- a/nova/locale/nova-log-error.pot +++ b/nova/locale/nova-log-error.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev374.g4e35f5f\n" +"Project-Id-Version: nova 2014.2.dev763.g740fa02\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:08+0000\n" +"POT-Creation-Date: 2014-07-21 06:04+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -42,6 +42,10 @@ msgstr "" msgid "Keystone failure: %s" msgstr "" +#: nova/compute/manager.py:5416 +msgid "Periodic sync_power_state task had an error while processing an instance." +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "" @@ -56,7 +60,7 @@ msgstr "" msgid "Unexpected exception occurred %d time(s)... retrying." 
msgstr "" -#: nova/openstack/common/lockutils.py:120 +#: nova/openstack/common/lockutils.py:119 #, python-format msgid "Could not release the acquired lock `%s`" msgstr "" @@ -69,22 +73,22 @@ msgstr "" msgid "in dynamic looping call" msgstr "" -#: nova/openstack/common/periodic_task.py:179 +#: nova/openstack/common/periodic_task.py:202 #, python-format msgid "Error during %(full_task_name)s: %(e)s" msgstr "" -#: nova/openstack/common/policy.py:511 +#: nova/openstack/common/policy.py:507 #, python-format msgid "Failed to understand rule %s" msgstr "" -#: nova/openstack/common/policy.py:521 +#: nova/openstack/common/policy.py:517 #, python-format msgid "No handler for matches of kind %s" msgstr "" -#: nova/openstack/common/policy.py:791 +#: nova/openstack/common/policy.py:787 #, python-format msgid "Failed to understand rule %r" msgstr "" @@ -114,139 +118,135 @@ msgstr "" msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:625 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:749 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:873 +#: nova/virt/libvirt/driver.py:937 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:889 -msgid "During wait destroy, instance disappeared." -msgstr "" - -#: nova/virt/libvirt/driver.py:951 +#: nova/virt/libvirt/driver.py:1015 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:977 +#: nova/virt/libvirt/driver.py:1041 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1389 +#: nova/virt/libvirt/driver.py:1445 msgid "attaching network adapter failed." 
msgstr "" -#: nova/virt/libvirt/driver.py:1414 +#: nova/virt/libvirt/driver.py:1470 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1663 +#: nova/virt/libvirt/driver.py:1719 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1749 +#: nova/virt/libvirt/driver.py:1827 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1755 +#: nova/virt/libvirt/driver.py:1833 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1804 +#: nova/virt/libvirt/driver.py:1882 msgid "" "Error occurred during volume_snapshot_create, sending error status to " "Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:1951 +#: nova/virt/libvirt/driver.py:2028 msgid "" "Error occurred during volume_snapshot_delete, sending error status to " "Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421 +#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2542 +#: nova/virt/libvirt/driver.py:2619 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2693 +#: nova/virt/libvirt/driver.py:2770 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2786 +#: nova/virt/libvirt/driver.py:2863 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." 
msgstr "" -#: nova/virt/libvirt/driver.py:3553 +#: nova/virt/libvirt/driver.py:3645 #, python-format msgid "An error occurred while trying to define a domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3562 +#: nova/virt/libvirt/driver.py:3654 #, python-format msgid "An error occurred while trying to launch a defined domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3571 +#: nova/virt/libvirt/driver.py:3663 #, python-format msgid "An error occurred while enabling hairpin mode on domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3589 +#: nova/virt/libvirt/driver.py:3681 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3904 +#: nova/virt/libvirt/driver.py:3988 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to " "take effect." msgstr "" -#: nova/virt/libvirt/driver.py:4481 +#: nova/virt/libvirt/driver.py:4667 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5231 +#: nova/virt/libvirt/driver.py:5419 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:202 +#: nova/virt/libvirt/imagebackend.py:201 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:230 +#: nova/virt/libvirt/imagebackend.py:229 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:501 +#: nova/virt/libvirt/imagebackend.py:505 #, python-format msgid "error opening rbd image %s" msgstr "" @@ -266,20 +266,20 @@ msgstr "" msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "" -#: nova/virt/libvirt/lvm.py:201 +#: nova/virt/libvirt/lvm.py:200 #, python-format msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: 
nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572 -#: nova/virt/libvirt/vif.py:596 +#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 +#: nova/virt/libvirt/vif.py:562 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676 -#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717 -#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762 -#: nova/virt/libvirt/vif.py:784 +#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 +#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 +#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 +#: nova/virt/libvirt/vif.py:737 msgid "Failed while unplugging vif" msgstr "" @@ -288,13 +288,18 @@ msgstr "" msgid "Unknown content in connection_info/access_mode: %s" msgstr "" -#: nova/virt/libvirt/volume.py:666 +#: nova/virt/libvirt/volume.py:669 #, python-format msgid "Couldn't unmount the NFS share %s" msgstr "" -#: nova/virt/libvirt/volume.py:815 +#: nova/virt/libvirt/volume.py:818 #, python-format msgid "Couldn't unmount the GlusterFS share %s" msgstr "" +#: nova/virt/vmwareapi/vmops.py:500 +#, python-format +msgid "Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s" +msgstr "" + diff --git a/nova/locale/nova-log-info.pot b/nova/locale/nova-log-info.pot index 0ada885bf3..196a2c7327 100644 --- a/nova/locale/nova-log-info.pot +++ b/nova/locale/nova-log-info.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev374.g4e35f5f\n" +"Project-Id-Version: nova 2014.2.dev763.g740fa02\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,27 +17,32 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" +#: nova/compute/manager.py:5422 +#, python-format +msgid "During sync_power_state 
the instance has a pending task (%(task)s). Skip." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: nova/openstack/common/lockutils.py:83 +#: nova/openstack/common/lockutils.py:82 #, python-format msgid "Created lock path: %s" msgstr "" -#: nova/openstack/common/lockutils.py:250 +#: nova/openstack/common/lockutils.py:251 #, python-format msgid "Failed to remove file %(file)s" msgstr "" -#: nova/openstack/common/periodic_task.py:125 +#: nova/openstack/common/periodic_task.py:126 #, python-format msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "" -#: nova/openstack/common/periodic_task.py:130 +#: nova/openstack/common/periodic_task.py:131 #, python-format msgid "Skipping periodic task %(task)s because it is disabled" msgstr "" @@ -99,121 +104,127 @@ msgstr "" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/libvirt/driver.py:894 +#: nova/virt/firewall.py:446 +#, python-format +msgid "instance chain %s disappeared during refresh, skipping" +msgstr "" + +#: nova/virt/libvirt/driver.py:839 +#, python-format +msgid "" +"Unable to use bulk domain list APIs, falling back to slow code path: " +"%(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:958 msgid "Instance destroyed successfully." msgstr "" -#: nova/virt/libvirt/driver.py:904 +#: nova/virt/libvirt/driver.py:968 msgid "Instance may be started again." msgstr "" -#: nova/virt/libvirt/driver.py:914 +#: nova/virt/libvirt/driver.py:978 msgid "Going to destroy instance again." 
msgstr "" -#: nova/virt/libvirt/driver.py:1518 +#: nova/virt/libvirt/driver.py:1574 msgid "Beginning live snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1521 +#: nova/virt/libvirt/driver.py:1577 msgid "Beginning cold snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1550 +#: nova/virt/libvirt/driver.py:1606 msgid "Snapshot extracted, beginning image upload" msgstr "" -#: nova/virt/libvirt/driver.py:1562 +#: nova/virt/libvirt/driver.py:1618 msgid "Snapshot image upload complete" msgstr "" -#: nova/virt/libvirt/driver.py:1972 +#: nova/virt/libvirt/driver.py:2049 msgid "Instance soft rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2015 +#: nova/virt/libvirt/driver.py:2092 msgid "Instance shutdown successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2023 +#: nova/virt/libvirt/driver.py:2100 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" -#: nova/virt/libvirt/driver.py:2091 +#: nova/virt/libvirt/driver.py:2168 msgid "Instance rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2259 +#: nova/virt/libvirt/driver.py:2336 msgid "Instance spawned successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:2275 +#: nova/virt/libvirt/driver.py:2352 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341 +#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" -#: nova/virt/libvirt/driver.py:2568 +#: nova/virt/libvirt/driver.py:2645 msgid "Creating image" msgstr "" -#: nova/virt/libvirt/driver.py:2677 +#: nova/virt/libvirt/driver.py:2754 msgid "Using config drive" msgstr "" -#: nova/virt/libvirt/driver.py:2686 +#: nova/virt/libvirt/driver.py:2763 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/libvirt/driver.py:3223 +#: nova/virt/libvirt/driver.py:3315 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821 -#: nova/virt/libvirt/driver.py:3849 -#, python-format -msgid "libvirt can't find a domain with id: %s" -msgstr "" - -#: nova/virt/libvirt/driver.py:4109 +#: nova/virt/libvirt/driver.py:4193 #, python-format msgid "" "Getting block stats failed, device might have been detached. " "Instance=%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4115 +#: nova/virt/libvirt/driver.py:4199 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats " "for device" msgstr "" -#: nova/virt/libvirt/driver.py:4330 +#: nova/virt/libvirt/driver.py:4441 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:4986 +#: nova/virt/libvirt/driver.py:5174 msgid "Instance running successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:5226 +#: nova/virt/libvirt/driver.py:5414 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5238 +#: nova/virt/libvirt/driver.py:5426 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5241 +#: nova/virt/libvirt/driver.py:5429 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -226,7 +237,7 @@ msgstr "" msgid "Ensuring static filters" msgstr "" -#: nova/virt/libvirt/firewall.py:306 +#: nova/virt/libvirt/firewall.py:304 msgid "Attempted to unfilter instance which is not filtered" msgstr "" @@ -284,11 +295,11 @@ msgstr "" msgid "Removable base files: %s" msgstr "" -#: nova/virt/libvirt/utils.py:530 +#: nova/virt/libvirt/utils.py:536 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1352 +#: nova/virt/xenapi/vm_utils.py:1353 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/nova-log-warning.pot b/nova/locale/nova-log-warning.pot index 5dfa8f908c..8e9cbeba3b 100644 --- a/nova/locale/nova-log-warning.pot +++ b/nova/locale/nova-log-warning.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev374.g4e35f5f\n" +"Project-Id-Version: nova 2014.2.dev763.g740fa02\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:08+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,10 +17,15 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: nova/compute/manager.py:1998 +#: nova/compute/manager.py:2002 msgid "No more network or fixed IP to be allocated" msgstr "" +#: nova/compute/manager.py:2267 +#, python-format +msgid "Failed to delete volume: %(volume_id)s due to %(exc)s" +msgstr "" + #: nova/consoleauth/manager.py:84 #, python-format msgid "Token: 
%(token)s failed to save into memcached." @@ -90,12 +95,12 @@ msgstr "" msgid "Could not decode ram_allocation_ratio: '%s'" msgstr "" -#: nova/virt/libvirt/driver.py:368 +#: nova/virt/libvirt/driver.py:374 #, python-format msgid "Invalid cachemode %(cache_mode)s specified for disk type %(disk_type)s." msgstr "" -#: nova/virt/libvirt/driver.py:606 +#: nova/virt/libvirt/driver.py:620 #, python-format msgid "" "The libvirt driver is not tested on %(type)s/%(arch)s by the OpenStack " @@ -103,108 +108,122 @@ msgid "" "see: https://wiki.openstack.org/wiki/HypervisorSupportMatrix" msgstr "" -#: nova/virt/libvirt/driver.py:656 +#: nova/virt/libvirt/driver.py:671 #, python-format msgid "URI %(uri)s does not support events: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:672 +#: nova/virt/libvirt/driver.py:687 #, python-format msgid "URI %(uri)s does not support connection events: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:865 +#: nova/virt/libvirt/driver.py:929 msgid "Cannot destroy instance, operation time out" msgstr "" -#: nova/virt/libvirt/driver.py:971 +#: nova/virt/libvirt/driver.py:953 +msgid "During wait destroy, instance disappeared." +msgstr "" + +#: nova/virt/libvirt/driver.py:1035 msgid "Instance may be still running, destroy it again." msgstr "" -#: nova/virt/libvirt/driver.py:1026 +#: nova/virt/libvirt/driver.py:1088 #, python-format msgid "Ignoring Volume Error on vol %(vol_id)s during delete %(exc)s" msgstr "" -#: nova/virt/libvirt/driver.py:1076 +#: nova/virt/libvirt/driver.py:1141 #, python-format msgid "Volume %(disk)s possibly unsafe to remove, please clean up manually" msgstr "" -#: nova/virt/libvirt/driver.py:1357 nova/virt/libvirt/driver.py:1365 +#: nova/virt/libvirt/driver.py:1415 nova/virt/libvirt/driver.py:1423 msgid "During detach_volume, instance disappeared." msgstr "" -#: nova/virt/libvirt/driver.py:1410 +#: nova/virt/libvirt/driver.py:1466 msgid "During detach_interface, instance disappeared." 
msgstr "" -#: nova/virt/libvirt/driver.py:1976 +#: nova/virt/libvirt/driver.py:2053 msgid "Failed to soft reboot instance. Trying hard reboot." msgstr "" -#: nova/virt/libvirt/driver.py:2537 +#: nova/virt/libvirt/driver.py:2614 #, python-format msgid "Image %s not found on disk storage. Continue without injecting data" msgstr "" -#: nova/virt/libvirt/driver.py:2700 +#: nova/virt/libvirt/driver.py:2777 msgid "File injection into a boot from volume instance is not supported" msgstr "" -#: nova/virt/libvirt/driver.py:2775 +#: nova/virt/libvirt/driver.py:2852 msgid "Instance disappeared while detaching a PCI device from it." msgstr "" -#: nova/virt/libvirt/driver.py:2830 +#: nova/virt/libvirt/driver.py:2907 #, python-format msgid "Cannot update service status on host: %s,since it is not registered." msgstr "" -#: nova/virt/libvirt/driver.py:2833 +#: nova/virt/libvirt/driver.py:2910 #, python-format msgid "Cannot update service status on host: %s,due to an unexpected exception." msgstr "" -#: nova/virt/libvirt/driver.py:2861 +#: nova/virt/libvirt/driver.py:2938 #, python-format msgid "URI %(uri)s does not support full set of host capabilities: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:3672 +#: nova/virt/libvirt/driver.py:3763 #, python-format msgid "Timeout waiting for vif plugging callback for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3750 +#: nova/virt/libvirt/driver.py:3784 +#, python-format +msgid "couldn't obtain the XML from domain: %(uuid)s, exception: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:3841 msgid "" "Cannot get the number of cpu, because this function is not implemented " "for this platform. 
" msgstr "" -#: nova/virt/libvirt/driver.py:3813 +#: nova/virt/libvirt/driver.py:3901 +#, python-format +msgid "couldn't obtain the vpu count from domain id: %(uuid)s, exception: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:3932 #, python-format -msgid "couldn't obtain the vpu count from domain id: %(id)s, exception: %(ex)s" +msgid "couldn't obtain the memory from domain: %(uuid)s, exception: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:4050 +#: nova/virt/libvirt/driver.py:4134 #, python-format msgid "URI %(uri)s does not support listDevices: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:4594 +#: nova/virt/libvirt/driver.py:4789 #, python-format msgid "plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d." msgstr "" -#: nova/virt/libvirt/driver.py:4727 +#: nova/virt/libvirt/driver.py:4990 #, python-format msgid "" "Error from libvirt while getting description of %(instance_name)s: [Error" " Code %(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:4805 +#: nova/virt/libvirt/driver.py:4998 #, python-format msgid "" "Periodic task is updating the host stat, it is trying to get disk " @@ -212,7 +231,7 @@ msgid "" "resize." msgstr "" -#: nova/virt/libvirt/driver.py:4811 +#: nova/virt/libvirt/driver.py:5004 #, python-format msgid "" "Periodic task is updating the host stat, it is trying to get disk " @@ -245,7 +264,7 @@ msgstr "" msgid "Unknown base file: %s" msgstr "" -#: nova/virt/libvirt/lvm.py:68 +#: nova/virt/libvirt/lvm.py:67 #, python-format msgid "" "Volume group %(vg)s will not be able to hold sparse volume %(lv)s. 
" @@ -257,12 +276,12 @@ msgstr "" msgid "systool is not installed" msgstr "" -#: nova/virt/libvirt/utils.py:242 +#: nova/virt/libvirt/utils.py:248 #, python-format msgid "rbd remove %(name)s in pool %(pool)s failed" msgstr "" -#: nova/virt/libvirt/vif.py:827 +#: nova/virt/libvirt/vif.py:767 #, python-format msgid "" "VIF driver \"%s\" is marked as deprecated and will be removed in the Juno" @@ -274,56 +293,56 @@ msgstr "" msgid "Unknown content in connection_info/qos_specs: %s" msgstr "" -#: nova/virt/libvirt/volume.py:294 +#: nova/virt/libvirt/volume.py:297 #, python-format msgid "" "ISCSI volume not yet found at: %(disk_dev)s. Will rescan & retry. Try " "number: %(tries)s" msgstr "" -#: nova/virt/libvirt/volume.py:361 +#: nova/virt/libvirt/volume.py:364 #, python-format msgid "Unable to delete volume device %s" msgstr "" -#: nova/virt/libvirt/volume.py:372 +#: nova/virt/libvirt/volume.py:375 #, python-format msgid "" "Failed to remove multipath device descriptor %(dev_mapper)s. Exception " "message: %(msg)s" msgstr "" -#: nova/virt/libvirt/volume.py:694 nova/virt/libvirt/volume.py:843 +#: nova/virt/libvirt/volume.py:697 nova/virt/libvirt/volume.py:846 #, python-format msgid "%s is already mounted" msgstr "" -#: nova/virt/libvirt/volume.py:739 +#: nova/virt/libvirt/volume.py:742 #, python-format msgid "AoE volume not yet found at: %(aoedevpath)s. Try number: %(tries)s" msgstr "" -#: nova/virt/libvirt/volume.py:931 +#: nova/virt/libvirt/volume.py:934 #, python-format msgid "" "Fibre volume not yet found at: %(mount_device)s. Will rescan & retry. 
" "Try number: %(tries)s" msgstr "" -#: nova/virt/libvirt/volume.py:1033 +#: nova/virt/libvirt/volume.py:1036 msgid "Value required for 'scality_sofs_config'" msgstr "" -#: nova/virt/libvirt/volume.py:1044 +#: nova/virt/libvirt/volume.py:1047 #, python-format msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: nova/virt/libvirt/volume.py:1050 +#: nova/virt/libvirt/volume.py:1053 msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: nova/virt/libvirt/volume.py:1065 +#: nova/virt/libvirt/volume.py:1068 msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot index b66ba059c1..54341e37c1 100644 --- a/nova/locale/nova.pot +++ b/nova/locale/nova.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev374.g4e35f5f\n" +"Project-Id-Version: nova 2014.2.dev763.g740fa02\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,39 +17,39 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: nova/block_device.py:99 +#: nova/block_device.py:100 msgid "Some fields are invalid." msgstr "" -#: nova/block_device.py:109 +#: nova/block_device.py:110 msgid "Some required fields are missing" msgstr "" -#: nova/block_device.py:125 +#: nova/block_device.py:126 msgid "Boot index is invalid." msgstr "" -#: nova/block_device.py:168 +#: nova/block_device.py:169 msgid "Unrecognized legacy format." msgstr "" -#: nova/block_device.py:185 +#: nova/block_device.py:186 msgid "Invalid source_type field." msgstr "" -#: nova/block_device.py:189 +#: nova/block_device.py:190 msgid "Missing device UUID." msgstr "" -#: nova/block_device.py:368 +#: nova/block_device.py:369 msgid "Device name empty or too long." 
msgstr "" -#: nova/block_device.py:372 +#: nova/block_device.py:373 msgid "Device name contains spaces." msgstr "" -#: nova/block_device.py:382 +#: nova/block_device.py:383 msgid "Invalid volume_size." msgstr "" @@ -399,88 +399,88 @@ msgstr "" msgid "Failed to deploy instance: %(reason)s" msgstr "" -#: nova/exception.py:402 +#: nova/exception.py:402 nova/exception.py:406 #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "" -#: nova/exception.py:406 +#: nova/exception.py:410 msgid "Service is unavailable at this time." msgstr "" -#: nova/exception.py:410 +#: nova/exception.py:414 #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "" -#: nova/exception.py:414 +#: nova/exception.py:418 #, python-format msgid "Connection to the hypervisor is broken on host: %(host)s" msgstr "" -#: nova/exception.py:418 +#: nova/exception.py:422 #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "" -#: nova/exception.py:422 +#: nova/exception.py:426 #, python-format msgid "Compute service of %(host)s is still in use." msgstr "" -#: nova/exception.py:426 +#: nova/exception.py:430 #, python-format msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "" -#: nova/exception.py:431 +#: nova/exception.py:435 msgid "The supplied hypervisor type of is invalid." msgstr "" -#: nova/exception.py:435 +#: nova/exception.py:439 msgid "The instance requires a newer hypervisor version than has been provided." msgstr "" -#: nova/exception.py:440 +#: nova/exception.py:444 #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." msgstr "" -#: nova/exception.py:445 +#: nova/exception.py:449 #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "" -#: nova/exception.py:449 +#: nova/exception.py:453 #, python-format msgid "The supplied device path (%(path)s) is in use." 
msgstr "" -#: nova/exception.py:454 +#: nova/exception.py:458 #, python-format msgid "The supplied device (%(device)s) is busy." msgstr "" -#: nova/exception.py:458 +#: nova/exception.py:462 #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "" -#: nova/exception.py:462 +#: nova/exception.py:466 #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "" -#: nova/exception.py:466 +#: nova/exception.py:470 #, python-format msgid "" "VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " "tag is %(tag)s, but the one associated with the port group is %(pgroup)s." msgstr "" -#: nova/exception.py:472 +#: nova/exception.py:476 #, python-format msgid "" "vSwitch which contains the port group %(bridge)s is not associated with " @@ -488,111 +488,111 @@ msgid "" "one associated is %(actual)s." msgstr "" -#: nova/exception.py:479 +#: nova/exception.py:483 #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "" -#: nova/exception.py:483 +#: nova/exception.py:487 #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "" -#: nova/exception.py:487 +#: nova/exception.py:491 #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "" -#: nova/exception.py:491 +#: nova/exception.py:495 #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "" -#: nova/exception.py:495 +#: nova/exception.py:499 #, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "" -#: nova/exception.py:499 +#: nova/exception.py:503 #, python-format msgid "Ec2 id %(ec2_id)s is unacceptable." msgstr "" -#: nova/exception.py:503 +#: nova/exception.py:507 #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "" -#: nova/exception.py:507 +#: nova/exception.py:511 #, python-format msgid "Invalid ID received %(id)s." msgstr "" -#: nova/exception.py:511 +#: nova/exception.py:515 msgid "Constraint not met." 
msgstr "" -#: nova/exception.py:516 +#: nova/exception.py:520 msgid "Resource could not be found." msgstr "" -#: nova/exception.py:521 +#: nova/exception.py:525 #, python-format msgid "No agent-build associated with id %(id)s." msgstr "" -#: nova/exception.py:525 +#: nova/exception.py:529 #, python-format msgid "" "Agent-build with hypervisor %(hypervisor)s os %(os)s architecture " "%(architecture)s exists." msgstr "" -#: nova/exception.py:531 +#: nova/exception.py:535 #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "" -#: nova/exception.py:535 +#: nova/exception.py:539 #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "" -#: nova/exception.py:540 +#: nova/exception.py:544 #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "" -#: nova/exception.py:544 +#: nova/exception.py:548 #, python-format msgid "No disk at %(location)s" msgstr "" -#: nova/exception.py:548 +#: nova/exception.py:552 #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "" -#: nova/exception.py:552 +#: nova/exception.py:556 #, python-format msgid "Invalid image href %(image_href)s." msgstr "" -#: nova/exception.py:556 +#: nova/exception.py:560 #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "" -#: nova/exception.py:561 +#: nova/exception.py:565 #, python-format msgid "Image %(image_id)s could not be found." msgstr "" -#: nova/exception.py:565 +#: nova/exception.py:569 msgid "The current driver does not support preserving ephemeral partitions." msgstr "" -#: nova/exception.py:571 +#: nova/exception.py:575 #, python-format msgid "" "Image %(image_id)s could not be found. The nova EC2 API assigns image ids" @@ -600,1169 +600,1206 @@ msgid "" "image ids since adding this image?" msgstr "" -#: nova/exception.py:578 +#: nova/exception.py:582 #, python-format msgid "Project %(project_id)s could not be found." 
msgstr "" -#: nova/exception.py:582 +#: nova/exception.py:586 msgid "Cannot find SR to read/write VDI." msgstr "" -#: nova/exception.py:586 +#: nova/exception.py:590 #, python-format msgid "Network %(network_id)s is duplicated." msgstr "" -#: nova/exception.py:590 +#: nova/exception.py:594 #, python-format msgid "Network %(network_id)s is still in use." msgstr "" -#: nova/exception.py:594 +#: nova/exception.py:598 #, python-format msgid "%(req)s is required to create a network." msgstr "" -#: nova/exception.py:598 +#: nova/exception.py:602 #, python-format msgid "Network %(network_id)s could not be found." msgstr "" -#: nova/exception.py:602 +#: nova/exception.py:606 #, python-format msgid "Port id %(port_id)s could not be found." msgstr "" -#: nova/exception.py:606 +#: nova/exception.py:610 #, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "" -#: nova/exception.py:610 +#: nova/exception.py:614 #, python-format msgid "Network could not be found for uuid %(uuid)s" msgstr "" -#: nova/exception.py:614 +#: nova/exception.py:618 #, python-format msgid "Network could not be found with cidr %(cidr)s." msgstr "" -#: nova/exception.py:618 +#: nova/exception.py:622 #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "" -#: nova/exception.py:622 +#: nova/exception.py:626 msgid "No networks defined." msgstr "" -#: nova/exception.py:626 +#: nova/exception.py:630 msgid "No more available networks." msgstr "" -#: nova/exception.py:630 +#: nova/exception.py:634 #, python-format msgid "" "Either network uuid %(network_uuid)s is not present or is not assigned to" " the project %(project_id)s." msgstr "" -#: nova/exception.py:635 +#: nova/exception.py:639 msgid "" "More than one possible network found. 
Specify network ID(s) to select " "which one(s) to connect to," msgstr "" -#: nova/exception.py:640 +#: nova/exception.py:644 #, python-format msgid "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "" -#: nova/exception.py:645 +#: nova/exception.py:649 #, python-format msgid "" "It is not allowed to create an interface on external network " "%(network_uuid)s" msgstr "" -#: nova/exception.py:650 +#: nova/exception.py:654 msgid "Could not find the datastore reference(s) which the VM uses." msgstr "" -#: nova/exception.py:654 +#: nova/exception.py:658 #, python-format msgid "Port %(port_id)s is still in use." msgstr "" -#: nova/exception.py:658 +#: nova/exception.py:662 #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "" -#: nova/exception.py:662 +#: nova/exception.py:666 #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "" -#: nova/exception.py:666 +#: nova/exception.py:670 #, python-format msgid "No free port available for instance %(instance)s." msgstr "" -#: nova/exception.py:670 +#: nova/exception.py:674 #, python-format msgid "Fixed ip %(address)s already exists." msgstr "" -#: nova/exception.py:674 +#: nova/exception.py:678 #, python-format msgid "No fixed IP associated with id %(id)s." msgstr "" -#: nova/exception.py:678 +#: nova/exception.py:682 #, python-format msgid "Fixed ip not found for address %(address)s." msgstr "" -#: nova/exception.py:682 +#: nova/exception.py:686 #, python-format msgid "Instance %(instance_uuid)s has zero fixed ips." msgstr "" -#: nova/exception.py:686 +#: nova/exception.py:690 #, python-format msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." msgstr "" -#: nova/exception.py:691 +#: nova/exception.py:695 #, python-format msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'." 
msgstr "" -#: nova/exception.py:695 +#: nova/exception.py:699 #, python-format msgid "" "Fixed IP address (%(address)s) does not exist in network " "(%(network_uuid)s)." msgstr "" -#: nova/exception.py:700 +#: nova/exception.py:704 #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance " "%(instance_uuid)s." msgstr "" -#: nova/exception.py:705 +#: nova/exception.py:709 #, python-format msgid "More than one instance is associated with fixed ip address '%(address)s'." msgstr "" -#: nova/exception.py:710 +#: nova/exception.py:714 #, python-format msgid "Fixed IP address %(address)s is invalid." msgstr "" -#: nova/exception.py:715 +#: nova/exception.py:719 msgid "Zero fixed ips available." msgstr "" -#: nova/exception.py:719 +#: nova/exception.py:723 msgid "Zero fixed ips could be found." msgstr "" -#: nova/exception.py:723 +#: nova/exception.py:727 #, python-format msgid "Floating ip %(address)s already exists." msgstr "" -#: nova/exception.py:728 +#: nova/exception.py:732 #, python-format msgid "Floating ip not found for id %(id)s." msgstr "" -#: nova/exception.py:732 +#: nova/exception.py:736 #, python-format msgid "The DNS entry %(name)s already exists in domain %(domain)s." msgstr "" -#: nova/exception.py:736 +#: nova/exception.py:740 #, python-format msgid "Floating ip not found for address %(address)s." msgstr "" -#: nova/exception.py:740 +#: nova/exception.py:744 #, python-format msgid "Floating ip not found for host %(host)s." msgstr "" -#: nova/exception.py:744 +#: nova/exception.py:748 #, python-format msgid "Multiple floating ips are found for address %(address)s." msgstr "" -#: nova/exception.py:748 +#: nova/exception.py:752 msgid "Floating ip pool not found." msgstr "" -#: nova/exception.py:753 +#: nova/exception.py:757 msgid "Zero floating ips available." msgstr "" -#: nova/exception.py:759 +#: nova/exception.py:763 #, python-format msgid "Floating ip %(address)s is associated." 
msgstr "" -#: nova/exception.py:763 +#: nova/exception.py:767 #, python-format msgid "Floating ip %(address)s is not associated." msgstr "" -#: nova/exception.py:767 +#: nova/exception.py:771 msgid "Zero floating ips exist." msgstr "" -#: nova/exception.py:772 +#: nova/exception.py:776 #, python-format msgid "Interface %(interface)s not found." msgstr "" -#: nova/exception.py:777 nova/api/openstack/compute/contrib/floating_ips.py:97 +#: nova/exception.py:781 nova/api/openstack/compute/contrib/floating_ips.py:97 msgid "Cannot disassociate auto assigned floating ip" msgstr "" -#: nova/exception.py:782 +#: nova/exception.py:786 #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "" -#: nova/exception.py:786 +#: nova/exception.py:790 #, python-format msgid "Service %(service_id)s could not be found." msgstr "" -#: nova/exception.py:790 +#: nova/exception.py:794 #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "" -#: nova/exception.py:794 +#: nova/exception.py:798 #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "" -#: nova/exception.py:798 +#: nova/exception.py:802 #, python-format msgid "Host %(host)s could not be found." msgstr "" -#: nova/exception.py:802 +#: nova/exception.py:806 #, python-format msgid "Compute host %(host)s could not be found." msgstr "" -#: nova/exception.py:806 +#: nova/exception.py:810 #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: nova/exception.py:810 +#: nova/exception.py:814 #, python-format msgid "Invalid reservation expiration %(expire)s." 
msgstr "" -#: nova/exception.py:814 +#: nova/exception.py:818 #, python-format msgid "" "Change would make usage less than 0 for the following resources: " "%(unders)s" msgstr "" -#: nova/exception.py:819 +#: nova/exception.py:823 msgid "Quota could not be found" msgstr "" -#: nova/exception.py:823 +#: nova/exception.py:827 #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "" -#: nova/exception.py:828 +#: nova/exception.py:832 #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "" -#: nova/exception.py:832 +#: nova/exception.py:836 #, python-format msgid "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" -#: nova/exception.py:837 +#: nova/exception.py:841 #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: nova/exception.py:841 +#: nova/exception.py:845 #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "" -#: nova/exception.py:845 +#: nova/exception.py:849 #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: nova/exception.py:849 +#: nova/exception.py:853 #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: nova/exception.py:853 +#: nova/exception.py:857 #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: nova/exception.py:857 +#: nova/exception.py:861 #, python-format msgid "Security group %(security_group_id)s not found." msgstr "" -#: nova/exception.py:861 +#: nova/exception.py:865 #, python-format msgid "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "" -#: nova/exception.py:866 +#: nova/exception.py:870 #, python-format msgid "Security group with rule %(rule_id)s not found." msgstr "" -#: nova/exception.py:871 +#: nova/exception.py:875 #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." 
msgstr "" -#: nova/exception.py:876 +#: nova/exception.py:880 #, python-format msgid "" "Security group %(security_group_id)s is already associated with the " "instance %(instance_id)s" msgstr "" -#: nova/exception.py:881 +#: nova/exception.py:885 #, python-format msgid "" "Security group %(security_group_id)s is not associated with the instance " "%(instance_id)s" msgstr "" -#: nova/exception.py:886 +#: nova/exception.py:890 #, python-format msgid "Security group default rule (%rule_id)s not found." msgstr "" -#: nova/exception.py:890 +#: nova/exception.py:894 msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "" -#: nova/exception.py:896 +#: nova/exception.py:900 #, python-format msgid "Rule already exists in group: %(rule)s" msgstr "" -#: nova/exception.py:900 +#: nova/exception.py:904 msgid "No Unique Match Found." msgstr "" -#: nova/exception.py:905 +#: nova/exception.py:909 #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "" -#: nova/exception.py:909 +#: nova/exception.py:913 #, python-format msgid "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "" -#: nova/exception.py:914 +#: nova/exception.py:918 #, python-format msgid "Console pool %(pool_id)s could not be found." msgstr "" -#: nova/exception.py:918 +#: nova/exception.py:922 #, python-format msgid "" "Console pool with host %(host)s, console_type %(console_type)s and " "compute_host %(compute_host)s already exists." msgstr "" -#: nova/exception.py:924 +#: nova/exception.py:928 #, python-format msgid "" "Console pool of type %(console_type)s for compute host %(compute_host)s " "on proxy host %(host)s not found." msgstr "" -#: nova/exception.py:930 +#: nova/exception.py:934 #, python-format msgid "Console %(console_id)s could not be found." 
msgstr "" -#: nova/exception.py:934 +#: nova/exception.py:938 #, python-format msgid "Console for instance %(instance_uuid)s could not be found." msgstr "" -#: nova/exception.py:938 +#: nova/exception.py:942 #, python-format msgid "" "Console for instance %(instance_uuid)s in pool %(pool_id)s could not be " "found." msgstr "" -#: nova/exception.py:943 +#: nova/exception.py:947 #, python-format msgid "Invalid console type %(console_type)s" msgstr "" -#: nova/exception.py:947 +#: nova/exception.py:951 #, python-format msgid "Unavailable console type %(console_type)s." msgstr "" -#: nova/exception.py:951 +#: nova/exception.py:955 #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "" -#: nova/exception.py:956 +#: nova/exception.py:960 #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "" -#: nova/exception.py:960 +#: nova/exception.py:964 #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "" -#: nova/exception.py:964 +#: nova/exception.py:968 #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" -#: nova/exception.py:969 +#: nova/exception.py:973 +#, python-format +msgid "" +"Flavor %(id)d extra spec cannot be updated or created after %(retries)d " +"retries." +msgstr "" + +#: nova/exception.py:978 #, python-format msgid "Cell %(cell_name)s doesn't exist." msgstr "" -#: nova/exception.py:973 +#: nova/exception.py:982 #, python-format msgid "Cell with name %(name)s already exists." 
msgstr "" -#: nova/exception.py:977 +#: nova/exception.py:986 #, python-format msgid "Inconsistency in cell routing: %(reason)s" msgstr "" -#: nova/exception.py:981 +#: nova/exception.py:990 #, python-format msgid "Service API method not found: %(detail)s" msgstr "" -#: nova/exception.py:985 +#: nova/exception.py:994 msgid "Timeout waiting for response from cell" msgstr "" -#: nova/exception.py:989 +#: nova/exception.py:998 #, python-format msgid "Cell message has reached maximum hop count: %(hop_count)s" msgstr "" -#: nova/exception.py:993 +#: nova/exception.py:1002 msgid "No cells available matching scheduling criteria." msgstr "" -#: nova/exception.py:997 +#: nova/exception.py:1006 msgid "Cannot update cells configuration file." msgstr "" -#: nova/exception.py:1001 +#: nova/exception.py:1010 #, python-format msgid "Cell is not known for instance %(instance_uuid)s" msgstr "" -#: nova/exception.py:1005 +#: nova/exception.py:1014 #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: nova/exception.py:1009 +#: nova/exception.py:1018 #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" -#: nova/exception.py:1014 +#: nova/exception.py:1023 #, python-format msgid "" "Metric %(name)s could not be found on the compute host node " "%(host)s.%(node)s." msgstr "" -#: nova/exception.py:1019 +#: nova/exception.py:1028 #, python-format msgid "File %(file_path)s could not be found." msgstr "" -#: nova/exception.py:1023 +#: nova/exception.py:1032 msgid "Zero files could be found." msgstr "" -#: nova/exception.py:1027 +#: nova/exception.py:1036 #, python-format msgid "Virtual switch associated with the network adapter %(adapter)s not found." msgstr "" -#: nova/exception.py:1032 +#: nova/exception.py:1041 #, python-format msgid "Network adapter %(adapter)s could not be found." 
msgstr "" -#: nova/exception.py:1036 +#: nova/exception.py:1045 #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "" -#: nova/exception.py:1040 +#: nova/exception.py:1049 msgid "Action not allowed." msgstr "" -#: nova/exception.py:1044 +#: nova/exception.py:1053 msgid "Rotation is not allowed for snapshots" msgstr "" -#: nova/exception.py:1048 +#: nova/exception.py:1057 msgid "Rotation param is required for backup image_type" msgstr "" -#: nova/exception.py:1053 nova/tests/compute/test_keypairs.py:144 +#: nova/exception.py:1062 nova/tests/compute/test_keypairs.py:144 #, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "" -#: nova/exception.py:1057 +#: nova/exception.py:1066 #, python-format msgid "Instance %(name)s already exists." msgstr "" -#: nova/exception.py:1061 +#: nova/exception.py:1070 #, python-format msgid "Flavor with name %(name)s already exists." msgstr "" -#: nova/exception.py:1065 +#: nova/exception.py:1074 #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "" -#: nova/exception.py:1069 +#: nova/exception.py:1078 #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." 
msgstr "" -#: nova/exception.py:1074 +#: nova/exception.py:1083 #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "" -#: nova/exception.py:1078 +#: nova/exception.py:1087 #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "" -#: nova/exception.py:1082 +#: nova/exception.py:1091 #, python-format msgid "Storage error: %(reason)s" msgstr "" -#: nova/exception.py:1086 +#: nova/exception.py:1095 #, python-format msgid "Migration error: %(reason)s" msgstr "" -#: nova/exception.py:1090 +#: nova/exception.py:1099 #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "" -#: nova/exception.py:1094 +#: nova/exception.py:1103 #, python-format msgid "Malformed message body: %(reason)s" msgstr "" -#: nova/exception.py:1100 +#: nova/exception.py:1109 #, python-format msgid "Could not find config at %(path)s" msgstr "" -#: nova/exception.py:1104 +#: nova/exception.py:1113 #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" -#: nova/exception.py:1108 +#: nova/exception.py:1117 msgid "When resizing, instances must change flavor!" msgstr "" -#: nova/exception.py:1112 +#: nova/exception.py:1121 #, python-format msgid "Resize error: %(reason)s" msgstr "" -#: nova/exception.py:1116 +#: nova/exception.py:1125 #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "" -#: nova/exception.py:1120 +#: nova/exception.py:1129 msgid "Flavor's memory is too small for requested image." msgstr "" -#: nova/exception.py:1124 +#: nova/exception.py:1133 msgid "Flavor's disk is too small for requested image." msgstr "" -#: nova/exception.py:1128 +#: nova/exception.py:1137 #, python-format msgid "Insufficient free memory on compute node to start %(uuid)s." msgstr "" -#: nova/exception.py:1132 +#: nova/exception.py:1141 #, python-format msgid "No valid host was found. 
%(reason)s" msgstr "" -#: nova/exception.py:1137 +#: nova/exception.py:1146 #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "" -#: nova/exception.py:1144 +#: nova/exception.py:1153 #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used " "%(used)d of %(allowed)d %(resource)s" msgstr "" -#: nova/exception.py:1149 +#: nova/exception.py:1158 msgid "Maximum number of floating ips exceeded" msgstr "" -#: nova/exception.py:1153 +#: nova/exception.py:1162 msgid "Maximum number of fixed ips exceeded" msgstr "" -#: nova/exception.py:1157 +#: nova/exception.py:1166 #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "" -#: nova/exception.py:1161 +#: nova/exception.py:1170 msgid "Personality file limit exceeded" msgstr "" -#: nova/exception.py:1165 +#: nova/exception.py:1174 msgid "Personality file path too long" msgstr "" -#: nova/exception.py:1169 +#: nova/exception.py:1178 msgid "Personality file content too long" msgstr "" -#: nova/exception.py:1173 nova/tests/compute/test_keypairs.py:155 +#: nova/exception.py:1182 nova/tests/compute/test_keypairs.py:155 msgid "Maximum number of key pairs exceeded" msgstr "" -#: nova/exception.py:1178 +#: nova/exception.py:1187 msgid "Maximum number of security groups or rules exceeded" msgstr "" -#: nova/exception.py:1182 +#: nova/exception.py:1191 msgid "Maximum number of ports exceeded" msgstr "" -#: nova/exception.py:1186 +#: nova/exception.py:1195 #, python-format msgid "" "Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " "%(reason)s." msgstr "" -#: nova/exception.py:1191 +#: nova/exception.py:1200 #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "" -#: nova/exception.py:1195 +#: nova/exception.py:1204 #, python-format msgid "Aggregate %(aggregate_name)s already exists." 
msgstr "" -#: nova/exception.py:1199 +#: nova/exception.py:1208 #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "" -#: nova/exception.py:1203 +#: nova/exception.py:1212 #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: nova/exception.py:1208 +#: nova/exception.py:1217 #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "" -#: nova/exception.py:1212 +#: nova/exception.py:1221 msgid "Unable to create flavor" msgstr "" -#: nova/exception.py:1216 +#: nova/exception.py:1225 #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "" -#: nova/exception.py:1222 +#: nova/exception.py:1231 #, python-format msgid "Detected existing vlan with id %(vlan)d" msgstr "" -#: nova/exception.py:1226 +#: nova/exception.py:1235 msgid "There was a conflict when trying to complete your request." msgstr "" -#: nova/exception.py:1232 +#: nova/exception.py:1241 #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "" -#: nova/exception.py:1236 +#: nova/exception.py:1245 #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "" -#: nova/exception.py:1241 +#: nova/exception.py:1250 #, python-format msgid "Node %(node_id)s could not be found." msgstr "" -#: nova/exception.py:1245 +#: nova/exception.py:1254 #, python-format msgid "Node with UUID %(node_uuid)s could not be found." msgstr "" -#: nova/exception.py:1249 +#: nova/exception.py:1258 #, python-format msgid "Marker %(marker)s could not be found." msgstr "" -#: nova/exception.py:1254 +#: nova/exception.py:1263 #, python-format msgid "Invalid id: %(val)s (expecting \"i-...\")." 
msgstr "" -#: nova/exception.py:1258 +#: nova/exception.py:1267 #, python-format msgid "Could not fetch image %(image_id)s" msgstr "" -#: nova/exception.py:1262 +#: nova/exception.py:1271 #, python-format msgid "Could not upload image %(image_id)s" msgstr "" -#: nova/exception.py:1266 +#: nova/exception.py:1275 #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "" -#: nova/exception.py:1270 +#: nova/exception.py:1279 #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "" -#: nova/exception.py:1274 +#: nova/exception.py:1283 #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "" -#: nova/exception.py:1278 +#: nova/exception.py:1287 #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "" -#: nova/exception.py:1282 +#: nova/exception.py:1291 #, python-format msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "" -#: nova/exception.py:1287 +#: nova/exception.py:1296 #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "" -#: nova/exception.py:1292 +#: nova/exception.py:1301 #, python-format msgid "Failed to attach network adapter device to %(instance)s" msgstr "" -#: nova/exception.py:1296 +#: nova/exception.py:1305 #, python-format msgid "Failed to detach network adapter device from %(instance)s" msgstr "" -#: nova/exception.py:1300 +#: nova/exception.py:1309 #, python-format msgid "" "User data too large. User data must be no larger than %(maxsize)s bytes " "once base64 encoded. Your data is %(length)d bytes" msgstr "" -#: nova/exception.py:1306 +#: nova/exception.py:1315 msgid "User data needs to be valid base 64." 
msgstr "" -#: nova/exception.py:1310 +#: nova/exception.py:1319 #, python-format msgid "" "Unexpected task state: expecting %(expected)s but the actual state is " "%(actual)s" msgstr "" -#: nova/exception.py:1319 +#: nova/exception.py:1328 #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not " "found" msgstr "" -#: nova/exception.py:1324 +#: nova/exception.py:1333 #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "" -#: nova/exception.py:1328 +#: nova/exception.py:1337 #, python-format msgid "" "Unexpected VM state: expecting %(expected)s but the actual state is " "%(actual)s" msgstr "" -#: nova/exception.py:1333 +#: nova/exception.py:1342 #, python-format msgid "The CA file for %(project)s could not be found" msgstr "" -#: nova/exception.py:1337 +#: nova/exception.py:1346 #, python-format msgid "The CRL file for %(project)s could not be found" msgstr "" -#: nova/exception.py:1341 +#: nova/exception.py:1350 msgid "Instance recreate is not supported." msgstr "" -#: nova/exception.py:1345 +#: nova/exception.py:1354 #, python-format msgid "" "The service from servicegroup driver %(driver)s is temporarily " "unavailable." 
msgstr "" -#: nova/exception.py:1350 +#: nova/exception.py:1359 #, python-format msgid "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" -#: nova/exception.py:1355 +#: nova/exception.py:1364 #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" -#: nova/exception.py:1360 +#: nova/exception.py:1369 #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt " "driver" msgstr "" -#: nova/exception.py:1365 +#: nova/exception.py:1374 #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "" -#: nova/exception.py:1369 +#: nova/exception.py:1378 #, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "" -#: nova/exception.py:1373 +#: nova/exception.py:1382 #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "" -#: nova/exception.py:1378 +#: nova/exception.py:1387 #, python-format msgid "Shadow table with name %(name)s already exists." 
msgstr "" -#: nova/exception.py:1383 +#: nova/exception.py:1392 #, python-format msgid "Instance rollback performed due to: %s" msgstr "" -#: nova/exception.py:1389 +#: nova/exception.py:1398 #, python-format msgid "Unsupported object type %(objtype)s" msgstr "" -#: nova/exception.py:1393 +#: nova/exception.py:1402 #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "" -#: nova/exception.py:1397 +#: nova/exception.py:1406 #, python-format msgid "Version %(objver)s of %(objname)s is not supported" msgstr "" -#: nova/exception.py:1401 +#: nova/exception.py:1410 #, python-format msgid "Cannot modify readonly field %(field)s" msgstr "" -#: nova/exception.py:1405 +#: nova/exception.py:1414 #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "" -#: nova/exception.py:1409 +#: nova/exception.py:1418 #, python-format msgid "Field %(field)s of %(objname)s is not an instance of Field" msgstr "" -#: nova/exception.py:1413 +#: nova/exception.py:1422 #, python-format msgid "Core API extensions are missing: %(missing_apis)s" msgstr "" -#: nova/exception.py:1417 +#: nova/exception.py:1426 #, python-format msgid "Error during following call to agent: %(method)s" msgstr "" -#: nova/exception.py:1421 +#: nova/exception.py:1430 #, python-format msgid "Unable to contact guest agent. The following call timed out: %(method)s" msgstr "" -#: nova/exception.py:1426 +#: nova/exception.py:1435 #, python-format msgid "Agent does not support the call: %(method)s" msgstr "" -#: nova/exception.py:1430 +#: nova/exception.py:1439 #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "" -#: nova/exception.py:1434 +#: nova/exception.py:1443 #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "" -#: nova/exception.py:1438 +#: nova/exception.py:1447 #, python-format msgid "Instance group %(group_uuid)s has no metadata with key %(metadata_key)s." 
msgstr "" -#: nova/exception.py:1443 +#: nova/exception.py:1452 #, python-format msgid "Instance group %(group_uuid)s has no member with id %(instance_id)s." msgstr "" -#: nova/exception.py:1448 +#: nova/exception.py:1457 #, python-format msgid "Instance group %(group_uuid)s has no policy %(policy)s." msgstr "" -#: nova/exception.py:1452 +#: nova/exception.py:1461 #, python-format msgid "Number of retries to plugin (%(num_retries)d) exceeded." msgstr "" -#: nova/exception.py:1456 +#: nova/exception.py:1465 #, python-format msgid "There was an error with the download module %(module)s. %(reason)s" msgstr "" -#: nova/exception.py:1461 +#: nova/exception.py:1470 #, python-format msgid "" "The metadata for this location will not work with this module %(module)s." " %(reason)s." msgstr "" -#: nova/exception.py:1466 +#: nova/exception.py:1475 #, python-format msgid "The method %(method_name)s is not implemented." msgstr "" -#: nova/exception.py:1470 +#: nova/exception.py:1479 #, python-format msgid "The module %(module)s is misconfigured: %(reason)s." msgstr "" -#: nova/exception.py:1474 +#: nova/exception.py:1483 #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "" -#: nova/exception.py:1478 +#: nova/exception.py:1487 #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "" -#: nova/exception.py:1482 +#: nova/exception.py:1491 #, python-format msgid "PCI device %(id)s not found" msgstr "" -#: nova/exception.py:1486 +#: nova/exception.py:1495 #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." 
msgstr "" -#: nova/exception.py:1490 +#: nova/exception.py:1499 #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" -#: nova/exception.py:1496 +#: nova/exception.py:1505 #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead " "of %(hopeowner)s" msgstr "" -#: nova/exception.py:1502 +#: nova/exception.py:1511 #, python-format msgid "PCI device request (%requests)s failed" msgstr "" -#: nova/exception.py:1507 +#: nova/exception.py:1516 #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty " "pool" msgstr "" -#: nova/exception.py:1513 +#: nova/exception.py:1522 #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "" -#: nova/exception.py:1517 +#: nova/exception.py:1526 #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "" -#: nova/exception.py:1522 +#: nova/exception.py:1531 #, python-format msgid "Not enough parameters: %(reason)s" msgstr "" -#: nova/exception.py:1527 +#: nova/exception.py:1536 #, python-format msgid "Invalid PCI devices Whitelist config %(reason)s" msgstr "" -#: nova/exception.py:1531 +#: nova/exception.py:1540 #, python-format msgid "Cannot change %(node_id)s to %(new_node_id)s" msgstr "" -#: nova/exception.py:1541 +#: nova/exception.py:1550 #, python-format msgid "" "Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: " "%(reason)s" msgstr "" -#: nova/exception.py:1546 +#: nova/exception.py:1555 #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "" -#: nova/exception.py:1550 +#: nova/exception.py:1559 #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "" -#: nova/exception.py:1554 +#: nova/exception.py:1563 #, python-format msgid "Key manager error: %(reason)s" msgstr "" -#: nova/exception.py:1558 +#: nova/exception.py:1567 #, python-format msgid "Failed to remove volume(s): 
(%(reason)s)" msgstr "" -#: nova/exception.py:1562 +#: nova/exception.py:1571 #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "" -#: nova/exception.py:1566 +#: nova/exception.py:1575 #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "" -#: nova/exception.py:1571 +#: nova/exception.py:1580 #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the " "maximum allowed by flavor %(max_vram)d." msgstr "" -#: nova/exception.py:1576 +#: nova/exception.py:1585 #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "" -#: nova/exception.py:1580 +#: nova/exception.py:1589 msgid "" -"Block migration of instances with config drives is not supported in " -"libvirt." +"Live migration of instances with config drives is not supported in " +"libvirt unless libvirt instance path and drive data is shared across " +"compute nodes." msgstr "" -#: nova/exception.py:1585 +#: nova/exception.py:1595 +#, python-format +msgid "" +"Host %(server)s is running an old version of Nova, live migrations " +"involving that version may cause data loss. Upgrade Nova on %(server)s " +"and try again." 
+msgstr "" + +#: nova/exception.py:1601 #, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "" +#: nova/exception.py:1605 +#, python-format +msgid "" +"Image vCPU limits %(sockets)d:%(cores)d:%(threads)d exceeds permitted " +"%(maxsockets)d:%(maxcores)d:%(maxthreads)d" +msgstr "" + +#: nova/exception.py:1610 +#, python-format +msgid "" +"Image vCPU topology %(sockets)d:%(cores)d:%(threads)d exceeds permitted " +"%(maxsockets)d:%(maxcores)d:%(maxthreads)d" +msgstr "" + +#: nova/exception.py:1615 +#, python-format +msgid "" +"Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to" +" satisfy for vcpus count %(vcpus)d" +msgstr "" + #: nova/filters.py:84 #, python-format msgid "Filter %s returned 0 hosts" @@ -1786,114 +1823,114 @@ msgstr "" msgid "Failed to roll back reservations %s" msgstr "" -#: nova/service.py:160 +#: nova/service.py:161 #, python-format msgid "Starting %(topic)s node (version %(version)s)" msgstr "" -#: nova/service.py:285 +#: nova/service.py:286 msgid "Service killed that has no database entry" msgstr "" -#: nova/service.py:297 +#: nova/service.py:298 msgid "Service error occurred during cleanup_host" msgstr "" -#: nova/service.py:314 +#: nova/service.py:315 #, python-format msgid "Temporary directory is invalid: %s" msgstr "" -#: nova/service.py:339 +#: nova/service.py:340 #, python-format msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0" msgstr "" -#: nova/service.py:424 +#: nova/service.py:433 msgid "serve() can only be called once" msgstr "" -#: nova/utils.py:148 +#: nova/utils.py:147 #, python-format msgid "Expected to receive %(exp)s bytes, but actually %(act)s" msgstr "" -#: nova/utils.py:354 +#: nova/utils.py:353 #, python-format msgid "Couldn't get IPv4 : %(ex)s" msgstr "" -#: nova/utils.py:370 +#: nova/utils.py:369 #, python-format msgid "IPv4 address is not found.: %s" msgstr "" -#: nova/utils.py:373 +#: nova/utils.py:372 #, python-format msgid 
"Couldn't get IPv4 of %(interface)s : %(ex)s" msgstr "" -#: nova/utils.py:388 +#: nova/utils.py:387 #, python-format msgid "Link Local address is not found.:%s" msgstr "" -#: nova/utils.py:391 +#: nova/utils.py:390 #, python-format msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" msgstr "" -#: nova/utils.py:412 +#: nova/utils.py:411 #, python-format msgid "Invalid backend: %s" msgstr "" -#: nova/utils.py:457 +#: nova/utils.py:454 #, python-format msgid "Expected object of type: %s" msgstr "" -#: nova/utils.py:485 +#: nova/utils.py:482 #, python-format msgid "Invalid server_string: %s" msgstr "" -#: nova/utils.py:776 nova/virt/configdrive.py:177 +#: nova/utils.py:773 #, python-format msgid "Could not remove tmpdir: %s" msgstr "" -#: nova/utils.py:966 +#: nova/utils.py:963 #, python-format msgid "%s is not a string or unicode" msgstr "" -#: nova/utils.py:970 +#: nova/utils.py:967 #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "" -#: nova/utils.py:975 +#: nova/utils.py:972 #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "" -#: nova/utils.py:985 +#: nova/utils.py:982 #, python-format msgid "%(value_name)s must be an integer" msgstr "" -#: nova/utils.py:991 +#: nova/utils.py:988 #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "" -#: nova/utils.py:997 +#: nova/utils.py:994 #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "" -#: nova/utils.py:1031 +#: nova/utils.py:1028 #, python-format msgid "Hypervisor version %s is invalid." 
msgstr "" @@ -1903,51 +1940,51 @@ msgstr "" msgid "Failed to load %(cfgfile)s: %(ex)s" msgstr "" -#: nova/wsgi.py:132 +#: nova/wsgi.py:133 #, python-format msgid "Could not bind to %(host)s:%(port)s" msgstr "" -#: nova/wsgi.py:137 +#: nova/wsgi.py:138 #, python-format msgid "%(name)s listening on %(host)s:%(port)s" msgstr "" -#: nova/wsgi.py:152 nova/openstack/common/sslutils.py:50 +#: nova/wsgi.py:159 nova/openstack/common/sslutils.py:50 #, python-format msgid "Unable to find cert_file : %s" msgstr "" -#: nova/wsgi.py:156 nova/openstack/common/sslutils.py:53 +#: nova/wsgi.py:163 nova/openstack/common/sslutils.py:53 #, python-format msgid "Unable to find ca_file : %s" msgstr "" -#: nova/wsgi.py:160 nova/openstack/common/sslutils.py:56 +#: nova/wsgi.py:167 nova/openstack/common/sslutils.py:56 #, python-format msgid "Unable to find key_file : %s" msgstr "" -#: nova/wsgi.py:164 nova/openstack/common/sslutils.py:59 +#: nova/wsgi.py:171 nova/openstack/common/sslutils.py:59 msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" -#: nova/wsgi.py:195 +#: nova/wsgi.py:202 #, python-format msgid "Failed to start %(name)s on %(host)s:%(port)s with SSL support" msgstr "" -#: nova/wsgi.py:223 +#: nova/wsgi.py:238 msgid "Stopping WSGI server." msgstr "" -#: nova/wsgi.py:242 +#: nova/wsgi.py:258 msgid "WSGI server has stopped." msgstr "" -#: nova/wsgi.py:311 +#: nova/wsgi.py:327 msgid "You must implement __call__" msgstr "" @@ -2024,199 +2061,201 @@ msgstr "" msgid "Unknown error occurred." 
msgstr "" -#: nova/api/ec2/cloud.py:395 +#: nova/api/ec2/cloud.py:391 #, python-format msgid "Create snapshot of volume %s" msgstr "" -#: nova/api/ec2/cloud.py:420 +#: nova/api/ec2/cloud.py:416 #, python-format msgid "Could not find key pair(s): %s" msgstr "" -#: nova/api/ec2/cloud.py:436 +#: nova/api/ec2/cloud.py:432 #, python-format msgid "Create key pair %s" msgstr "" -#: nova/api/ec2/cloud.py:448 +#: nova/api/ec2/cloud.py:444 #, python-format msgid "Import key %s" msgstr "" -#: nova/api/ec2/cloud.py:461 +#: nova/api/ec2/cloud.py:457 #, python-format msgid "Delete key pair %s" msgstr "" -#: nova/api/ec2/cloud.py:603 nova/api/ec2/cloud.py:733 +#: nova/api/ec2/cloud.py:599 nova/api/ec2/cloud.py:729 msgid "need group_name or group_id" msgstr "" -#: nova/api/ec2/cloud.py:608 +#: nova/api/ec2/cloud.py:604 msgid "can't build a valid rule" msgstr "" -#: nova/api/ec2/cloud.py:616 +#: nova/api/ec2/cloud.py:612 #, python-format msgid "Invalid IP protocol %(protocol)s" msgstr "" -#: nova/api/ec2/cloud.py:650 nova/api/ec2/cloud.py:686 +#: nova/api/ec2/cloud.py:646 nova/api/ec2/cloud.py:682 msgid "No rule for the specified parameters." 
msgstr "" -#: nova/api/ec2/cloud.py:764 +#: nova/api/ec2/cloud.py:760 #, python-format msgid "Get console output for instance %s" msgstr "" -#: nova/api/ec2/cloud.py:836 +#: nova/api/ec2/cloud.py:832 #, python-format msgid "Create volume from snapshot %s" msgstr "" -#: nova/api/ec2/cloud.py:840 nova/api/openstack/compute/contrib/volumes.py:243 +#: nova/api/ec2/cloud.py:836 nova/api/openstack/compute/contrib/volumes.py:243 #, python-format msgid "Create volume of %s GB" msgstr "" -#: nova/api/ec2/cloud.py:880 +#: nova/api/ec2/cloud.py:876 #, python-format msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: nova/api/ec2/cloud.py:910 nova/api/openstack/compute/contrib/volumes.py:506 +#: nova/api/ec2/cloud.py:906 nova/api/openstack/compute/contrib/volumes.py:506 #, python-format msgid "Detach volume %s" msgstr "" -#: nova/api/ec2/cloud.py:1242 +#: nova/api/ec2/cloud.py:1238 msgid "Allocate address" msgstr "" -#: nova/api/ec2/cloud.py:1247 +#: nova/api/ec2/cloud.py:1243 #, python-format msgid "Release address %s" msgstr "" -#: nova/api/ec2/cloud.py:1252 +#: nova/api/ec2/cloud.py:1248 #, python-format msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "" -#: nova/api/ec2/cloud.py:1262 +#: nova/api/ec2/cloud.py:1258 msgid "Unable to associate IP Address, no fixed_ips." 
msgstr "" -#: nova/api/ec2/cloud.py:1270 -#: nova/api/openstack/compute/contrib/floating_ips.py:249 +#: nova/api/ec2/cloud.py:1266 +#: nova/api/openstack/compute/contrib/floating_ips.py:251 #, python-format msgid "multiple fixed_ips exist, using the first: %s" msgstr "" -#: nova/api/ec2/cloud.py:1283 +#: nova/api/ec2/cloud.py:1279 #, python-format msgid "Disassociate address %s" msgstr "" -#: nova/api/ec2/cloud.py:1300 nova/api/openstack/compute/servers.py:918 +#: nova/api/ec2/cloud.py:1296 nova/api/openstack/compute/servers.py:918 #: nova/api/openstack/compute/plugins/v3/multiple_create.py:64 msgid "min_count must be <= max_count" msgstr "" -#: nova/api/ec2/cloud.py:1332 +#: nova/api/ec2/cloud.py:1328 msgid "Image must be available" msgstr "" -#: nova/api/ec2/cloud.py:1429 +#: nova/api/ec2/cloud.py:1424 #, python-format msgid "Reboot instance %r" msgstr "" -#: nova/api/ec2/cloud.py:1542 +#: nova/api/ec2/cloud.py:1537 #, python-format msgid "De-registering image %s" msgstr "" -#: nova/api/ec2/cloud.py:1558 +#: nova/api/ec2/cloud.py:1553 msgid "imageLocation is required" msgstr "" -#: nova/api/ec2/cloud.py:1578 +#: nova/api/ec2/cloud.py:1573 #, python-format msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: nova/api/ec2/cloud.py:1639 +#: nova/api/ec2/cloud.py:1634 msgid "user or group not specified" msgstr "" -#: nova/api/ec2/cloud.py:1642 +#: nova/api/ec2/cloud.py:1637 msgid "only group \"all\" is supported" msgstr "" -#: nova/api/ec2/cloud.py:1645 +#: nova/api/ec2/cloud.py:1640 msgid "operation_type must be add or remove" msgstr "" -#: nova/api/ec2/cloud.py:1647 +#: nova/api/ec2/cloud.py:1642 #, python-format msgid "Updating image %s publicity" msgstr "" -#: nova/api/ec2/cloud.py:1660 +#: nova/api/ec2/cloud.py:1655 #, python-format msgid "Not allowed to modify attributes for image %s" msgstr "" -#: nova/api/ec2/cloud.py:1686 +#: nova/api/ec2/cloud.py:1685 #, python-format msgid "" "Invalid value '%(ec2_instance_id)s' for instanceId. 
Instance does not " "have a volume attached at root (%(root)s)" msgstr "" -#: nova/api/ec2/cloud.py:1717 +#: nova/api/ec2/cloud.py:1718 #, python-format -msgid "Couldn't stop instance within %d sec" +msgid "" +"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: " +"%(vm_state)s, current task_state: %(task_state)s" msgstr "" -#: nova/api/ec2/cloud.py:1736 +#: nova/api/ec2/cloud.py:1742 #, python-format msgid "image of %(instance)s at %(now)s" msgstr "" -#: nova/api/ec2/cloud.py:1761 nova/api/ec2/cloud.py:1811 +#: nova/api/ec2/cloud.py:1767 nova/api/ec2/cloud.py:1817 msgid "resource_id and tag are required" msgstr "" -#: nova/api/ec2/cloud.py:1765 nova/api/ec2/cloud.py:1815 +#: nova/api/ec2/cloud.py:1771 nova/api/ec2/cloud.py:1821 msgid "Expecting a list of resources" msgstr "" -#: nova/api/ec2/cloud.py:1770 nova/api/ec2/cloud.py:1820 -#: nova/api/ec2/cloud.py:1878 +#: nova/api/ec2/cloud.py:1776 nova/api/ec2/cloud.py:1826 +#: nova/api/ec2/cloud.py:1884 msgid "Only instances implemented" msgstr "" -#: nova/api/ec2/cloud.py:1774 nova/api/ec2/cloud.py:1824 +#: nova/api/ec2/cloud.py:1780 nova/api/ec2/cloud.py:1830 msgid "Expecting a list of tagSets" msgstr "" -#: nova/api/ec2/cloud.py:1780 nova/api/ec2/cloud.py:1833 +#: nova/api/ec2/cloud.py:1786 nova/api/ec2/cloud.py:1839 msgid "Expecting tagSet to be key/value pairs" msgstr "" -#: nova/api/ec2/cloud.py:1787 +#: nova/api/ec2/cloud.py:1793 msgid "Expecting both key and value to be set" msgstr "" -#: nova/api/ec2/cloud.py:1838 +#: nova/api/ec2/cloud.py:1844 msgid "Expecting key to be set" msgstr "" -#: nova/api/ec2/cloud.py:1912 +#: nova/api/ec2/cloud.py:1918 msgid "Invalid CIDR" msgstr "" @@ -2233,39 +2272,39 @@ msgstr "" msgid "Timestamp is invalid." msgstr "" -#: nova/api/metadata/handler.py:111 +#: nova/api/metadata/handler.py:112 msgid "" "X-Instance-ID present in request headers. The " "'service_neutron_metadata_proxy' option must be enabled to process this " "header." 
msgstr "" -#: nova/api/metadata/handler.py:140 nova/api/metadata/handler.py:147 +#: nova/api/metadata/handler.py:141 nova/api/metadata/handler.py:148 #, python-format msgid "Failed to get metadata for ip: %s" msgstr "" -#: nova/api/metadata/handler.py:142 nova/api/metadata/handler.py:198 +#: nova/api/metadata/handler.py:143 nova/api/metadata/handler.py:199 msgid "An unknown error has occurred. Please try your request again." msgstr "" -#: nova/api/metadata/handler.py:160 +#: nova/api/metadata/handler.py:161 msgid "X-Instance-ID header is missing from request." msgstr "" -#: nova/api/metadata/handler.py:162 +#: nova/api/metadata/handler.py:163 msgid "X-Tenant-ID header is missing from request." msgstr "" -#: nova/api/metadata/handler.py:164 +#: nova/api/metadata/handler.py:165 msgid "Multiple X-Instance-ID headers found within request." msgstr "" -#: nova/api/metadata/handler.py:166 +#: nova/api/metadata/handler.py:167 msgid "Multiple X-Tenant-ID headers found within request." msgstr "" -#: nova/api/metadata/handler.py:180 +#: nova/api/metadata/handler.py:181 #, python-format msgid "" "X-Instance-ID-Signature: %(signature)s does not match the expected value:" @@ -2273,16 +2312,16 @@ msgid "" "%(remote_address)s" msgstr "" -#: nova/api/metadata/handler.py:189 +#: nova/api/metadata/handler.py:190 msgid "Invalid proxy request signature." 
msgstr "" -#: nova/api/metadata/handler.py:196 nova/api/metadata/handler.py:203 +#: nova/api/metadata/handler.py:197 nova/api/metadata/handler.py:204 #, python-format msgid "Failed to get metadata for instance id: %s" msgstr "" -#: nova/api/metadata/handler.py:207 +#: nova/api/metadata/handler.py:208 #, python-format msgid "" "Tenant_id %(tenant_id)s does not match tenant_id of instance " @@ -2311,39 +2350,39 @@ msgstr "" msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: nova/api/openstack/__init__.py:190 +#: nova/api/openstack/__init__.py:186 msgid "Must specify an ExtensionManager class" msgstr "" -#: nova/api/openstack/__init__.py:236 nova/api/openstack/__init__.py:410 +#: nova/api/openstack/__init__.py:232 nova/api/openstack/__init__.py:406 #, python-format msgid "" "Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " "resource" msgstr "" -#: nova/api/openstack/__init__.py:283 +#: nova/api/openstack/__init__.py:279 #: nova/api/openstack/compute/plugins/v3/servers.py:99 #, python-format msgid "Not loading %s because it is in the blacklist" msgstr "" -#: nova/api/openstack/__init__.py:288 +#: nova/api/openstack/__init__.py:284 #: nova/api/openstack/compute/plugins/v3/servers.py:104 #, python-format msgid "Not loading %s because it is not in the whitelist" msgstr "" -#: nova/api/openstack/__init__.py:295 +#: nova/api/openstack/__init__.py:291 msgid "V3 API has been disabled by configuration" msgstr "" -#: nova/api/openstack/__init__.py:308 +#: nova/api/openstack/__init__.py:304 #, python-format msgid "Extensions in both blacklist and whitelist: %s" msgstr "" -#: nova/api/openstack/__init__.py:332 +#: nova/api/openstack/__init__.py:328 #, python-format msgid "Missing core API extensions: %s" msgstr "" @@ -2381,59 +2420,51 @@ msgstr "" msgid "offset param must be positive" msgstr "" -#: nova/api/openstack/common.py:259 nova/api/openstack/compute/flavors.py:146 -#: nova/api/openstack/compute/servers.py:603 -#: 
nova/api/openstack/compute/plugins/v3/flavors.py:110 -#: nova/api/openstack/compute/plugins/v3/servers.py:280 -#, python-format -msgid "marker [%s] not found" -msgstr "" - -#: nova/api/openstack/common.py:299 +#: nova/api/openstack/common.py:276 #, python-format msgid "href %s does not contain version" msgstr "" -#: nova/api/openstack/common.py:314 +#: nova/api/openstack/common.py:291 msgid "Image metadata limit exceeded" msgstr "" -#: nova/api/openstack/common.py:322 +#: nova/api/openstack/common.py:299 msgid "Image metadata key cannot be blank" msgstr "" -#: nova/api/openstack/common.py:325 +#: nova/api/openstack/common.py:302 msgid "Image metadata key too long" msgstr "" -#: nova/api/openstack/common.py:328 +#: nova/api/openstack/common.py:305 msgid "Invalid image metadata" msgstr "" -#: nova/api/openstack/common.py:391 +#: nova/api/openstack/common.py:368 #, python-format msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" msgstr "" -#: nova/api/openstack/common.py:394 +#: nova/api/openstack/common.py:371 #, python-format msgid "Cannot '%s' an instance which has never been active" msgstr "" -#: nova/api/openstack/common.py:397 +#: nova/api/openstack/common.py:374 #, python-format msgid "Instance is in an invalid state for '%s'" msgstr "" -#: nova/api/openstack/common.py:477 +#: nova/api/openstack/common.py:454 msgid "Rejecting snapshot request, snapshots currently disabled" msgstr "" -#: nova/api/openstack/common.py:479 +#: nova/api/openstack/common.py:456 msgid "Instance snapshots are not permitted at this time." msgstr "" -#: nova/api/openstack/common.py:600 +#: nova/api/openstack/common.py:577 msgid "Cells is not enabled." 
msgstr "" @@ -2569,6 +2600,14 @@ msgstr "" msgid "Invalid minDisk filter [%s]" msgstr "" +#: nova/api/openstack/compute/flavors.py:146 +#: nova/api/openstack/compute/servers.py:603 +#: nova/api/openstack/compute/plugins/v3/flavors.py:110 +#: nova/api/openstack/compute/plugins/v3/servers.py:280 +#, python-format +msgid "marker [%s] not found" +msgstr "" + #: nova/api/openstack/compute/image_metadata.py:35 #: nova/api/openstack/compute/images.py:141 #: nova/api/openstack/compute/images.py:157 @@ -2582,7 +2621,7 @@ msgstr "" #: nova/api/openstack/compute/image_metadata.py:82 #: nova/api/openstack/compute/server_metadata.py:79 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:108 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:85 +#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:72 #: nova/api/openstack/compute/plugins/v3/server_metadata.py:77 msgid "Request body and URI mismatch" msgstr "" @@ -2590,7 +2629,6 @@ msgstr "" #: nova/api/openstack/compute/image_metadata.py:85 #: nova/api/openstack/compute/server_metadata.py:83 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:111 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:88 #: nova/api/openstack/compute/plugins/v3/server_metadata.py:81 msgid "Request body contains too many items" msgstr "" @@ -2663,12 +2701,12 @@ msgstr "" #: nova/api/openstack/compute/servers.py:625 #: nova/api/openstack/compute/servers.py:772 -#: nova/api/openstack/compute/servers.py:1079 -#: nova/api/openstack/compute/servers.py:1199 -#: nova/api/openstack/compute/servers.py:1384 -#: nova/api/openstack/compute/plugins/v3/servers.py:615 -#: nova/api/openstack/compute/plugins/v3/servers.py:727 -#: nova/api/openstack/compute/plugins/v3/servers.py:846 +#: nova/api/openstack/compute/servers.py:1081 +#: nova/api/openstack/compute/servers.py:1203 +#: nova/api/openstack/compute/servers.py:1388 +#: nova/api/openstack/compute/plugins/v3/servers.py:617 +#: 
nova/api/openstack/compute/plugins/v3/servers.py:729 +#: nova/api/openstack/compute/plugins/v3/servers.py:848 msgid "Instance could not be found" msgstr "" @@ -2769,121 +2807,120 @@ msgstr "" msgid "Invalid config_drive provided." msgstr "" -#: nova/api/openstack/compute/servers.py:1064 +#: nova/api/openstack/compute/servers.py:1066 msgid "HostId cannot be updated." msgstr "" -#: nova/api/openstack/compute/servers.py:1068 +#: nova/api/openstack/compute/servers.py:1070 msgid "Personality cannot be updated." msgstr "" -#: nova/api/openstack/compute/servers.py:1094 -#: nova/api/openstack/compute/servers.py:1113 -#: nova/api/openstack/compute/plugins/v3/servers.py:626 -#: nova/api/openstack/compute/plugins/v3/servers.py:642 +#: nova/api/openstack/compute/servers.py:1096 +#: nova/api/openstack/compute/servers.py:1115 +#: nova/api/openstack/compute/plugins/v3/servers.py:628 +#: nova/api/openstack/compute/plugins/v3/servers.py:644 msgid "Instance has not been resized." msgstr "" -#: nova/api/openstack/compute/servers.py:1116 -#: nova/api/openstack/compute/plugins/v3/servers.py:645 +#: nova/api/openstack/compute/servers.py:1118 +#: nova/api/openstack/compute/plugins/v3/servers.py:647 msgid "Flavor used by the instance could not be found." 
msgstr "" -#: nova/api/openstack/compute/servers.py:1132 -#: nova/api/openstack/compute/plugins/v3/servers.py:659 +#: nova/api/openstack/compute/servers.py:1134 +#: nova/api/openstack/compute/plugins/v3/servers.py:661 msgid "Argument 'type' for reboot must be a string" msgstr "" -#: nova/api/openstack/compute/servers.py:1138 -#: nova/api/openstack/compute/plugins/v3/servers.py:665 +#: nova/api/openstack/compute/servers.py:1140 +#: nova/api/openstack/compute/plugins/v3/servers.py:667 msgid "Argument 'type' for reboot is not HARD or SOFT" msgstr "" -#: nova/api/openstack/compute/servers.py:1142 -#: nova/api/openstack/compute/plugins/v3/servers.py:669 +#: nova/api/openstack/compute/servers.py:1144 +#: nova/api/openstack/compute/plugins/v3/servers.py:671 msgid "Missing argument 'type' for reboot" msgstr "" -#: nova/api/openstack/compute/servers.py:1169 -#: nova/api/openstack/compute/plugins/v3/servers.py:697 +#: nova/api/openstack/compute/servers.py:1171 +#: nova/api/openstack/compute/plugins/v3/servers.py:699 msgid "Unable to locate requested flavor." msgstr "" -#: nova/api/openstack/compute/servers.py:1172 -#: nova/api/openstack/compute/plugins/v3/servers.py:700 +#: nova/api/openstack/compute/servers.py:1174 +#: nova/api/openstack/compute/plugins/v3/servers.py:702 msgid "Resize requires a flavor change." msgstr "" -#: nova/api/openstack/compute/servers.py:1180 -#: nova/api/openstack/compute/plugins/v3/servers.py:708 +#: nova/api/openstack/compute/servers.py:1182 +#: nova/api/openstack/compute/plugins/v3/servers.py:710 msgid "You are not authorized to access the image the instance was started with." msgstr "" -#: nova/api/openstack/compute/servers.py:1184 -#: nova/api/openstack/compute/plugins/v3/servers.py:712 +#: nova/api/openstack/compute/servers.py:1186 +#: nova/api/openstack/compute/plugins/v3/servers.py:714 msgid "Image that the instance was started with could not be found." 
msgstr "" -#: nova/api/openstack/compute/servers.py:1188 -#: nova/api/openstack/compute/plugins/v3/servers.py:716 +#: nova/api/openstack/compute/servers.py:1190 +#: nova/api/openstack/compute/plugins/v3/servers.py:718 msgid "Invalid instance image." msgstr "" -#: nova/api/openstack/compute/servers.py:1211 +#: nova/api/openstack/compute/servers.py:1215 msgid "Missing imageRef attribute" msgstr "" -#: nova/api/openstack/compute/servers.py:1216 -#: nova/api/openstack/compute/servers.py:1224 +#: nova/api/openstack/compute/servers.py:1220 +#: nova/api/openstack/compute/servers.py:1228 msgid "Invalid imageRef provided." msgstr "" -#: nova/api/openstack/compute/servers.py:1254 +#: nova/api/openstack/compute/servers.py:1258 msgid "Missing flavorRef attribute" msgstr "" -#: nova/api/openstack/compute/servers.py:1267 +#: nova/api/openstack/compute/servers.py:1271 msgid "No adminPass was specified" msgstr "" -#: nova/api/openstack/compute/servers.py:1275 +#: nova/api/openstack/compute/servers.py:1279 #: nova/api/openstack/compute/plugins/v3/admin_password.py:56 msgid "Unable to set password on instance" msgstr "" -#: nova/api/openstack/compute/servers.py:1284 +#: nova/api/openstack/compute/servers.py:1288 msgid "Unable to parse metadata key/value pairs." msgstr "" -#: nova/api/openstack/compute/servers.py:1297 +#: nova/api/openstack/compute/servers.py:1301 msgid "Resize request has invalid 'flavorRef' attribute." msgstr "" -#: nova/api/openstack/compute/servers.py:1300 +#: nova/api/openstack/compute/servers.py:1304 msgid "Resize requests require 'flavorRef' attribute." msgstr "" -#: nova/api/openstack/compute/servers.py:1320 +#: nova/api/openstack/compute/servers.py:1324 msgid "Could not parse imageRef from request." 
msgstr "" -#: nova/api/openstack/compute/servers.py:1390 -#: nova/api/openstack/compute/plugins/v3/servers.py:852 +#: nova/api/openstack/compute/servers.py:1394 +#: nova/api/openstack/compute/plugins/v3/servers.py:854 msgid "Cannot find image for rebuild" msgstr "" -#: nova/api/openstack/compute/servers.py:1423 +#: nova/api/openstack/compute/servers.py:1427 msgid "createImage entity requires name attribute" msgstr "" -#: nova/api/openstack/compute/servers.py:1432 -#: nova/api/openstack/compute/contrib/admin_actions.py:286 -#: nova/api/openstack/compute/plugins/v3/create_backup.py:85 -#: nova/api/openstack/compute/plugins/v3/servers.py:892 +#: nova/api/openstack/compute/servers.py:1436 +#: nova/api/openstack/compute/contrib/admin_actions.py:288 +#: nova/api/openstack/compute/plugins/v3/servers.py:894 msgid "Invalid metadata" msgstr "" -#: nova/api/openstack/compute/servers.py:1490 +#: nova/api/openstack/compute/servers.py:1494 msgid "Invalid adminPass" msgstr "" @@ -2891,11 +2928,11 @@ msgstr "" #: nova/api/openstack/compute/contrib/admin_actions.py:88 #: nova/api/openstack/compute/contrib/admin_actions.py:113 #: nova/api/openstack/compute/contrib/admin_actions.py:135 -#: nova/api/openstack/compute/contrib/admin_actions.py:176 -#: nova/api/openstack/compute/contrib/admin_actions.py:195 -#: nova/api/openstack/compute/contrib/admin_actions.py:214 -#: nova/api/openstack/compute/contrib/admin_actions.py:233 -#: nova/api/openstack/compute/contrib/admin_actions.py:391 +#: nova/api/openstack/compute/contrib/admin_actions.py:178 +#: nova/api/openstack/compute/contrib/admin_actions.py:197 +#: nova/api/openstack/compute/contrib/admin_actions.py:216 +#: nova/api/openstack/compute/contrib/admin_actions.py:235 +#: nova/api/openstack/compute/contrib/admin_actions.py:393 #: nova/api/openstack/compute/contrib/multinic.py:43 #: nova/api/openstack/compute/contrib/rescue.py:45 #: nova/api/openstack/compute/contrib/shelve.py:43 @@ -2903,6 +2940,8 @@ msgid "Server not found" msgstr "" 
#: nova/api/openstack/compute/contrib/admin_actions.py:66 +#: nova/api/openstack/compute/plugins/v3/pause_server.py:59 +#: nova/api/openstack/compute/plugins/v3/pause_server.py:81 msgid "Virt driver does not implement pause function." msgstr "" @@ -2930,129 +2969,113 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:163 +#: nova/api/openstack/compute/contrib/admin_actions.py:165 #, python-format msgid "Error in migrate %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:182 +#: nova/api/openstack/compute/contrib/admin_actions.py:184 #, python-format msgid "Compute.api::reset_network %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:201 +#: nova/api/openstack/compute/contrib/admin_actions.py:203 #, python-format msgid "Compute.api::inject_network_info %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:218 +#: nova/api/openstack/compute/contrib/admin_actions.py:220 #, python-format msgid "Compute.api::lock %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:237 +#: nova/api/openstack/compute/contrib/admin_actions.py:239 #, python-format msgid "Compute.api::unlock %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:263 +#: nova/api/openstack/compute/contrib/admin_actions.py:265 #, python-format msgid "createBackup entity requires %s attribute" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:267 +#: nova/api/openstack/compute/contrib/admin_actions.py:269 msgid "Malformed createBackup entity" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:273 +#: nova/api/openstack/compute/contrib/admin_actions.py:275 msgid "createBackup attribute 'rotation' must be an integer" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:276 +#: nova/api/openstack/compute/contrib/admin_actions.py:278 msgid "createBackup attribute 'rotation' must be greater than or equal to zero" msgstr "" -#: 
nova/api/openstack/compute/contrib/admin_actions.py:292 -#: nova/api/openstack/compute/contrib/console_output.py:45 +#: nova/api/openstack/compute/contrib/admin_actions.py:294 +#: nova/api/openstack/compute/contrib/console_output.py:46 #: nova/api/openstack/compute/contrib/server_start_stop.py:40 msgid "Instance not found" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:323 -#: nova/api/openstack/compute/plugins/v3/migrate_server.py:80 +#: nova/api/openstack/compute/contrib/admin_actions.py:325 msgid "" "host, block_migration and disk_over_commit must be specified for live " "migration." msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:360 +#: nova/api/openstack/compute/contrib/admin_actions.py:362 #, python-format msgid "Live migration of instance %s to another host failed" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:363 +#: nova/api/openstack/compute/contrib/admin_actions.py:365 #, python-format msgid "Live migration of instance %(id)s to host %(host)s failed" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:381 +#: nova/api/openstack/compute/contrib/admin_actions.py:383 #: nova/api/openstack/compute/plugins/v3/admin_actions.py:83 #, python-format msgid "Desired state must be specified. 
Valid states are: %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:395 +#: nova/api/openstack/compute/contrib/admin_actions.py:397 #, python-format msgid "Compute.api::resetState %s" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:99 -#, python-format -msgid "Cannot show aggregate: %s" -msgstr "" - -#: nova/api/openstack/compute/contrib/aggregates.py:137 -#, python-format -msgid "Cannot update aggregate: %s" -msgstr "" - -#: nova/api/openstack/compute/contrib/aggregates.py:151 -#, python-format -msgid "Cannot delete aggregate: %s" -msgstr "" - -#: nova/api/openstack/compute/contrib/aggregates.py:162 +#: nova/api/openstack/compute/contrib/aggregates.py:161 #, python-format msgid "Aggregates does not have %s action" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:166 +#: nova/api/openstack/compute/contrib/aggregates.py:165 #: nova/api/openstack/compute/contrib/flavormanage.py:55 #: nova/api/openstack/compute/contrib/keypairs.py:86 -#: nova/api/openstack/compute/plugins/v3/aggregates.py:167 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:169 msgid "Invalid request body" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:176 -#: nova/api/openstack/compute/contrib/aggregates.py:181 +#: nova/api/openstack/compute/contrib/aggregates.py:175 +#: nova/api/openstack/compute/contrib/aggregates.py:180 #, python-format msgid "Cannot add host %(host)s in aggregate %(id)s" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:195 -#: nova/api/openstack/compute/contrib/aggregates.py:199 -#: nova/api/openstack/compute/plugins/v3/aggregates.py:151 -#: nova/api/openstack/compute/plugins/v3/aggregates.py:155 +#: nova/api/openstack/compute/contrib/aggregates.py:194 +#: nova/api/openstack/compute/contrib/aggregates.py:198 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:153 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:157 #, python-format msgid "Cannot remove host %(host)s in aggregate 
%(id)s" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:218 -#: nova/api/openstack/compute/plugins/v3/aggregates.py:175 +#: nova/api/openstack/compute/contrib/aggregates.py:217 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:177 msgid "The value of metadata must be a dict" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:230 +#: nova/api/openstack/compute/contrib/aggregates.py:229 #, python-format msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" msgstr "" @@ -3075,7 +3098,7 @@ msgstr "" #: nova/api/openstack/compute/contrib/attach_interfaces.py:119 #: nova/api/openstack/compute/contrib/attach_interfaces.py:154 #: nova/api/openstack/compute/contrib/attach_interfaces.py:177 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:166 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:169 msgid "Network driver does not support this function." msgstr "" @@ -3084,12 +3107,12 @@ msgid "Failed to attach interface" msgstr "" #: nova/api/openstack/compute/contrib/attach_interfaces.py:130 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:128 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:131 msgid "Attachments update is not supported" msgstr "" #: nova/api/openstack/compute/contrib/attach_interfaces.py:142 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:139 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:142 #, python-format msgid "Detach interface %s" msgstr "" @@ -3166,19 +3189,19 @@ msgstr "" msgid "The requested console type details are not accessible" msgstr "" -#: nova/api/openstack/compute/contrib/console_output.py:51 +#: nova/api/openstack/compute/contrib/console_output.py:52 msgid "os-getConsoleOutput malformed or missing from request body" msgstr "" -#: nova/api/openstack/compute/contrib/console_output.py:62 +#: nova/api/openstack/compute/contrib/console_output.py:63 msgid "Length in request body must be an integer value" msgstr "" -#: 
nova/api/openstack/compute/contrib/console_output.py:70 +#: nova/api/openstack/compute/contrib/console_output.py:71 msgid "Unable to get console" msgstr "" -#: nova/api/openstack/compute/contrib/console_output.py:75 +#: nova/api/openstack/compute/contrib/console_output.py:76 #: nova/api/openstack/compute/plugins/v3/console_output.py:60 msgid "Unable to get console log, functionality not implemented" msgstr "" @@ -3188,17 +3211,17 @@ msgid "Instance not yet ready" msgstr "" #: nova/api/openstack/compute/contrib/consoles.py:52 -#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:62 +#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:60 msgid "Unable to get vnc console, functionality not implemented" msgstr "" #: nova/api/openstack/compute/contrib/consoles.py:76 -#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:93 +#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:89 msgid "Unable to get spice console, functionality not implemented" msgstr "" #: nova/api/openstack/compute/contrib/consoles.py:101 -#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:127 +#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:121 msgid "Unable to get rdp console, functionality not implemented" msgstr "" @@ -3250,8 +3273,12 @@ msgstr "" msgid "No request body" msgstr "" +#: nova/api/openstack/compute/contrib/flavor_access.py:170 +#: nova/api/openstack/compute/contrib/flavor_access.py:194 +msgid "Missing tenant parameter" +msgstr "" + #: nova/api/openstack/compute/contrib/flavorextraspecs.py:56 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:42 msgid "No Request Body" msgstr "" @@ -3261,8 +3288,8 @@ msgstr "" #: nova/api/openstack/compute/contrib/flavorextraspecs.py:134 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:150 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:113 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:132 +#: 
nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:96 +#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:115 #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "" @@ -3272,7 +3299,7 @@ msgid "DNS entries not found." msgstr "" #: nova/api/openstack/compute/contrib/floating_ips.py:129 -#: nova/api/openstack/compute/contrib/floating_ips.py:177 +#: nova/api/openstack/compute/contrib/floating_ips.py:183 #, python-format msgid "Floating ip not found for id %s" msgstr "" @@ -3286,51 +3313,60 @@ msgstr "" msgid "No more floating ips available." msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:218 -#: nova/api/openstack/compute/contrib/floating_ips.py:283 -#: nova/api/openstack/compute/contrib/security_groups.py:481 +#: nova/api/openstack/compute/contrib/floating_ips.py:168 +#, python-format +msgid "IP allocation over quota in pool %s." +msgstr "" + +#: nova/api/openstack/compute/contrib/floating_ips.py:170 +msgid "IP allocation over quota." 
+msgstr "" + +#: nova/api/openstack/compute/contrib/floating_ips.py:220 +#: nova/api/openstack/compute/contrib/floating_ips.py:285 +#: nova/api/openstack/compute/contrib/security_groups.py:482 msgid "Missing parameter dict" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:221 -#: nova/api/openstack/compute/contrib/floating_ips.py:286 +#: nova/api/openstack/compute/contrib/floating_ips.py:223 +#: nova/api/openstack/compute/contrib/floating_ips.py:288 msgid "Address not specified" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:227 +#: nova/api/openstack/compute/contrib/floating_ips.py:229 msgid "No nw_info cache associated with instance" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:232 +#: nova/api/openstack/compute/contrib/floating_ips.py:234 msgid "No fixed ips associated to instance" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:243 +#: nova/api/openstack/compute/contrib/floating_ips.py:245 msgid "Specified fixed address not assigned to instance" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:257 +#: nova/api/openstack/compute/contrib/floating_ips.py:259 msgid "floating ip is already associated" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:260 +#: nova/api/openstack/compute/contrib/floating_ips.py:262 msgid "l3driver call to add floating ip failed" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:263 -#: nova/api/openstack/compute/contrib/floating_ips.py:294 +#: nova/api/openstack/compute/contrib/floating_ips.py:265 +#: nova/api/openstack/compute/contrib/floating_ips.py:296 msgid "floating ip not found" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:268 +#: nova/api/openstack/compute/contrib/floating_ips.py:270 msgid "Error. 
Unable to associate floating ip" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:309 +#: nova/api/openstack/compute/contrib/floating_ips.py:311 msgid "Floating ip is not associated" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:313 +#: nova/api/openstack/compute/contrib/floating_ips.py:315 #, python-format msgid "Floating ip %(address)s is not associated with instance %(id)s." msgstr "" @@ -3352,63 +3388,59 @@ msgid "fping utility is not found." msgstr "" #: nova/api/openstack/compute/contrib/hosts.py:183 -#: nova/api/openstack/compute/plugins/v3/hosts.py:128 #, python-format msgid "Invalid update setting: '%s'" msgstr "" #: nova/api/openstack/compute/contrib/hosts.py:186 -#: nova/api/openstack/compute/plugins/v3/hosts.py:131 #, python-format msgid "Invalid status: '%s'" msgstr "" #: nova/api/openstack/compute/contrib/hosts.py:188 -#: nova/api/openstack/compute/plugins/v3/hosts.py:133 #, python-format msgid "Invalid mode: '%s'" msgstr "" #: nova/api/openstack/compute/contrib/hosts.py:190 -#: nova/api/openstack/compute/plugins/v3/hosts.py:135 msgid "'status' or 'maintenance_mode' needed for host update" msgstr "" #: nova/api/openstack/compute/contrib/hosts.py:206 -#: nova/api/openstack/compute/plugins/v3/hosts.py:152 +#: nova/api/openstack/compute/plugins/v3/hosts.py:134 #, python-format msgid "Putting host %(host_name)s in maintenance mode %(mode)s." msgstr "" #: nova/api/openstack/compute/contrib/hosts.py:212 -#: nova/api/openstack/compute/plugins/v3/hosts.py:158 +#: nova/api/openstack/compute/plugins/v3/hosts.py:140 msgid "Virt driver does not implement host maintenance mode." msgstr "" #: nova/api/openstack/compute/contrib/hosts.py:227 -#: nova/api/openstack/compute/plugins/v3/hosts.py:174 +#: nova/api/openstack/compute/plugins/v3/hosts.py:156 #, python-format msgid "Enabling host %s." 
msgstr "" #: nova/api/openstack/compute/contrib/hosts.py:229 -#: nova/api/openstack/compute/plugins/v3/hosts.py:176 +#: nova/api/openstack/compute/plugins/v3/hosts.py:158 #, python-format msgid "Disabling host %s." msgstr "" #: nova/api/openstack/compute/contrib/hosts.py:234 -#: nova/api/openstack/compute/plugins/v3/hosts.py:181 +#: nova/api/openstack/compute/plugins/v3/hosts.py:163 msgid "Virt driver does not implement host disabled status." msgstr "" #: nova/api/openstack/compute/contrib/hosts.py:250 -#: nova/api/openstack/compute/plugins/v3/hosts.py:199 +#: nova/api/openstack/compute/plugins/v3/hosts.py:181 msgid "Virt driver does not implement host power management." msgstr "" #: nova/api/openstack/compute/contrib/hosts.py:336 -#: nova/api/openstack/compute/plugins/v3/hosts.py:292 +#: nova/api/openstack/compute/plugins/v3/hosts.py:274 msgid "Describe-resource is admin only functionality" msgstr "" @@ -3596,7 +3628,7 @@ msgid "Malformed scheduler_hints attribute" msgstr "" #: nova/api/openstack/compute/contrib/security_group_default_rules.py:127 -#: nova/api/openstack/compute/contrib/security_groups.py:386 +#: nova/api/openstack/compute/contrib/security_groups.py:387 msgid "Not enough parameters to build a valid rule." 
msgstr "" @@ -3608,16 +3640,16 @@ msgstr "" msgid "security group default rule not found" msgstr "" -#: nova/api/openstack/compute/contrib/security_groups.py:394 +#: nova/api/openstack/compute/contrib/security_groups.py:395 #, python-format msgid "Bad prefix for network in cidr %s" msgstr "" -#: nova/api/openstack/compute/contrib/security_groups.py:484 +#: nova/api/openstack/compute/contrib/security_groups.py:485 msgid "Security group not specified" msgstr "" -#: nova/api/openstack/compute/contrib/security_groups.py:488 +#: nova/api/openstack/compute/contrib/security_groups.py:489 msgid "Security group name cannot be empty" msgstr "" @@ -3650,39 +3682,39 @@ msgstr "" msgid "No instances found for any event" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:162 +#: nova/api/openstack/compute/contrib/server_groups.py:161 msgid "Conflicting policies configured!" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:167 +#: nova/api/openstack/compute/contrib/server_groups.py:166 #, python-format msgid "Invalid policies: %s" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:172 +#: nova/api/openstack/compute/contrib/server_groups.py:171 msgid "Duplicate policies configured!" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:177 +#: nova/api/openstack/compute/contrib/server_groups.py:176 msgid "the body is invalid." msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:186 +#: nova/api/openstack/compute/contrib/server_groups.py:185 #, python-format msgid "'%s' is either missing or empty." 
msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:192 +#: nova/api/openstack/compute/contrib/server_groups.py:191 #, python-format msgid "Invalid format for name: '%s'" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:200 +#: nova/api/openstack/compute/contrib/server_groups.py:199 #, python-format msgid "'%s' is not a list" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:204 +#: nova/api/openstack/compute/contrib/server_groups.py:203 #, python-format msgid "unsupported fields: %s" msgstr "" @@ -3709,11 +3741,11 @@ msgstr "" msgid "Missing disabled reason field" msgstr "" -#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:231 +#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:230 msgid "Datetime is in invalid format" msgstr "" -#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:250 +#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:249 msgid "Invalid start time. The start time cannot occur after the end time." 
msgstr "" @@ -3784,11 +3816,11 @@ msgstr "" msgid "access_ip_v6 is not proper IPv6 format" msgstr "" -#: nova/api/openstack/compute/plugins/v3/aggregates.py:170 +#: nova/api/openstack/compute/plugins/v3/aggregates.py:172 msgid "Invalid request format for metadata" msgstr "" -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:103 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:106 #, python-format msgid "Attach interface to %s" msgstr "" @@ -3802,23 +3834,6 @@ msgstr "" msgid "token not provided" msgstr "" -#: nova/api/openstack/compute/plugins/v3/create_backup.py:62 -#, python-format -msgid "create_backup entity requires %s attribute" -msgstr "" - -#: nova/api/openstack/compute/plugins/v3/create_backup.py:66 -msgid "Malformed create_backup entity" -msgstr "" - -#: nova/api/openstack/compute/plugins/v3/create_backup.py:72 -msgid "create_backup attribute 'rotation' must be an integer" -msgstr "" - -#: nova/api/openstack/compute/plugins/v3/create_backup.py:75 -msgid "create_backup attribute 'rotation' must be greater than or equal to zero" -msgstr "" - #: nova/api/openstack/compute/plugins/v3/extended_volumes.py:98 msgid "The volume was either invalid or not attached to the instance." 
msgstr "" @@ -3844,19 +3859,6 @@ msgstr "" msgid "Invalid min_disk filter [%s]" msgstr "" -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:66 -msgid "No or bad extra_specs provided" -msgstr "" - -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:73 -#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:95 -msgid "Concurrent transaction has been committed, try again" -msgstr "" - -#: nova/api/openstack/compute/plugins/v3/hosts.py:120 -msgid "The request body invalid" -msgstr "" - #: nova/api/openstack/compute/plugins/v3/hypervisors.py:125 msgid "Need parameter 'query' to specify which hypervisor to filter on" msgstr "" @@ -3882,7 +3884,7 @@ msgid "" msgstr "" #: nova/api/openstack/compute/plugins/v3/servers.py:412 -#: nova/api/openstack/compute/plugins/v3/servers.py:585 +#: nova/api/openstack/compute/plugins/v3/servers.py:587 msgid "The request body is invalid" msgstr "" @@ -3891,39 +3893,39 @@ msgstr "" msgid "Invalid flavor_ref provided." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:596 +#: nova/api/openstack/compute/plugins/v3/servers.py:598 msgid "host_id cannot be updated." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:741 +#: nova/api/openstack/compute/plugins/v3/servers.py:743 msgid "Invalid image_ref provided." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:760 +#: nova/api/openstack/compute/plugins/v3/servers.py:762 msgid "Missing image_ref attribute" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:767 +#: nova/api/openstack/compute/plugins/v3/servers.py:769 msgid "Missing flavor_ref attribute" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:780 +#: nova/api/openstack/compute/plugins/v3/servers.py:782 msgid "Resize request has invalid 'flavor_ref' attribute." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:783 +#: nova/api/openstack/compute/plugins/v3/servers.py:785 msgid "Resize requests require 'flavor_ref' attribute." 
msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:799 +#: nova/api/openstack/compute/plugins/v3/servers.py:801 msgid "Could not parse image_ref from request." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:883 +#: nova/api/openstack/compute/plugins/v3/servers.py:885 msgid "create_image entity requires name attribute" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:945 +#: nova/api/openstack/compute/plugins/v3/servers.py:947 msgid "Invalid admin_password" msgstr "" @@ -3935,12 +3937,12 @@ msgstr "" msgid "Instance has had its instance_type removed from the DB" msgstr "" -#: nova/api/validation/validators.py:61 +#: nova/api/validation/validators.py:62 #, python-format msgid "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" -#: nova/cells/manager.py:78 +#: nova/cells/manager.py:79 msgid "" "The cells feature of Nova is considered experimental by the OpenStack " "project because it receives much less testing than the rest of Nova. 
This" @@ -3993,42 +3995,42 @@ msgstr "" msgid "Unknown method '%(method)s' in compute API" msgstr "" -#: nova/cells/messaging.py:1096 +#: nova/cells/messaging.py:1103 #, python-format msgid "Got message to create instance fault: %(instance_fault)s" msgstr "" -#: nova/cells/messaging.py:1119 +#: nova/cells/messaging.py:1126 #, python-format msgid "" "Forcing a sync of instances, project_id=%(projid_str)s, " "updated_since=%(since_str)s" msgstr "" -#: nova/cells/messaging.py:1198 +#: nova/cells/messaging.py:1205 #, python-format msgid "No match when trying to update BDM: %(bdm)s" msgstr "" -#: nova/cells/messaging.py:1673 +#: nova/cells/messaging.py:1680 #, python-format msgid "No cell_name for %(method)s() from API" msgstr "" -#: nova/cells/messaging.py:1690 +#: nova/cells/messaging.py:1697 msgid "No cell_name for instance update from API" msgstr "" -#: nova/cells/messaging.py:1853 +#: nova/cells/messaging.py:1860 #, python-format msgid "Returning exception %s to caller" msgstr "" -#: nova/cells/rpcapi.py:369 +#: nova/cells/rpcapi.py:378 msgid "Failed to notify cells of BDM update/create." msgstr "" -#: nova/cells/rpcapi.py:385 +#: nova/cells/rpcapi.py:394 msgid "Failed to notify cells of BDM destroy." 
msgstr "" @@ -4098,71 +4100,71 @@ msgstr "" msgid "Failed to load %s" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:211 +#: nova/cmd/baremetal_deploy_helper.py:210 #, python-format msgid "parent device '%s' not found" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:214 +#: nova/cmd/baremetal_deploy_helper.py:213 #, python-format msgid "root device '%s' not found" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:216 +#: nova/cmd/baremetal_deploy_helper.py:215 #, python-format msgid "swap device '%s' not found" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:218 +#: nova/cmd/baremetal_deploy_helper.py:217 #, python-format msgid "ephemeral device '%s' not found" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:228 +#: nova/cmd/baremetal_deploy_helper.py:227 msgid "Failed to detect root device UUID." msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:252 +#: nova/cmd/baremetal_deploy_helper.py:251 #, python-format msgid "Cmd : %s" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:253 +#: nova/cmd/baremetal_deploy_helper.py:252 #, python-format msgid "StdOut : %r" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:254 +#: nova/cmd/baremetal_deploy_helper.py:253 #, python-format msgid "StdErr : %r" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:282 +#: nova/cmd/baremetal_deploy_helper.py:281 #, python-format msgid "start deployment for node %(node_id)s, params %(params)s" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:291 +#: nova/cmd/baremetal_deploy_helper.py:290 #, python-format msgid "deployment to node %s failed" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:295 +#: nova/cmd/baremetal_deploy_helper.py:294 #, python-format msgid "deployment to node %s done" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:317 +#: nova/cmd/baremetal_deploy_helper.py:316 #, python-format msgid "post: environ=%s" msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:336 +#: nova/cmd/baremetal_deploy_helper.py:335 #, python-format msgid "Deploy agent error message: %s" 
msgstr "" -#: nova/cmd/baremetal_deploy_helper.py:360 +#: nova/cmd/baremetal_deploy_helper.py:359 #, python-format msgid "request is queued: node %(node_id)s, params %(params)s" msgstr "" @@ -4279,40 +4281,40 @@ msgid "" "Use python-neutronclient instead." msgstr "" -#: nova/cmd/manage.py:551 nova/tests/test_nova_manage.py:217 +#: nova/cmd/manage.py:551 nova/tests/test_nova_manage.py:218 msgid "id" msgstr "" -#: nova/cmd/manage.py:552 nova/tests/test_nova_manage.py:218 +#: nova/cmd/manage.py:552 nova/tests/test_nova_manage.py:219 msgid "IPv4" msgstr "" -#: nova/cmd/manage.py:553 nova/tests/test_nova_manage.py:219 +#: nova/cmd/manage.py:553 nova/tests/test_nova_manage.py:220 msgid "IPv6" msgstr "" -#: nova/cmd/manage.py:554 nova/tests/test_nova_manage.py:220 +#: nova/cmd/manage.py:554 nova/tests/test_nova_manage.py:221 msgid "start address" msgstr "" -#: nova/cmd/manage.py:555 nova/tests/test_nova_manage.py:221 +#: nova/cmd/manage.py:555 nova/tests/test_nova_manage.py:222 msgid "DNS1" msgstr "" -#: nova/cmd/manage.py:556 nova/tests/test_nova_manage.py:222 +#: nova/cmd/manage.py:556 nova/tests/test_nova_manage.py:223 msgid "DNS2" msgstr "" -#: nova/cmd/manage.py:557 nova/tests/test_nova_manage.py:223 +#: nova/cmd/manage.py:557 nova/tests/test_nova_manage.py:224 msgid "VlanID" msgstr "" #: nova/cmd/manage.py:558 nova/cmd/manage.py:665 -#: nova/tests/test_nova_manage.py:224 +#: nova/tests/test_nova_manage.py:225 msgid "project" msgstr "" -#: nova/cmd/manage.py:559 nova/tests/test_nova_manage.py:225 +#: nova/cmd/manage.py:559 nova/tests/test_nova_manage.py:226 msgid "uuid" msgstr "" @@ -4523,288 +4525,296 @@ msgstr "" msgid "No db access allowed in nova-network: %s" msgstr "" -#: nova/compute/api.py:362 +#: nova/compute/api.py:353 msgid "Cannot run any more instances of this type." msgstr "" -#: nova/compute/api.py:369 +#: nova/compute/api.py:360 #, python-format msgid "Can only run %s more instances of this type." 
msgstr "" -#: nova/compute/api.py:381 +#: nova/compute/api.py:372 #, python-format msgid "" "%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)d " "instances. %(msg)s" msgstr "" -#: nova/compute/api.py:385 +#: nova/compute/api.py:376 #, python-format msgid "" "%(overs)s quota exceeded for %(pid)s, tried to run between %(min_count)d " "and %(max_count)d instances. %(msg)s" msgstr "" -#: nova/compute/api.py:406 +#: nova/compute/api.py:397 msgid "Metadata type should be dict." msgstr "" -#: nova/compute/api.py:412 +#: nova/compute/api.py:403 #, python-format msgid "" "Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " "properties" msgstr "" -#: nova/compute/api.py:424 +#: nova/compute/api.py:415 #, python-format msgid "Metadata property key '%s' is not a string." msgstr "" -#: nova/compute/api.py:427 +#: nova/compute/api.py:418 #, python-format msgid "Metadata property value '%(v)s' for key '%(k)s' is not a string." msgstr "" -#: nova/compute/api.py:431 +#: nova/compute/api.py:422 msgid "Metadata property key blank" msgstr "" -#: nova/compute/api.py:434 +#: nova/compute/api.py:425 msgid "Metadata property key greater than 255 characters" msgstr "" -#: nova/compute/api.py:437 +#: nova/compute/api.py:428 msgid "Metadata property value greater than 255 characters" msgstr "" -#: nova/compute/api.py:574 +#: nova/compute/api.py:565 msgid "Failed to set instance name using multi_instance_display_name_template." msgstr "" -#: nova/compute/api.py:676 +#: nova/compute/api.py:667 msgid "Cannot attach one or more volumes to multiple instances" msgstr "" -#: nova/compute/api.py:718 +#: nova/compute/api.py:709 msgid "The requested availability zone is not available" msgstr "" -#: nova/compute/api.py:1119 +#: nova/compute/api.py:1110 msgid "" "Images with destination_type 'volume' need to have a non-zero size " "specified" msgstr "" -#: nova/compute/api.py:1150 +#: nova/compute/api.py:1141 msgid "More than one swap drive requested." 
msgstr "" -#: nova/compute/api.py:1299 -#: nova/tests/api/openstack/compute/test_servers.py:3122 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2460 +#: nova/compute/api.py:1290 +#: nova/tests/api/openstack/compute/test_servers.py:3145 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2484 msgid "" "Unable to launch multiple instances with a single configured port ID. " "Please launch your instance one by one with different ports." msgstr "" -#: nova/compute/api.py:1401 +#: nova/compute/api.py:1311 +msgid "max_count cannot be greater than 1 if an fixed_ip is specified." +msgstr "" + +#: nova/compute/api.py:1415 msgid "instance termination disabled" msgstr "" -#: nova/compute/api.py:1416 +#: nova/compute/api.py:1430 #, python-format msgid "Working on deleting snapshot %s from shelved instance..." msgstr "" -#: nova/compute/api.py:1423 +#: nova/compute/api.py:1437 #, python-format msgid "Failed to delete snapshot from shelved instance (%s)." msgstr "" -#: nova/compute/api.py:1427 +#: nova/compute/api.py:1441 msgid "" "Something wrong happened when trying to delete snapshot from shelved " "instance." 
msgstr "" -#: nova/compute/api.py:1492 +#: nova/compute/api.py:1506 msgid "Instance is already in deleting state, ignoring this request" msgstr "" -#: nova/compute/api.py:1540 +#: nova/compute/api.py:1553 #, python-format msgid "" "Found an unconfirmed migration during delete, id: %(id)s, status: " "%(status)s" msgstr "" -#: nova/compute/api.py:1550 +#: nova/compute/api.py:1563 msgid "Instance may have been confirmed during delete" msgstr "" -#: nova/compute/api.py:1567 +#: nova/compute/api.py:1580 #, python-format msgid "Migration %s may have been confirmed during delete" msgstr "" -#: nova/compute/api.py:1603 +#: nova/compute/api.py:1615 #, python-format msgid "Flavor %d not found" msgstr "" -#: nova/compute/api.py:1621 +#: nova/compute/api.py:1633 #, python-format msgid "instance's host %s is down, deleting from database" msgstr "" -#: nova/compute/api.py:1648 nova/compute/manager.py:2279 +#: nova/compute/api.py:1660 #, python-format msgid "Ignoring volume cleanup failure due to %s" msgstr "" -#: nova/compute/api.py:2043 +#: nova/compute/api.py:2061 #, python-format msgid "snapshot for %s" msgstr "" -#: nova/compute/api.py:2415 +#: nova/compute/api.py:2399 +msgid "Resize to zero disk flavor is not allowed." +msgstr "" + +#: nova/compute/api.py:2438 #, python-format msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance." msgstr "" -#: nova/compute/api.py:2584 +#: nova/compute/api.py:2613 msgid "Cannot rescue a volume-backed instance" msgstr "" -#: nova/compute/api.py:2811 +#: nova/compute/api.py:2840 msgid "Volume must be attached in order to detach." msgstr "" -#: nova/compute/api.py:2831 +#: nova/compute/api.py:2860 msgid "Old volume is attached to a different instance." msgstr "" -#: nova/compute/api.py:2834 +#: nova/compute/api.py:2863 msgid "New volume must be detached in order to swap." msgstr "" -#: nova/compute/api.py:2837 +#: nova/compute/api.py:2866 msgid "New volume must be the same size or larger." 
msgstr "" -#: nova/compute/api.py:3032 +#: nova/compute/api.py:3067 #, python-format msgid "Instance compute service state on %s expected to be down, but it was up." msgstr "" -#: nova/compute/api.py:3335 +#: nova/compute/api.py:3369 msgid "Host aggregate is not empty" msgstr "" -#: nova/compute/api.py:3368 +#: nova/compute/api.py:3402 #, python-format msgid "More than 1 AZ for host %s" msgstr "" -#: nova/compute/api.py:3403 +#: nova/compute/api.py:3437 #, python-format msgid "Host already in availability zone %s" msgstr "" -#: nova/compute/api.py:3491 nova/tests/compute/test_keypairs.py:135 +#: nova/compute/api.py:3525 nova/tests/compute/test_keypairs.py:135 msgid "Keypair name contains unsafe characters" msgstr "" -#: nova/compute/api.py:3495 nova/tests/compute/test_keypairs.py:127 +#: nova/compute/api.py:3529 nova/tests/compute/test_keypairs.py:127 #: nova/tests/compute/test_keypairs.py:131 msgid "Keypair name must be between 1 and 255 characters long" msgstr "" -#: nova/compute/api.py:3583 +#: nova/compute/api.py:3617 #, python-format msgid "Security group %s is not a string or unicode" msgstr "" -#: nova/compute/api.py:3586 +#: nova/compute/api.py:3620 #, python-format msgid "Security group %s cannot be empty." msgstr "" -#: nova/compute/api.py:3594 +#: nova/compute/api.py:3628 #, python-format msgid "" "Value (%(value)s) for parameter Group%(property)s is invalid. Content " "limited to '%(allowed)s'." msgstr "" -#: nova/compute/api.py:3600 +#: nova/compute/api.py:3634 #, python-format msgid "Security group %s should not be greater than 255 characters." msgstr "" -#: nova/compute/api.py:3618 +#: nova/compute/api.py:3652 msgid "Quota exceeded, too many security groups." 
msgstr "" -#: nova/compute/api.py:3621 +#: nova/compute/api.py:3655 #, python-format msgid "Create Security Group %s" msgstr "" -#: nova/compute/api.py:3633 +#: nova/compute/api.py:3667 #, python-format msgid "Security group %s already exists" msgstr "" -#: nova/compute/api.py:3646 +#: nova/compute/api.py:3680 #, python-format msgid "Unable to update system group '%s'" msgstr "" -#: nova/compute/api.py:3708 +#: nova/compute/api.py:3742 #, python-format msgid "Unable to delete system group '%s'" msgstr "" -#: nova/compute/api.py:3713 +#: nova/compute/api.py:3747 msgid "Security group is still in use" msgstr "" -#: nova/compute/api.py:3723 +#: nova/compute/api.py:3757 msgid "Failed to update usages deallocating security group" msgstr "" -#: nova/compute/api.py:3726 +#: nova/compute/api.py:3760 #, python-format msgid "Delete security group %s" msgstr "" -#: nova/compute/api.py:3802 nova/compute/api.py:3885 +#: nova/compute/api.py:3836 nova/compute/api.py:3919 #, python-format msgid "Rule (%s) not found" msgstr "" -#: nova/compute/api.py:3818 +#: nova/compute/api.py:3852 msgid "Quota exceeded, too many security group rules." msgstr "" -#: nova/compute/api.py:3821 +#: nova/compute/api.py:3855 #, python-format msgid "" "Security group %(name)s added %(protocol)s ingress " "(%(from_port)s:%(to_port)s)" msgstr "" -#: nova/compute/api.py:3836 +#: nova/compute/api.py:3870 #, python-format msgid "" "Security group %(name)s removed %(protocol)s ingress " "(%(from_port)s:%(to_port)s)" msgstr "" -#: nova/compute/api.py:3892 +#: nova/compute/api.py:3926 msgid "Security group id should be integer" msgstr "" @@ -4896,782 +4906,778 @@ msgid "" "underscores, colons and spaces." 
msgstr "" -#: nova/compute/manager.py:278 +#: nova/compute/manager.py:283 #, python-format msgid "Task possibly preempted: %s" msgstr "" -#: nova/compute/manager.py:360 nova/compute/manager.py:2849 +#: nova/compute/manager.py:365 nova/compute/manager.py:2885 #, python-format msgid "Error while trying to clean up image %s" msgstr "" -#: nova/compute/manager.py:501 +#: nova/compute/manager.py:506 msgid "Instance event failed" msgstr "" -#: nova/compute/manager.py:600 +#: nova/compute/manager.py:605 #, python-format msgid "%s is not a valid node managed by this compute host." msgstr "" -#: nova/compute/manager.py:698 +#: nova/compute/manager.py:704 #, python-format msgid "" "Deleting instance as its host (%(instance_host)s) is not equal to our " "host (%(our_host)s)." msgstr "" -#: nova/compute/manager.py:713 +#: nova/compute/manager.py:719 msgid "Instance has been marked deleted already, removing it from the hypervisor." msgstr "" -#: nova/compute/manager.py:733 +#: nova/compute/manager.py:739 msgid "" "Hypervisor driver does not support instance shared storage check, " "assuming it's not on shared storage" msgstr "" -#: nova/compute/manager.py:739 +#: nova/compute/manager.py:745 msgid "Failed to check if instance shared" msgstr "" -#: nova/compute/manager.py:805 nova/compute/manager.py:856 +#: nova/compute/manager.py:811 nova/compute/manager.py:862 msgid "Failed to complete a deletion" msgstr "" -#: nova/compute/manager.py:838 +#: nova/compute/manager.py:844 msgid "" "Service started deleting the instance during the previous run, but did " "not finish. Restarting the deletion now." 
msgstr "" -#: nova/compute/manager.py:879 +#: nova/compute/manager.py:885 #, python-format msgid "" "Instance in transitional state (%(task_state)s) at start-up and power " "state is (%(power_state)s), clearing task state" msgstr "" -#: nova/compute/manager.py:897 +#: nova/compute/manager.py:903 msgid "Failed to stop instance" msgstr "" -#: nova/compute/manager.py:909 +#: nova/compute/manager.py:915 msgid "Failed to start instance" msgstr "" -#: nova/compute/manager.py:934 +#: nova/compute/manager.py:940 msgid "Failed to revert crashed migration" msgstr "" -#: nova/compute/manager.py:937 +#: nova/compute/manager.py:943 msgid "Instance found in migrating state during startup. Resetting task_state" msgstr "" -#: nova/compute/manager.py:954 +#: nova/compute/manager.py:960 msgid "Rebooting instance after nova-compute restart." msgstr "" -#: nova/compute/manager.py:964 +#: nova/compute/manager.py:970 msgid "Hypervisor driver does not support resume guests" msgstr "" -#: nova/compute/manager.py:969 +#: nova/compute/manager.py:975 msgid "Failed to resume instance" msgstr "" -#: nova/compute/manager.py:978 +#: nova/compute/manager.py:984 msgid "Hypervisor driver does not support firewall rules" msgstr "" -#: nova/compute/manager.py:1003 +#: nova/compute/manager.py:1009 #, python-format -msgid "Lifecycle event %(state)d on VM %(uuid)s" +msgid "VM %(state)s (Lifecycle Event)" msgstr "" -#: nova/compute/manager.py:1019 +#: nova/compute/manager.py:1025 #, python-format msgid "Unexpected power state %d" msgstr "" -#: nova/compute/manager.py:1124 +#: nova/compute/manager.py:1130 msgid "Hypervisor driver does not support security groups." 
msgstr "" -#: nova/compute/manager.py:1164 +#: nova/compute/manager.py:1168 #, python-format msgid "Volume id: %s finished being created but was not set as 'available'" msgstr "" -#: nova/compute/manager.py:1222 nova/compute/manager.py:1978 +#: nova/compute/manager.py:1225 nova/compute/manager.py:1982 msgid "Success" msgstr "" -#: nova/compute/manager.py:1246 +#: nova/compute/manager.py:1249 msgid "Instance disappeared before we could start it" msgstr "" -#: nova/compute/manager.py:1274 +#: nova/compute/manager.py:1276 msgid "Anti-affinity instance group policy was violated." msgstr "" -#: nova/compute/manager.py:1351 +#: nova/compute/manager.py:1353 msgid "Failed to dealloc network for deleted instance" msgstr "" -#: nova/compute/manager.py:1356 +#: nova/compute/manager.py:1358 msgid "Instance disappeared during build" msgstr "" -#: nova/compute/manager.py:1372 +#: nova/compute/manager.py:1374 msgid "Failed to dealloc network for failed instance" msgstr "" -#: nova/compute/manager.py:1399 +#: nova/compute/manager.py:1401 #, python-format msgid "Error: %s" msgstr "" -#: nova/compute/manager.py:1445 nova/compute/manager.py:3473 +#: nova/compute/manager.py:1447 nova/compute/manager.py:3509 msgid "Error trying to reschedule" msgstr "" -#: nova/compute/manager.py:1500 +#: nova/compute/manager.py:1503 msgid "Instance build timed out. Set to error state." msgstr "" -#: nova/compute/manager.py:1510 nova/compute/manager.py:1870 +#: nova/compute/manager.py:1513 nova/compute/manager.py:1873 msgid "Starting instance..." msgstr "" -#: nova/compute/manager.py:1528 +#: nova/compute/manager.py:1531 #, python-format msgid "" "Treating negative config value (%(retries)s) for " "'network_allocate_retries' as 0." 
msgstr "" -#: nova/compute/manager.py:1553 +#: nova/compute/manager.py:1556 #, python-format msgid "Instance failed network setup after %(attempts)d attempt(s)" msgstr "" -#: nova/compute/manager.py:1557 +#: nova/compute/manager.py:1560 #, python-format msgid "Instance failed network setup (attempt %(attempt)d of %(attempts)d)" msgstr "" -#: nova/compute/manager.py:1738 +#: nova/compute/manager.py:1741 msgid "Instance failed block device setup" msgstr "" -#: nova/compute/manager.py:1758 nova/compute/manager.py:2086 -#: nova/compute/manager.py:3985 +#: nova/compute/manager.py:1761 nova/compute/manager.py:2098 +#: nova/compute/manager.py:4041 msgid "Instance failed to spawn" msgstr "" -#: nova/compute/manager.py:1937 +#: nova/compute/manager.py:1941 msgid "Unexpected build failure, not rescheduling build." msgstr "" -#: nova/compute/manager.py:2002 +#: nova/compute/manager.py:2006 #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" -#: nova/compute/manager.py:2008 nova/compute/manager.py:2048 +#: nova/compute/manager.py:2012 nova/compute/manager.py:2060 msgid "Failed to allocate network(s)" msgstr "" -#: nova/compute/manager.py:2012 nova/compute/manager.py:2050 +#: nova/compute/manager.py:2016 nova/compute/manager.py:2062 msgid "Failed to allocate the network(s), not rescheduling." msgstr "" -#: nova/compute/manager.py:2074 +#: nova/compute/manager.py:2086 msgid "Failure prepping block device" msgstr "" -#: nova/compute/manager.py:2076 +#: nova/compute/manager.py:2088 msgid "Failure prepping block device." 
msgstr "" -#: nova/compute/manager.py:2099 +#: nova/compute/manager.py:2111 msgid "Could not clean up failed build, not rescheduling" msgstr "" -#: nova/compute/manager.py:2109 +#: nova/compute/manager.py:2121 msgid "Failed to deallocate networks" msgstr "" -#: nova/compute/manager.py:2130 +#: nova/compute/manager.py:2142 msgid "Failed to cleanup volumes for failed build, not rescheduling" msgstr "" -#: nova/compute/manager.py:2169 +#: nova/compute/manager.py:2181 msgid "Failed to deallocate network for instance." msgstr "" -#: nova/compute/manager.py:2178 +#: nova/compute/manager.py:2202 #, python-format msgid "%(action_str)s instance" msgstr "" -#: nova/compute/manager.py:2222 +#: nova/compute/manager.py:2246 #, python-format msgid "Ignoring DiskNotFound: %s" msgstr "" -#: nova/compute/manager.py:2225 +#: nova/compute/manager.py:2249 #, python-format msgid "Ignoring VolumeNotFound: %s" msgstr "" -#: nova/compute/manager.py:2324 +#: nova/compute/manager.py:2353 msgid "Instance disappeared during terminate" msgstr "" -#: nova/compute/manager.py:2330 nova/compute/manager.py:3653 -#: nova/compute/manager.py:5671 +#: nova/compute/manager.py:2359 nova/compute/manager.py:3689 +#: nova/compute/manager.py:5769 msgid "Setting instance vm_state to ERROR" msgstr "" -#: nova/compute/manager.py:2503 +#: nova/compute/manager.py:2539 msgid "Rebuilding instance" msgstr "" -#: nova/compute/manager.py:2516 +#: nova/compute/manager.py:2552 msgid "Invalid state of instance files on shared storage" msgstr "" -#: nova/compute/manager.py:2520 +#: nova/compute/manager.py:2556 msgid "disk on shared storage, recreating using existing disk" msgstr "" -#: nova/compute/manager.py:2524 +#: nova/compute/manager.py:2560 #, python-format msgid "disk not on shared storage, rebuilding from: '%s'" msgstr "" -#: nova/compute/manager.py:2535 nova/compute/manager.py:4790 +#: nova/compute/manager.py:2571 nova/compute/manager.py:4884 #, python-format msgid "Failed to get compute_info for %s" msgstr "" 
-#: nova/compute/manager.py:2611 +#: nova/compute/manager.py:2647 #, python-format msgid "bringing vm to original state: '%s'" msgstr "" -#: nova/compute/manager.py:2642 +#: nova/compute/manager.py:2678 #, python-format msgid "Detaching from volume api: %s" msgstr "" -#: nova/compute/manager.py:2669 +#: nova/compute/manager.py:2705 msgid "Rebooting instance" msgstr "" -#: nova/compute/manager.py:2686 +#: nova/compute/manager.py:2722 #, python-format msgid "" "trying to reboot a non-running instance: (state: %(state)s expected: " "%(running)s)" msgstr "" -#: nova/compute/manager.py:2722 +#: nova/compute/manager.py:2758 msgid "Reboot failed but instance is running" msgstr "" -#: nova/compute/manager.py:2730 +#: nova/compute/manager.py:2766 #, python-format msgid "Cannot reboot instance: %s" msgstr "" -#: nova/compute/manager.py:2742 +#: nova/compute/manager.py:2778 msgid "Instance disappeared during reboot" msgstr "" -#: nova/compute/manager.py:2810 +#: nova/compute/manager.py:2846 msgid "instance snapshotting" msgstr "" -#: nova/compute/manager.py:2816 +#: nova/compute/manager.py:2852 #, python-format msgid "" "trying to snapshot a non-running instance: (state: %(state)s expected: " "%(running)s)" msgstr "" -#: nova/compute/manager.py:2854 +#: nova/compute/manager.py:2890 msgid "Image not found during snapshot" msgstr "" -#: nova/compute/manager.py:2936 +#: nova/compute/manager.py:2972 #, python-format msgid "Failed to set admin password. Instance %s is not running" msgstr "" -#: nova/compute/manager.py:2943 +#: nova/compute/manager.py:2979 msgid "Root password set" msgstr "" -#: nova/compute/manager.py:2948 +#: nova/compute/manager.py:2984 msgid "set_admin_password is not implemented by this driver or guest instance." 
msgstr "" -#: nova/compute/manager.py:2961 +#: nova/compute/manager.py:2997 #, python-format msgid "set_admin_password failed: %s" msgstr "" -#: nova/compute/manager.py:2967 +#: nova/compute/manager.py:3003 msgid "error setting admin password" msgstr "" -#: nova/compute/manager.py:2983 +#: nova/compute/manager.py:3019 #, python-format msgid "" "trying to inject a file into a non-running (state: %(current_state)s " "expected: %(expected_state)s)" msgstr "" -#: nova/compute/manager.py:2988 +#: nova/compute/manager.py:3024 #, python-format msgid "injecting file to %s" msgstr "" -#: nova/compute/manager.py:3006 +#: nova/compute/manager.py:3042 msgid "" "Unable to find a different image to use for rescue VM, using instance's " "current image" msgstr "" -#: nova/compute/manager.py:3025 +#: nova/compute/manager.py:3061 msgid "Rescuing" msgstr "" -#: nova/compute/manager.py:3046 +#: nova/compute/manager.py:3082 msgid "Error trying to Rescue Instance" msgstr "" -#: nova/compute/manager.py:3050 +#: nova/compute/manager.py:3086 #, python-format msgid "Driver Error: %s" msgstr "" -#: nova/compute/manager.py:3073 +#: nova/compute/manager.py:3109 msgid "Unrescuing" msgstr "" -#: nova/compute/manager.py:3144 +#: nova/compute/manager.py:3180 #, python-format msgid "Migration %s is not found during confirmation" msgstr "" -#: nova/compute/manager.py:3149 +#: nova/compute/manager.py:3185 #, python-format msgid "Migration %s is already confirmed" msgstr "" -#: nova/compute/manager.py:3153 +#: nova/compute/manager.py:3189 #, python-format msgid "" "Unexpected confirmation status '%(status)s' of migration %(id)s, exit " "confirmation process" msgstr "" -#: nova/compute/manager.py:3167 +#: nova/compute/manager.py:3203 msgid "Instance is not found during confirmation" msgstr "" -#: nova/compute/manager.py:3348 +#: nova/compute/manager.py:3384 #, python-format msgid "Updating instance to original state: '%s'" msgstr "" -#: nova/compute/manager.py:3371 +#: nova/compute/manager.py:3407 
msgid "Instance has no source host" msgstr "" -#: nova/compute/manager.py:3377 +#: nova/compute/manager.py:3413 msgid "destination same as source!" msgstr "" -#: nova/compute/manager.py:3395 +#: nova/compute/manager.py:3431 msgid "Migrating" msgstr "" -#: nova/compute/manager.py:3659 +#: nova/compute/manager.py:3695 #, python-format msgid "Failed to rollback quota for failed finish_resize: %s" msgstr "" -#: nova/compute/manager.py:3719 +#: nova/compute/manager.py:3755 msgid "Pausing" msgstr "" -#: nova/compute/manager.py:3736 +#: nova/compute/manager.py:3772 msgid "Unpausing" msgstr "" -#: nova/compute/manager.py:3777 +#: nova/compute/manager.py:3813 nova/compute/manager.py:3830 msgid "Retrieving diagnostics" msgstr "" -#: nova/compute/manager.py:3812 +#: nova/compute/manager.py:3866 msgid "Resuming" msgstr "" -#: nova/compute/manager.py:4028 +#: nova/compute/manager.py:4084 msgid "Get console output" msgstr "" -#: nova/compute/manager.py:4227 +#: nova/compute/manager.py:4283 #, python-format msgid "Attaching volume %(volume_id)s to %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4236 +#: nova/compute/manager.py:4292 #, python-format msgid "Failed to attach %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4252 +#: nova/compute/manager.py:4308 #, python-format msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" msgstr "" -#: nova/compute/manager.py:4263 +#: nova/compute/manager.py:4319 msgid "Detaching volume from unknown instance" msgstr "" -#: nova/compute/manager.py:4275 +#: nova/compute/manager.py:4331 #, python-format msgid "Failed to detach volume %(volume_id)s from %(mp)s" msgstr "" -#: nova/compute/manager.py:4348 +#: nova/compute/manager.py:4404 #, python-format msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" msgstr "" -#: nova/compute/manager.py:4355 +#: nova/compute/manager.py:4411 #, python-format msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" msgstr "" -#: 
nova/compute/manager.py:4442 +#: nova/compute/manager.py:4504 #, python-format msgid "allocate_port_for_instance returned %(ports)s ports" msgstr "" -#: nova/compute/manager.py:4462 +#: nova/compute/manager.py:4524 #, python-format msgid "Port %s is not attached" msgstr "" -#: nova/compute/manager.py:4474 nova/tests/compute/test_compute.py:10545 +#: nova/compute/manager.py:4536 nova/tests/compute/test_compute.py:10612 #, python-format msgid "Host %s not found" msgstr "" -#: nova/compute/manager.py:4628 +#: nova/compute/manager.py:4690 #, python-format msgid "Pre live migration failed at %s" msgstr "" -#: nova/compute/manager.py:4658 +#: nova/compute/manager.py:4753 msgid "_post_live_migration() is started.." msgstr "" -#: nova/compute/manager.py:4731 +#: nova/compute/manager.py:4825 #, python-format msgid "Migrating instance to %s finished successfully." msgstr "" -#: nova/compute/manager.py:4733 +#: nova/compute/manager.py:4827 msgid "" "You may see the error \"libvirt: QEMU error: Domain not found: no domain " "with matching name.\" This error can be safely ignored." msgstr "" -#: nova/compute/manager.py:4758 +#: nova/compute/manager.py:4852 msgid "Post operation of migration started" msgstr "" -#: nova/compute/manager.py:4967 +#: nova/compute/manager.py:5057 msgid "An error occurred while refreshing the network cache." 
msgstr "" -#: nova/compute/manager.py:5021 +#: nova/compute/manager.py:5110 #, python-format msgid "" "Found %(migration_count)d unconfirmed migrations older than " "%(confirm_window)d seconds" msgstr "" -#: nova/compute/manager.py:5026 +#: nova/compute/manager.py:5115 #, python-format msgid "Setting migration %(migration_id)s to error: %(reason)s" msgstr "" -#: nova/compute/manager.py:5035 +#: nova/compute/manager.py:5124 #, python-format msgid "" "Automatically confirming migration %(migration_id)s for instance " "%(instance_uuid)s" msgstr "" -#: nova/compute/manager.py:5045 +#: nova/compute/manager.py:5134 #, python-format msgid "Instance %s not found" msgstr "" -#: nova/compute/manager.py:5050 +#: nova/compute/manager.py:5139 msgid "In ERROR state" msgstr "" -#: nova/compute/manager.py:5057 +#: nova/compute/manager.py:5146 #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "" -#: nova/compute/manager.py:5068 +#: nova/compute/manager.py:5157 #, python-format msgid "Error auto-confirming resize: %s. Will retry later." msgstr "" -#: nova/compute/manager.py:5097 +#: nova/compute/manager.py:5186 msgid "Periodic task failed to offload instance." msgstr "" -#: nova/compute/manager.py:5117 +#: nova/compute/manager.py:5206 #, python-format msgid "" "Running instance usage audit for host %(host)s from %(begin_time)s to " "%(end_time)s. %(number_instances)s instances." msgstr "" -#: nova/compute/manager.py:5137 +#: nova/compute/manager.py:5226 #, python-format msgid "Failed to generate usage audit for instance on host %s" msgstr "" -#: nova/compute/manager.py:5166 +#: nova/compute/manager.py:5255 msgid "Updating bandwidth usage cache" msgstr "" -#: nova/compute/manager.py:5188 +#: nova/compute/manager.py:5277 msgid "Bandwidth usage not supported by hypervisor." 
msgstr "" -#: nova/compute/manager.py:5311 +#: nova/compute/manager.py:5400 #, python-format msgid "" "Found %(num_db_instances)s in the database and %(num_vm_instances)s on " "the hypervisor." msgstr "" -#: nova/compute/manager.py:5318 nova/compute/manager.py:5381 -#, python-format -msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." -msgstr "" - -#: nova/compute/manager.py:5342 -msgid "Periodic sync_power_state task had an error while processing an instance." -msgstr "" - -#: nova/compute/manager.py:5368 +#: nova/compute/manager.py:5466 #, python-format msgid "" "During the sync_power process the instance has moved from host %(src)s to" " host %(dst)s" msgstr "" -#: nova/compute/manager.py:5406 +#: nova/compute/manager.py:5479 +#, python-format +msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." +msgstr "" + +#: nova/compute/manager.py:5504 msgid "Instance shutdown by itself. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5418 nova/compute/manager.py:5427 -#: nova/compute/manager.py:5458 nova/compute/manager.py:5469 +#: nova/compute/manager.py:5516 nova/compute/manager.py:5525 +#: nova/compute/manager.py:5556 nova/compute/manager.py:5567 msgid "error during stop() in sync_power_state." msgstr "" -#: nova/compute/manager.py:5422 +#: nova/compute/manager.py:5520 msgid "Instance is suspended unexpectedly. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5438 +#: nova/compute/manager.py:5536 msgid "Instance is paused unexpectedly. Ignore." msgstr "" -#: nova/compute/manager.py:5444 +#: nova/compute/manager.py:5542 msgid "Instance is unexpectedly not found. Ignore." msgstr "" -#: nova/compute/manager.py:5450 +#: nova/compute/manager.py:5548 msgid "Instance is not stopped. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5464 +#: nova/compute/manager.py:5562 msgid "Paused instance shutdown by itself. Calling the stop API." 
msgstr "" -#: nova/compute/manager.py:5478 +#: nova/compute/manager.py:5576 msgid "Instance is not (soft-)deleted." msgstr "" -#: nova/compute/manager.py:5507 +#: nova/compute/manager.py:5605 msgid "Reclaiming deleted instance" msgstr "" -#: nova/compute/manager.py:5511 +#: nova/compute/manager.py:5609 #, python-format msgid "Periodic reclaim failed to delete instance: %s" msgstr "" -#: nova/compute/manager.py:5536 +#: nova/compute/manager.py:5634 #, python-format msgid "Deleting orphan compute node %s" msgstr "" -#: nova/compute/manager.py:5544 nova/compute/resource_tracker.py:392 +#: nova/compute/manager.py:5642 nova/compute/resource_tracker.py:391 #, python-format msgid "No service record for host %s" msgstr "" -#: nova/compute/manager.py:5585 +#: nova/compute/manager.py:5682 #, python-format msgid "" "Detected instance with name label '%s' which is marked as DELETED but " "still present on host." msgstr "" -#: nova/compute/manager.py:5591 +#: nova/compute/manager.py:5688 #, python-format msgid "" "Powering off instance with name label '%s' which is marked as DELETED but" " still present on host." msgstr "" -#: nova/compute/manager.py:5600 +#: nova/compute/manager.py:5697 msgid "set_bootable is not implemented for the current driver" msgstr "" -#: nova/compute/manager.py:5605 +#: nova/compute/manager.py:5702 msgid "Failed to power off instance" msgstr "" -#: nova/compute/manager.py:5609 +#: nova/compute/manager.py:5706 #, python-format msgid "" "Destroying instance with name label '%s' which is marked as DELETED but " "still present on host." 
msgstr "" -#: nova/compute/manager.py:5619 +#: nova/compute/manager.py:5716 #, python-format msgid "Periodic cleanup failed to delete instance: %s" msgstr "" -#: nova/compute/manager.py:5623 +#: nova/compute/manager.py:5720 #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "" -#: nova/compute/manager.py:5654 +#: nova/compute/manager.py:5752 #, python-format msgid "Setting instance back to %(state)s after: %(error)s" msgstr "" -#: nova/compute/manager.py:5664 +#: nova/compute/manager.py:5762 #, python-format msgid "Setting instance back to ACTIVE after: %s" msgstr "" -#: nova/compute/resource_tracker.py:106 +#: nova/compute/resource_tracker.py:105 msgid "" "Host field should not be set on the instance until resources have been " "claimed." msgstr "" -#: nova/compute/resource_tracker.py:111 +#: nova/compute/resource_tracker.py:110 msgid "" "Node field should not be set on the instance until resources have been " "claimed." msgstr "" -#: nova/compute/resource_tracker.py:273 +#: nova/compute/resource_tracker.py:272 #, python-format msgid "Cannot get the metrics from %s." msgstr "" -#: nova/compute/resource_tracker.py:292 +#: nova/compute/resource_tracker.py:291 msgid "Auditing locally available compute resources" msgstr "" -#: nova/compute/resource_tracker.py:297 +#: nova/compute/resource_tracker.py:296 msgid "" "Virt driver does not support 'get_available_resource' Compute tracking " "is disabled." 
msgstr "" -#: nova/compute/resource_tracker.py:372 +#: nova/compute/resource_tracker.py:371 #, python-format msgid "Compute_service record created for %(host)s:%(node)s" msgstr "" -#: nova/compute/resource_tracker.py:378 +#: nova/compute/resource_tracker.py:377 #, python-format msgid "Compute_service record updated for %(host)s:%(node)s" msgstr "" -#: nova/compute/resource_tracker.py:431 +#: nova/compute/resource_tracker.py:430 #, python-format msgid "Free ram (MB): %s" msgstr "" -#: nova/compute/resource_tracker.py:432 +#: nova/compute/resource_tracker.py:431 #, python-format msgid "Free disk (GB): %s" msgstr "" -#: nova/compute/resource_tracker.py:437 +#: nova/compute/resource_tracker.py:436 #, python-format msgid "Free VCPUS: %s" msgstr "" -#: nova/compute/resource_tracker.py:439 +#: nova/compute/resource_tracker.py:438 msgid "Free VCPU information unavailable" msgstr "" -#: nova/compute/resource_tracker.py:442 +#: nova/compute/resource_tracker.py:441 #, python-format msgid "PCI stats: %s" msgstr "" -#: nova/compute/resource_tracker.py:478 +#: nova/compute/resource_tracker.py:486 #, python-format msgid "Updating from migration %s" msgstr "" -#: nova/compute/resource_tracker.py:545 +#: nova/compute/resource_tracker.py:553 msgid "Instance not resizing, skipping migration." msgstr "" -#: nova/compute/resource_tracker.py:560 +#: nova/compute/resource_tracker.py:568 msgid "Flavor could not be found, skipping migration." 
msgstr "" -#: nova/compute/resource_tracker.py:650 +#: nova/compute/resource_tracker.py:658 #, python-format msgid "" "Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB " "memory)" msgstr "" -#: nova/compute/resource_tracker.py:664 +#: nova/compute/resource_tracker.py:672 #, python-format msgid "Missing keys: %s" msgstr "" @@ -5685,19 +5691,19 @@ msgstr "" msgid "Unable to find host for Instance %s" msgstr "" -#: nova/compute/utils.py:209 +#: nova/compute/utils.py:204 #, python-format msgid "Can't access image %(image_id)s: %(error)s" msgstr "" -#: nova/compute/utils.py:333 +#: nova/compute/utils.py:328 #, python-format msgid "" "No host name specified for the notification of HostAPI.%s and it will be " "ignored" msgstr "" -#: nova/compute/utils.py:461 +#: nova/compute/utils.py:456 #, python-format msgid "" "Value of 0 or None specified for %s. This behaviour will change in " @@ -5705,19 +5711,19 @@ msgid "" "'do not call'. To keep the 'do not call' behaviour, use a negative value." msgstr "" -#: nova/compute/monitors/__init__.py:177 +#: nova/compute/monitors/__init__.py:176 #, python-format msgid "" "Excluding monitor %(monitor_name)s due to metric name overlap; " "overlapping metrics: %(overlap)s" msgstr "" -#: nova/compute/monitors/__init__.py:185 +#: nova/compute/monitors/__init__.py:184 #, python-format msgid "Monitor %(monitor_name)s cannot be used: %(ex)s" msgstr "" -#: nova/compute/monitors/__init__.py:191 +#: nova/compute/monitors/__init__.py:190 #, python-format msgid "The following monitors have been disabled: %s" msgstr "" @@ -5727,11 +5733,11 @@ msgstr "" msgid "Not all properties needed are implemented in the compute driver: %s" msgstr "" -#: nova/conductor/api.py:300 +#: nova/conductor/api.py:318 msgid "nova-conductor connection established successfully" msgstr "" -#: nova/conductor/api.py:305 +#: nova/conductor/api.py:323 msgid "" "Timed out waiting for nova-conductor. Is it running? 
Or did this service" " start before nova-conductor? Reattempting establishment of nova-" @@ -5743,7 +5749,7 @@ msgstr "" msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s" msgstr "" -#: nova/conductor/manager.py:522 +#: nova/conductor/manager.py:523 msgid "No valid host found for cold migrate" msgstr "" @@ -5767,6 +5773,10 @@ msgstr "" msgid "Unshelve attempted but vm_state not SHELVED or SHELVED_OFFLOADED" msgstr "" +#: nova/conductor/manager.py:737 +msgid "No valid host found for rebuild" +msgstr "" + #: nova/conductor/tasks/live_migrate.py:113 #, python-format msgid "" @@ -5855,65 +5865,65 @@ msgstr "" msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:745 +#: nova/db/sqlalchemy/api.py:750 #, python-format msgid "Invalid floating ip id %s in request" msgstr "" -#: nova/db/sqlalchemy/api.py:850 +#: nova/db/sqlalchemy/api.py:855 msgid "Failed to update usages bulk deallocating floating IP" msgstr "" -#: nova/db/sqlalchemy/api.py:1006 +#: nova/db/sqlalchemy/api.py:1011 #, python-format msgid "Invalid floating IP %s in request" msgstr "" -#: nova/db/sqlalchemy/api.py:1308 nova/db/sqlalchemy/api.py:1347 +#: nova/db/sqlalchemy/api.py:1313 nova/db/sqlalchemy/api.py:1352 #, python-format msgid "Invalid fixed IP Address %s in request" msgstr "" -#: nova/db/sqlalchemy/api.py:1482 +#: nova/db/sqlalchemy/api.py:1487 #, python-format msgid "Invalid virtual interface address %s in request" msgstr "" -#: nova/db/sqlalchemy/api.py:1576 +#: nova/db/sqlalchemy/api.py:1581 #, python-format msgid "" "Unknown osapi_compute_unique_server_name_scope value: %s Flag must be " "empty, \"global\" or \"project\"" msgstr "" -#: nova/db/sqlalchemy/api.py:1735 +#: nova/db/sqlalchemy/api.py:1741 #, python-format msgid "Invalid instance id %s in request" msgstr "" -#: nova/db/sqlalchemy/api.py:2013 +#: nova/db/sqlalchemy/api.py:2019 #, python-format msgid "Invalid field name: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:3242 +#: 
nova/db/sqlalchemy/api.py:3248 #, python-format msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:4892 +#: nova/db/sqlalchemy/api.py:4899 #, python-format msgid "" "Volume(%s) has lower stats then what is in the database. Instance must " "have been rebooted or crashed. Updating totals." msgstr "" -#: nova/db/sqlalchemy/api.py:5249 +#: nova/db/sqlalchemy/api.py:5256 #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" -#: nova/db/sqlalchemy/api.py:5639 +#: nova/db/sqlalchemy/api.py:5646 #, python-format msgid "IntegrityError detected when archiving table %s" msgstr "" @@ -5964,26 +5974,26 @@ msgstr "" msgid "Exception while seeding instance_types table" msgstr "" -#: nova/image/glance.py:231 +#: nova/image/glance.py:236 #, python-format msgid "" "Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " "%(extra)s." msgstr "" -#: nova/image/glance.py:265 +#: nova/image/glance.py:268 #, python-format msgid "" "When loading the module %(module_str)s the following error occurred: " "%(ex)s" msgstr "" -#: nova/image/glance.py:303 +#: nova/image/glance.py:306 #, python-format msgid "Failed to instantiate the download handler for %(scheme)s" msgstr "" -#: nova/image/glance.py:319 +#: nova/image/glance.py:322 #, python-format msgid "Successfully transferred using %s" msgstr "" @@ -6129,16 +6139,16 @@ msgstr "" msgid "Not deleting key %s" msgstr "" -#: nova/network/api.py:198 nova/network/neutronv2/api.py:797 +#: nova/network/api.py:195 nova/network/neutronv2/api.py:797 #, python-format msgid "re-assign floating IP %(address)s from instance %(instance_id)s" msgstr "" -#: nova/network/base_api.py:49 +#: nova/network/base_api.py:48 msgid "Failed storing info cache" msgstr "" -#: nova/network/base_api.py:68 +#: nova/network/base_api.py:67 msgid "instance is a required argument to use @refresh_cache" msgstr "" @@ -6151,70 +6161,70 @@ msgstr "" msgid 
"Loading network driver '%s'" msgstr "" -#: nova/network/floating_ips.py:90 +#: nova/network/floating_ips.py:85 #, python-format msgid "Fixed ip %s not found" msgstr "" -#: nova/network/floating_ips.py:180 +#: nova/network/floating_ips.py:175 #, python-format msgid "Floating IP %s is not associated. Ignore." msgstr "" -#: nova/network/floating_ips.py:199 +#: nova/network/floating_ips.py:194 #, python-format msgid "Address |%(address)s| is not allocated" msgstr "" -#: nova/network/floating_ips.py:203 +#: nova/network/floating_ips.py:198 #, python-format msgid "Address |%(address)s| is not allocated to your project |%(project)s|" msgstr "" -#: nova/network/floating_ips.py:223 +#: nova/network/floating_ips.py:218 #, python-format msgid "Quota exceeded for %s, tried to allocate floating IP" msgstr "" -#: nova/network/floating_ips.py:283 +#: nova/network/floating_ips.py:277 msgid "Failed to update usages deallocating floating IP" msgstr "" -#: nova/network/floating_ips.py:385 +#: nova/network/floating_ips.py:375 #, python-format msgid "Failed to disassociated floating address: %s" msgstr "" -#: nova/network/floating_ips.py:390 +#: nova/network/floating_ips.py:380 #, python-format msgid "Interface %s not found" msgstr "" -#: nova/network/floating_ips.py:553 +#: nova/network/floating_ips.py:539 #, python-format msgid "Starting migration network for instance %s" msgstr "" -#: nova/network/floating_ips.py:560 +#: nova/network/floating_ips.py:545 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " "%(instance_uuid)s. Will not migrate it " msgstr "" -#: nova/network/floating_ips.py:593 +#: nova/network/floating_ips.py:574 #, python-format msgid "Finishing migration network for instance %s" msgstr "" -#: nova/network/floating_ips.py:601 +#: nova/network/floating_ips.py:581 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " "%(instance_uuid)s. Will notsetup it." 
msgstr "" -#: nova/network/floating_ips.py:644 +#: nova/network/floating_ips.py:624 #, python-format msgid "" "Database inconsistency: DNS domain |%s| is registered in the Nova db but " @@ -6222,12 +6232,12 @@ msgid "" "ignored." msgstr "" -#: nova/network/floating_ips.py:684 +#: nova/network/floating_ips.py:664 #, python-format msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." msgstr "" -#: nova/network/floating_ips.py:693 +#: nova/network/floating_ips.py:673 #, python-format msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." msgstr "" @@ -6256,69 +6266,69 @@ msgstr "" msgid "This shouldn't be getting called except during testing." msgstr "" -#: nova/network/linux_net.py:227 +#: nova/network/linux_net.py:232 #, python-format msgid "Attempted to remove chain %s which does not exist" msgstr "" -#: nova/network/linux_net.py:263 +#: nova/network/linux_net.py:268 #, python-format msgid "Unknown chain: %r" msgstr "" -#: nova/network/linux_net.py:294 +#: nova/network/linux_net.py:301 #, python-format msgid "" "Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " "%(top)r" msgstr "" -#: nova/network/linux_net.py:762 +#: nova/network/linux_net.py:769 #, python-format msgid "Removed %(num)d duplicate rules for floating ip %(float)s" msgstr "" -#: nova/network/linux_net.py:810 +#: nova/network/linux_net.py:817 #, python-format msgid "Error deleting conntrack entries for %s" msgstr "" -#: nova/network/linux_net.py:1068 +#: nova/network/linux_net.py:1072 #, python-format msgid "Hupping dnsmasq threw %s" msgstr "" -#: nova/network/linux_net.py:1150 +#: nova/network/linux_net.py:1154 #, python-format msgid "killing radvd threw %s" msgstr "" -#: nova/network/linux_net.py:1302 +#: nova/network/linux_net.py:1308 #, python-format msgid "Unable to execute %(cmd)s. 
Exception: %(exception)s" msgstr "" -#: nova/network/linux_net.py:1360 +#: nova/network/linux_net.py:1366 #, python-format msgid "Failed removing net device: '%s'" msgstr "" -#: nova/network/linux_net.py:1532 +#: nova/network/linux_net.py:1543 #, python-format msgid "Adding interface %(interface)s to bridge %(bridge)s" msgstr "" -#: nova/network/linux_net.py:1538 +#: nova/network/linux_net.py:1549 #, python-format msgid "Failed to add interface: %s" msgstr "" -#: nova/network/manager.py:836 +#: nova/network/manager.py:828 #, python-format msgid "instance-dns-zone not found |%s|." msgstr "" -#: nova/network/manager.py:843 +#: nova/network/manager.py:835 #, python-format msgid "" "instance-dns-zone is |%(domain)s|, which is in availability zone " @@ -6326,88 +6336,88 @@ msgid "" "created." msgstr "" -#: nova/network/manager.py:882 +#: nova/network/manager.py:874 #, python-format msgid "Quota exceeded for %s, tried to allocate fixed IP" msgstr "" -#: nova/network/manager.py:942 +#: nova/network/manager.py:934 msgid "Error cleaning up fixed ip allocation. Manual cleanup may be required." msgstr "" -#: nova/network/manager.py:972 +#: nova/network/manager.py:964 msgid "Failed to update usages deallocating fixed IP" msgstr "" -#: nova/network/manager.py:996 +#: nova/network/manager.py:988 #, python-format msgid "Unable to release %s because vif doesn't exist." 
msgstr "" -#: nova/network/manager.py:1037 +#: nova/network/manager.py:1029 #, python-format msgid "IP %s leased that is not associated" msgstr "" -#: nova/network/manager.py:1043 +#: nova/network/manager.py:1035 #, python-format msgid "IP |%s| leased that isn't allocated" msgstr "" -#: nova/network/manager.py:1052 +#: nova/network/manager.py:1044 #, python-format msgid "IP %s released that is not associated" msgstr "" -#: nova/network/manager.py:1056 +#: nova/network/manager.py:1048 #, python-format msgid "IP %s released that was not leased" msgstr "" -#: nova/network/manager.py:1074 +#: nova/network/manager.py:1066 #, python-format msgid "%s must be an integer" msgstr "" -#: nova/network/manager.py:1106 +#: nova/network/manager.py:1098 msgid "Maximum allowed length for 'label' is 255." msgstr "" -#: nova/network/manager.py:1126 +#: nova/network/manager.py:1118 #, python-format msgid "" "Subnet(s) too large, defaulting to /%s. To override, specify " "network_size flag." msgstr "" -#: nova/network/manager.py:1211 +#: nova/network/manager.py:1203 msgid "cidr already in use" msgstr "" -#: nova/network/manager.py:1214 +#: nova/network/manager.py:1206 #, python-format msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" msgstr "" -#: nova/network/manager.py:1225 +#: nova/network/manager.py:1217 #, python-format msgid "" "requested cidr (%(cidr)s) conflicts with existing smaller cidr " "(%(smaller)s)" msgstr "" -#: nova/network/manager.py:1320 +#: nova/network/manager.py:1311 #, python-format msgid "Network must be disassociated from project %s before delete" msgstr "" -#: nova/network/manager.py:1949 +#: nova/network/manager.py:1937 msgid "" "The sum between the number of networks and the vlan start cannot be " "greater than 4094" msgstr "" -#: nova/network/manager.py:1956 +#: nova/network/manager.py:1944 #, python-format msgid "" "The network range is not big enough to fit %(num_networks)s networks. 
" @@ -6632,22 +6642,22 @@ msgstr "" msgid "Error setting %(attr)s" msgstr "" -#: nova/objects/base.py:247 +#: nova/objects/base.py:256 #, python-format msgid "Unable to instantiate unregistered object type %(objtype)s" msgstr "" -#: nova/objects/base.py:366 +#: nova/objects/base.py:375 #, python-format msgid "Cannot load '%s' in the base class" msgstr "" -#: nova/objects/base.py:412 +#: nova/objects/base.py:421 #, python-format msgid "%(objname)s object has no attribute '%(attrname)s'" msgstr "" -#: nova/objects/block_device.py:136 +#: nova/objects/block_device.py:149 msgid "Volume does not belong to the requested instance." msgstr "" @@ -6661,44 +6671,44 @@ msgstr "" msgid "Element %(key)s:%(val)s must be of type %(expected)s not %(actual)s" msgstr "" -#: nova/objects/fields.py:157 +#: nova/objects/fields.py:165 #, python-format msgid "Field `%s' cannot be None" msgstr "" -#: nova/objects/fields.py:232 +#: nova/objects/fields.py:246 #, python-format msgid "A string is required here, not %s" msgstr "" -#: nova/objects/fields.py:268 +#: nova/objects/fields.py:286 msgid "A datetime.datetime is required here" msgstr "" -#: nova/objects/fields.py:306 nova/objects/fields.py:315 -#: nova/objects/fields.py:324 +#: nova/objects/fields.py:328 nova/objects/fields.py:337 +#: nova/objects/fields.py:346 #, python-format msgid "Network \"%s\" is not valid" msgstr "" -#: nova/objects/fields.py:363 +#: nova/objects/fields.py:385 msgid "A list is required here" msgstr "" -#: nova/objects/fields.py:379 +#: nova/objects/fields.py:405 msgid "A dict is required here" msgstr "" -#: nova/objects/fields.py:418 +#: nova/objects/fields.py:449 #, python-format msgid "An object of type %s is required here" msgstr "" -#: nova/objects/fields.py:445 +#: nova/objects/fields.py:488 msgid "A NetworkModel is required here" msgstr "" -#: nova/objects/instance.py:432 +#: nova/objects/instance.py:431 #, python-format msgid "No save handler for %s" msgstr "" @@ -6726,7 +6736,7 @@ msgstr "" msgid 
"Snapshot list encountered but no header found!" msgstr "" -#: nova/openstack/common/lockutils.py:102 +#: nova/openstack/common/lockutils.py:101 #, python-format msgid "Unable to acquire lock on `%(filename)s` due to %(exception)s" msgstr "" @@ -6751,7 +6761,7 @@ msgstr "" msgid "Fatal call to deprecated config: %(msg)s" msgstr "" -#: nova/openstack/common/periodic_task.py:39 +#: nova/openstack/common/periodic_task.py:40 #, python-format msgid "Unexpected argument for periodic task creation: %(arg)s." msgstr "" @@ -6815,12 +6825,12 @@ msgstr "" msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: nova/openstack/common/strutils.py:202 +#: nova/openstack/common/strutils.py:197 #, python-format msgid "Invalid unit system: \"%s\"" msgstr "" -#: nova/openstack/common/strutils.py:211 +#: nova/openstack/common/strutils.py:206 #, python-format msgid "Invalid string format: %s" msgstr "" @@ -6934,54 +6944,54 @@ msgstr "" msgid "Choosing host %(weighed_host)s for instance %(instance_uuid)s" msgstr "" -#: nova/scheduler/filter_scheduler.py:170 +#: nova/scheduler/filter_scheduler.py:169 msgid "Instance disappeared during scheduling" msgstr "" -#: nova/scheduler/host_manager.py:173 +#: nova/scheduler/host_manager.py:169 #, python-format msgid "Metric name unknown of %r" msgstr "" -#: nova/scheduler/host_manager.py:188 +#: nova/scheduler/host_manager.py:184 #, python-format msgid "" "Host has more disk space than database expected (%(physical)sgb > " "%(database)sgb)" msgstr "" -#: nova/scheduler/host_manager.py:365 +#: nova/scheduler/host_manager.py:311 #, python-format msgid "Host filter ignoring hosts: %s" msgstr "" -#: nova/scheduler/host_manager.py:377 +#: nova/scheduler/host_manager.py:323 #, python-format msgid "Host filter forcing available hosts to %s" msgstr "" -#: nova/scheduler/host_manager.py:380 +#: nova/scheduler/host_manager.py:326 #, python-format msgid "No hosts matched due to not matching 'force_hosts' value of '%s'" msgstr 
"" -#: nova/scheduler/host_manager.py:393 +#: nova/scheduler/host_manager.py:339 #, python-format msgid "Host filter forcing available nodes to %s" msgstr "" -#: nova/scheduler/host_manager.py:396 +#: nova/scheduler/host_manager.py:342 #, python-format msgid "No nodes matched due to not matching 'force_nodes' value of '%s'" msgstr "" -#: nova/scheduler/host_manager.py:444 +#: nova/scheduler/host_manager.py:390 #: nova/scheduler/filters/trusted_filter.py:208 #, python-format msgid "No service for compute ID %s" msgstr "" -#: nova/scheduler/host_manager.py:462 +#: nova/scheduler/host_manager.py:408 #, python-format msgid "Removing dead compute node %(host)s:%(node)s from scheduler" msgstr "" @@ -7136,24 +7146,28 @@ msgstr "" msgid "already detached" msgstr "" -#: nova/tests/api/test_auth.py:97 +#: nova/tests/api/test_auth.py:98 msgid "unexpected role header" msgstr "" -#: nova/tests/api/openstack/compute/test_servers.py:3202 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2425 +#: nova/tests/api/openstack/test_faults.py:46 +msgid "Should be translated." 
+msgstr "" + +#: nova/tests/api/openstack/compute/test_servers.py:3225 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2434 msgid "" "Quota exceeded for instances: Requested 1, but already used 10 of 10 " "instances" msgstr "" -#: nova/tests/api/openstack/compute/test_servers.py:3207 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2430 +#: nova/tests/api/openstack/compute/test_servers.py:3230 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2439 msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram" msgstr "" -#: nova/tests/api/openstack/compute/test_servers.py:3212 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2435 +#: nova/tests/api/openstack/compute/test_servers.py:3235 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2444 msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores" msgstr "" @@ -7161,7 +7175,7 @@ msgstr "" #: nova/tests/compute/test_compute.py:1707 #: nova/tests/compute/test_compute.py:1785 #: nova/tests/compute/test_compute.py:1825 -#: nova/tests/compute/test_compute.py:5546 +#: nova/tests/compute/test_compute.py:5603 #, python-format msgid "Running instances: %s" msgstr "" @@ -7173,16 +7187,16 @@ msgstr "" msgid "After terminating instances: %s" msgstr "" -#: nova/tests/compute/test_compute.py:5557 +#: nova/tests/compute/test_compute.py:5614 #, python-format msgid "After force-killing instances: %s" msgstr "" -#: nova/tests/compute/test_compute.py:6173 +#: nova/tests/compute/test_compute.py:6229 msgid "wrong host/node" msgstr "" -#: nova/tests/compute/test_compute.py:10753 +#: nova/tests/compute/test_compute.py:10820 msgid "spawn error" msgstr "" @@ -7269,34 +7283,56 @@ msgstr "" msgid "Unexpected status code" msgstr "" -#: nova/tests/virt/hyperv/test_hypervapi.py:512 +#: nova/tests/virt/hyperv/test_hypervapi.py:517 msgid "fake vswitch not found" msgstr "" -#: nova/tests/virt/hyperv/test_hypervapi.py:965 +#: 
nova/tests/virt/hyperv/test_hypervapi.py:970 msgid "Simulated failure" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1019 +#: nova/tests/virt/libvirt/fakelibvirt.py:1041 msgid "Expected a list for 'auth' parameter" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1023 +#: nova/tests/virt/libvirt/fakelibvirt.py:1045 msgid "Expected a function in 'auth[0]' parameter" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1027 +#: nova/tests/virt/libvirt/fakelibvirt.py:1049 msgid "Expected a function in 'auth[1]' parameter" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1038 +#: nova/tests/virt/libvirt/fakelibvirt.py:1060 msgid "" "virEventRegisterDefaultImpl() must be called before " "connection is used." msgstr "" -#: nova/tests/virt/vmwareapi/test_vm_util.py:196 -#: nova/virt/vmwareapi/vm_util.py:1087 +#: nova/tests/virt/vmwareapi/fake.py:244 +#, python-format +msgid "Property %(attr)s not set for the managed object %(name)s" +msgstr "" + +#: nova/tests/virt/vmwareapi/fake.py:969 +msgid "There is no VM registered" +msgstr "" + +#: nova/tests/virt/vmwareapi/fake.py:971 nova/tests/virt/vmwareapi/fake.py:1307 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: nova/tests/virt/vmwareapi/fake.py:1096 +msgid "Session Invalid" +msgstr "" + +#: nova/tests/virt/vmwareapi/fake.py:1304 +msgid "No Virtual Machine has been registered yet" +msgstr "" + +#: nova/tests/virt/vmwareapi/test_ds_util.py:221 +#: nova/virt/vmwareapi/ds_util.py:265 #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" @@ -7308,15 +7344,15 @@ msgid "" "left to copy" msgstr "" -#: nova/tests/virt/xenapi/image/test_bittorrent.py:126 -#: nova/virt/xenapi/image/bittorrent.py:81 +#: nova/tests/virt/xenapi/image/test_bittorrent.py:125 +#: nova/virt/xenapi/image/bittorrent.py:80 msgid "" "Cannot create default bittorrent URL without torrent_base_url set or " "torrent URL fetcher extension" msgstr "" -#: 
nova/tests/virt/xenapi/image/test_bittorrent.py:160 -#: nova/virt/xenapi/image/bittorrent.py:85 +#: nova/tests/virt/xenapi/image/test_bittorrent.py:159 +#: nova/virt/xenapi/image/bittorrent.py:84 msgid "Multiple torrent URL fetcher extensions found. Failing." msgstr "" @@ -7330,79 +7366,99 @@ msgstr "" msgid "Booting with volume %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/virt/cpu.py:56 nova/virt/cpu.py:60 -#, python-format -msgid "Invalid range expression %r" +#: nova/virt/driver.py:1242 +msgid "Event must be an instance of nova.virt.event.Event" msgstr "" -#: nova/virt/cpu.py:69 +#: nova/virt/driver.py:1248 #, python-format -msgid "Invalid exclusion expression %r" +msgid "Exception dispatching event %(event)s: %(ex)s" msgstr "" -#: nova/virt/cpu.py:76 -#, python-format -msgid "Invalid inclusion expression %r" +#: nova/virt/driver.py:1330 +msgid "Compute driver option required, but not specified" msgstr "" -#: nova/virt/cpu.py:81 +#: nova/virt/driver.py:1333 #, python-format -msgid "No CPUs available after parsing %r" +msgid "Loading compute driver '%s'" msgstr "" -#: nova/virt/driver.py:1207 -msgid "Event must be an instance of nova.virt.event.Event" +#: nova/virt/driver.py:1340 +msgid "Unable to load the virtualization driver" msgstr "" -#: nova/virt/driver.py:1213 -#, python-format -msgid "Exception dispatching event %(event)s: %(ex)s" +#: nova/virt/event.py:33 +msgid "Started" msgstr "" -#: nova/virt/driver.py:1295 -msgid "Compute driver option required, but not specified" +#: nova/virt/event.py:34 +msgid "Stopped" msgstr "" -#: nova/virt/driver.py:1298 -#, python-format -msgid "Loading compute driver '%s'" +#: nova/virt/event.py:35 +msgid "Paused" msgstr "" -#: nova/virt/driver.py:1305 -msgid "Unable to load the virtualization driver" +#: nova/virt/event.py:36 +msgid "Resumed" +msgstr "" + +#: nova/virt/event.py:108 +msgid "Unknown" msgstr "" -#: nova/virt/fake.py:216 +#: nova/virt/fake.py:217 #, python-format msgid "Key '%(key)s' not in instances 
'%(inst)s'" msgstr "" -#: nova/virt/firewall.py:178 +#: nova/virt/firewall.py:176 msgid "Attempted to unfilter instance which is not filtered" msgstr "" -#: nova/virt/images.py:86 +#: nova/virt/hardware.py:45 +#, python-format +msgid "No CPUs available after parsing %r" +msgstr "" + +#: nova/virt/hardware.py:77 nova/virt/hardware.py:81 +#, python-format +msgid "Invalid range expression %r" +msgstr "" + +#: nova/virt/hardware.py:90 +#, python-format +msgid "Invalid exclusion expression %r" +msgstr "" + +#: nova/virt/hardware.py:97 +#, python-format +msgid "Invalid inclusion expression %r" +msgstr "" + +#: nova/virt/images.py:81 msgid "'qemu-img info' parsing failed." msgstr "" -#: nova/virt/images.py:92 +#: nova/virt/images.py:87 #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "" -#: nova/virt/images.py:105 +#: nova/virt/images.py:100 #, python-format msgid "" "%(base)s virtual size %(disk_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/images.py:122 +#: nova/virt/images.py:117 #, python-format msgid "Converted to raw, but format is now %s" msgstr "" -#: nova/virt/storage_users.py:63 nova/virt/storage_users.py:101 +#: nova/virt/storage_users.py:64 nova/virt/storage_users.py:102 #, python-format msgid "Cannot decode JSON from %(id_path)s" msgstr "" @@ -7445,27 +7501,27 @@ msgstr "" msgid "Baremetal power manager failed to restart node for instance %r" msgstr "" -#: nova/virt/baremetal/driver.py:375 +#: nova/virt/baremetal/driver.py:376 #, python-format msgid "Destroy called on non-existing instance %s" msgstr "" -#: nova/virt/baremetal/driver.py:393 +#: nova/virt/baremetal/driver.py:394 #, python-format msgid "Error from baremetal driver during destroy: %s" msgstr "" -#: nova/virt/baremetal/driver.py:398 +#: nova/virt/baremetal/driver.py:399 #, python-format msgid "Error while recording destroy failure in baremetal database: %s" msgstr "" -#: nova/virt/baremetal/driver.py:413 +#: nova/virt/baremetal/driver.py:414 
#, python-format msgid "Baremetal power manager failed to stop node for instance %r" msgstr "" -#: nova/virt/baremetal/driver.py:426 +#: nova/virt/baremetal/driver.py:427 #, python-format msgid "Baremetal power manager failed to start node for instance %r" msgstr "" @@ -7703,16 +7759,16 @@ msgstr "" msgid "baremetal driver was unable to delete tid %s" msgstr "" -#: nova/virt/baremetal/volume_driver.py:195 nova/virt/hyperv/volumeops.py:189 +#: nova/virt/baremetal/volume_driver.py:195 nova/virt/hyperv/volumeops.py:196 msgid "Could not determine iscsi initiator name" msgstr "" -#: nova/virt/baremetal/volume_driver.py:234 +#: nova/virt/baremetal/volume_driver.py:225 #, python-format msgid "No fixed PXE IP is associated to %s" msgstr "" -#: nova/virt/baremetal/volume_driver.py:288 +#: nova/virt/baremetal/volume_driver.py:283 #, python-format msgid "detach volume could not find tid for %s" msgstr "" @@ -7742,47 +7798,47 @@ msgstr "" msgid "Baremetal virtual interface %s not found" msgstr "" -#: nova/virt/disk/api.py:285 +#: nova/virt/disk/api.py:280 msgid "image already mounted" msgstr "" -#: nova/virt/disk/api.py:359 +#: nova/virt/disk/api.py:354 #, python-format msgid "Ignoring error injecting data into image (%(e)s)" msgstr "" -#: nova/virt/disk/api.py:381 +#: nova/virt/disk/api.py:376 #, python-format msgid "" "Failed to mount container filesystem '%(image)s' on '%(target)s': " "%(errors)s" msgstr "" -#: nova/virt/disk/api.py:411 +#: nova/virt/disk/api.py:406 #, python-format msgid "Failed to teardown container filesystem: %s" msgstr "" -#: nova/virt/disk/api.py:424 +#: nova/virt/disk/api.py:419 #, python-format msgid "Failed to umount container filesystem: %s" msgstr "" -#: nova/virt/disk/api.py:449 +#: nova/virt/disk/api.py:444 #, python-format msgid "Ignoring error injecting %(inject)s into image (%(e)s)" msgstr "" -#: nova/virt/disk/api.py:609 +#: nova/virt/disk/api.py:604 msgid "Not implemented on Windows" msgstr "" -#: nova/virt/disk/api.py:636 +#: 
nova/virt/disk/api.py:631 #, python-format msgid "User %(username)s not found in password file." msgstr "" -#: nova/virt/disk/api.py:652 +#: nova/virt/disk/api.py:647 #, python-format msgid "User %(username)s not found in shadow file." msgstr "" @@ -7913,11 +7969,11 @@ msgstr "" msgid "The ISCSI initiator name can't be found. Choosing the default one" msgstr "" -#: nova/virt/hyperv/driver.py:165 +#: nova/virt/hyperv/driver.py:169 msgid "VIF plugging is not supported by the Hyper-V driver." msgstr "" -#: nova/virt/hyperv/driver.py:170 +#: nova/virt/hyperv/driver.py:174 msgid "VIF unplugging is not supported by the Hyper-V driver." msgstr "" @@ -7999,7 +8055,7 @@ msgstr "" msgid "No external vswitch found" msgstr "" -#: nova/virt/hyperv/pathutils.py:71 +#: nova/virt/hyperv/pathutils.py:72 #, python-format msgid "The file copy from %(src)s to %(dest)s failed" msgstr "" @@ -8009,30 +8065,30 @@ msgstr "" msgid "Failed to remove snapshot for VM %s" msgstr "" -#: nova/virt/hyperv/vhdutils.py:65 nova/virt/hyperv/vhdutilsv2.py:63 +#: nova/virt/hyperv/vhdutils.py:66 nova/virt/hyperv/vhdutilsv2.py:64 #, python-format msgid "Unsupported disk format: %s" msgstr "" -#: nova/virt/hyperv/vhdutils.py:150 +#: nova/virt/hyperv/vhdutils.py:151 #, python-format msgid "The %(vhd_type)s type VHD is not supported" msgstr "" -#: nova/virt/hyperv/vhdutils.py:161 +#: nova/virt/hyperv/vhdutils.py:162 #, python-format msgid "Unable to obtain block size from VHD %(vhd_path)s" msgstr "" -#: nova/virt/hyperv/vhdutils.py:208 +#: nova/virt/hyperv/vhdutils.py:209 msgid "Unsupported virtual disk format" msgstr "" -#: nova/virt/hyperv/vhdutilsv2.py:134 +#: nova/virt/hyperv/vhdutilsv2.py:135 msgid "Differencing VHDX images are not supported" msgstr "" -#: nova/virt/hyperv/vhdutilsv2.py:157 +#: nova/virt/hyperv/vhdutilsv2.py:158 #, python-format msgid "Unable to obtain internal size from VHDX: %(vhd_path)s. 
Exception: %(ex)s" msgstr "" @@ -8053,12 +8109,12 @@ msgstr "" msgid "Spawning new instance" msgstr "" -#: nova/virt/hyperv/vmops.py:280 nova/virt/vmwareapi/vmops.py:520 +#: nova/virt/hyperv/vmops.py:280 nova/virt/vmwareapi/vmops.py:567 #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "" -#: nova/virt/hyperv/vmops.py:283 nova/virt/vmwareapi/vmops.py:524 +#: nova/virt/hyperv/vmops.py:283 nova/virt/vmwareapi/vmops.py:571 msgid "Using config drive for instance" msgstr "" @@ -8067,7 +8123,7 @@ msgstr "" msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:549 +#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:596 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" @@ -8123,12 +8179,12 @@ msgstr "" msgid "Metrics collection is not supported on this version of Hyper-V" msgstr "" -#: nova/virt/hyperv/volumeops.py:146 +#: nova/virt/hyperv/volumeops.py:148 #, python-format msgid "Unable to attach volume to instance %s" msgstr "" -#: nova/virt/hyperv/volumeops.py:215 nova/virt/hyperv/volumeops.py:229 +#: nova/virt/hyperv/volumeops.py:222 nova/virt/hyperv/volumeops.py:236 #, python-format msgid "Unable to find a mounted disk for target_iqn: %s" msgstr "" @@ -8158,136 +8214,136 @@ msgstr "" msgid "Unable to determine disk bus for '%s'" msgstr "" -#: nova/virt/libvirt/driver.py:542 +#: nova/virt/libvirt/driver.py:556 #, python-format msgid "Connection to libvirt lost: %s" msgstr "" -#: nova/virt/libvirt/driver.py:724 +#: nova/virt/libvirt/driver.py:739 #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "" -#: nova/virt/libvirt/driver.py:868 +#: nova/virt/libvirt/driver.py:932 msgid "operation time out" msgstr "" -#: nova/virt/libvirt/driver.py:1187 +#: nova/virt/libvirt/driver.py:1257 #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" 
-#: nova/virt/libvirt/driver.py:1194 +#: nova/virt/libvirt/driver.py:1264 #, python-format msgid "Volume sets block size, but libvirt '%s' or later is required." msgstr "" -#: nova/virt/libvirt/driver.py:1292 +#: nova/virt/libvirt/driver.py:1352 msgid "Swap only supports host devices" msgstr "" -#: nova/virt/libvirt/driver.py:1579 +#: nova/virt/libvirt/driver.py:1635 msgid "libvirt error while requesting blockjob info." msgstr "" -#: nova/virt/libvirt/driver.py:1712 +#: nova/virt/libvirt/driver.py:1776 msgid "Found no disk to snapshot." msgstr "" -#: nova/virt/libvirt/driver.py:1790 +#: nova/virt/libvirt/driver.py:1868 #, python-format msgid "Unknown type: %s" msgstr "" -#: nova/virt/libvirt/driver.py:1795 +#: nova/virt/libvirt/driver.py:1873 msgid "snapshot_id required in create_info" msgstr "" -#: nova/virt/libvirt/driver.py:1853 +#: nova/virt/libvirt/driver.py:1931 #, python-format msgid "Libvirt '%s' or later is required for online deletion of volume snapshots." msgstr "" -#: nova/virt/libvirt/driver.py:1860 +#: nova/virt/libvirt/driver.py:1938 #, python-format msgid "Unknown delete_info type %s" msgstr "" -#: nova/virt/libvirt/driver.py:1890 +#: nova/virt/libvirt/driver.py:1966 #, python-format -msgid "Unable to locate disk matching id: %s" +msgid "Disk with id: %s not found attached to instance." 
msgstr "" -#: nova/virt/libvirt/driver.py:2330 nova/virt/xenapi/vmops.py:1552 +#: nova/virt/libvirt/driver.py:2407 nova/virt/xenapi/vmops.py:1552 msgid "Guest does not have a console available" msgstr "" -#: nova/virt/libvirt/driver.py:2746 +#: nova/virt/libvirt/driver.py:2823 #, python-format msgid "Detaching PCI devices with libvirt < %(ver)s is not permitted" msgstr "" -#: nova/virt/libvirt/driver.py:2912 +#: nova/virt/libvirt/driver.py:2989 #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt " "hypervisor '%s' does not support selecting CPU models" msgstr "" -#: nova/virt/libvirt/driver.py:2918 +#: nova/virt/libvirt/driver.py:2995 msgid "Config requested a custom CPU model, but no model name was provided" msgstr "" -#: nova/virt/libvirt/driver.py:2922 +#: nova/virt/libvirt/driver.py:2999 msgid "A CPU model name should not be set when a host CPU model is requested" msgstr "" -#: nova/virt/libvirt/driver.py:2942 +#: nova/virt/libvirt/driver.py:3019 msgid "" "Passthrough of the host CPU was requested but this libvirt version does " "not support this feature" msgstr "" -#: nova/virt/libvirt/driver.py:3475 +#: nova/virt/libvirt/driver.py:3567 #, python-format msgid "" "Error from libvirt while looking up %(instance_id)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3496 +#: nova/virt/libvirt/driver.py:3588 #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3760 +#: nova/virt/libvirt/driver.py:3851 msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range." msgstr "" -#: nova/virt/libvirt/driver.py:3890 +#: nova/virt/libvirt/driver.py:3974 msgid "libvirt version is too old (does not support getVersion)" msgstr "" -#: nova/virt/libvirt/driver.py:4251 +#: nova/virt/libvirt/driver.py:4335 msgid "Block migration can not be used with shared storage." 
msgstr "" -#: nova/virt/libvirt/driver.py:4259 +#: nova/virt/libvirt/driver.py:4344 msgid "Live migration can not be used without shared storage." msgstr "" -#: nova/virt/libvirt/driver.py:4303 +#: nova/virt/libvirt/driver.py:4414 #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too " "large(available on destination host:%(available)s < need:%(necessary)s)" msgstr "" -#: nova/virt/libvirt/driver.py:4342 +#: nova/virt/libvirt/driver.py:4453 #, python-format msgid "" "CPU doesn't have compatibility.\n" @@ -8297,59 +8353,85 @@ msgid "" "Refer to %(u)s" msgstr "" -#: nova/virt/libvirt/driver.py:4409 +#: nova/virt/libvirt/driver.py:4516 #, python-format msgid "The firewall filter for %s does not exist" msgstr "" -#: nova/virt/libvirt/driver.py:4900 +#: nova/virt/libvirt/driver.py:4579 +msgid "" +"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag " +"or your destination node does not support retrieving listen addresses. " +"In order for live migration to work properly, you must configure the " +"graphics (VNC and/or SPICE) listen addresses to be either the catch-all " +"address (0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)." +msgstr "" + +#: nova/virt/libvirt/driver.py:4596 +msgid "" +"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag," +" and the graphics (VNC and/or SPICE) listen addresses on the destination" +" node do not match the addresses on the source node. Since the source " +"node has listen addresses set to either the catch-all address (0.0.0.0 or" +" ::) or the local address (127.0.0.1 or ::1), the live migration will " +"succeed, but the VM will continue to listen on the current addresses." +msgstr "" + +#: nova/virt/libvirt/driver.py:4964 +#, python-format +msgid "" +"Error from libvirt while getting description of %(instance_name)s: [Error" +" Code %(error_code)s] %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:5090 msgid "Unable to resize disk down." 
msgstr "" -#: nova/virt/libvirt/imagebackend.py:258 +#: nova/virt/libvirt/imagebackend.py:257 #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:273 +#: nova/virt/libvirt/imagebackend.py:272 msgid "Attempted overwrite of an existing value." msgstr "" -#: nova/virt/libvirt/imagebackend.py:429 +#: nova/virt/libvirt/imagebackend.py:433 msgid "You should specify images_volume_group flag to use LVM images." msgstr "" -#: nova/virt/libvirt/imagebackend.py:544 +#: nova/virt/libvirt/imagebackend.py:548 msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "" -#: nova/virt/libvirt/imagebackend.py:658 +#: nova/virt/libvirt/imagebackend.py:660 msgid "rbd python libraries not found" msgstr "" -#: nova/virt/libvirt/imagebackend.py:697 +#: nova/virt/libvirt/imagebackend.py:703 #, python-format msgid "Unknown image_type=%s" msgstr "" -#: nova/virt/libvirt/lvm.py:55 +#: nova/virt/libvirt/lvm.py:54 #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db " "available, but %(size)db required by volume %(lv)s." 
msgstr "" -#: nova/virt/libvirt/lvm.py:103 +#: nova/virt/libvirt/lvm.py:102 #, python-format msgid "vg %s must be LVM volume group" msgstr "" -#: nova/virt/libvirt/lvm.py:146 +#: nova/virt/libvirt/lvm.py:145 #, python-format msgid "Path %s must be LVM logical volume" msgstr "" -#: nova/virt/libvirt/lvm.py:222 +#: nova/virt/libvirt/lvm.py:221 #, python-format msgid "volume_clear='%s' is not handled" msgstr "" @@ -8358,273 +8440,234 @@ msgstr "" msgid "Cannot find any Fibre Channel HBAs" msgstr "" -#: nova/virt/libvirt/utils.py:431 +#: nova/virt/libvirt/utils.py:437 msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "" -#: nova/virt/libvirt/vif.py:353 nova/virt/libvirt/vif.py:608 -#: nova/virt/libvirt/vif.py:797 +#: nova/virt/libvirt/vif.py:356 nova/virt/libvirt/vif.py:574 +#: nova/virt/libvirt/vif.py:750 msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" -#: nova/virt/libvirt/vif.py:397 nova/virt/libvirt/vif.py:628 -#: nova/virt/libvirt/vif.py:817 +#: nova/virt/libvirt/vif.py:362 nova/virt/libvirt/vif.py:580 +#: nova/virt/libvirt/vif.py:756 #, python-format msgid "Unexpected vif_type=%s" msgstr "" -#: nova/virt/libvirt/volume.py:291 +#: nova/virt/libvirt/volume.py:294 #, python-format msgid "iSCSI device not found at %s" msgstr "" -#: nova/virt/libvirt/volume.py:737 +#: nova/virt/libvirt/volume.py:740 #, python-format msgid "AoE device not found at %s" msgstr "" -#: nova/virt/libvirt/volume.py:909 +#: nova/virt/libvirt/volume.py:912 msgid "We are unable to locate any Fibre Channel devices" msgstr "" -#: nova/virt/libvirt/volume.py:928 +#: nova/virt/libvirt/volume.py:931 msgid "Fibre Channel device not found." msgstr "" -#: nova/virt/vmwareapi/driver.py:103 +#: nova/virt/vmwareapi/driver.py:104 msgid "" "The VMware ESX driver is now deprecated and will be removed in the Juno " "release. The VC driver will remain and continue to be supported." 
msgstr "" -#: nova/virt/vmwareapi/driver.py:115 +#: nova/virt/vmwareapi/driver.py:116 msgid "" "Must specify host_ip, host_username and host_password to use " "compute_driver=vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver" msgstr "" -#: nova/virt/vmwareapi/driver.py:127 +#: nova/virt/vmwareapi/driver.py:128 #, python-format msgid "Invalid Regular Expression %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:242 +#: nova/virt/vmwareapi/driver.py:243 msgid "Instance cannot be found in host, or in an unknownstate." msgstr "" -#: nova/virt/vmwareapi/driver.py:398 +#: nova/virt/vmwareapi/driver.py:403 #, python-format msgid "All clusters specified %s were not found in the vCenter" msgstr "" -#: nova/virt/vmwareapi/driver.py:407 +#: nova/virt/vmwareapi/driver.py:412 #, python-format msgid "The following clusters could not be found in the vCenter %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:544 +#: nova/virt/vmwareapi/driver.py:551 #, python-format msgid "The resource %s does not exist" msgstr "" -#: nova/virt/vmwareapi/driver.py:590 +#: nova/virt/vmwareapi/driver.py:597 #, python-format msgid "Invalid cluster or resource pool name : %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:757 +#: nova/virt/vmwareapi/driver.py:771 msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we " "do not return uptime for just one host." msgstr "" -#: nova/virt/vmwareapi/driver.py:845 -#, python-format -msgid "" -"Unable to connect to server at %(server)s, sleeping for %(seconds)s " -"seconds" -msgstr "" - -#: nova/virt/vmwareapi/driver.py:865 +#: nova/virt/vmwareapi/driver.py:884 #, python-format msgid "Unable to validate session %s!" msgstr "" -#: nova/virt/vmwareapi/driver.py:906 +#: nova/virt/vmwareapi/driver.py:926 #, python-format msgid "Session %s is inactive!" 
msgstr "" -#: nova/virt/vmwareapi/driver.py:954 -#, python-format -msgid "In vmwareapi: _call_method (session=%s)" -msgstr "" - -#: nova/virt/vmwareapi/driver.py:998 +#: nova/virt/vmwareapi/driver.py:1017 #, python-format msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" msgstr "" -#: nova/virt/vmwareapi/driver.py:1008 +#: nova/virt/vmwareapi/driver.py:1027 #, python-format msgid "In vmwareapi:_poll_task, Got this error %s" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:38 +#: nova/virt/vmwareapi/ds_util.py:41 msgid "Datastore name cannot be None" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:40 +#: nova/virt/vmwareapi/ds_util.py:43 msgid "Datastore reference cannot be None" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:42 +#: nova/virt/vmwareapi/ds_util.py:45 msgid "Invalid capacity" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:45 +#: nova/virt/vmwareapi/ds_util.py:48 msgid "Capacity is smaller than free space" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:106 +#: nova/virt/vmwareapi/ds_util.py:109 msgid "datastore name empty" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:111 +#: nova/virt/vmwareapi/ds_util.py:114 nova/virt/vmwareapi/ds_util.py:146 msgid "path component cannot be None" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:144 +#: nova/virt/vmwareapi/ds_util.py:160 msgid "datastore path empty" msgstr "" -#: nova/virt/vmwareapi/error_util.py:46 +#: nova/virt/vmwareapi/error_util.py:45 msgid "exception_summary must not be a list" msgstr "" -#: nova/virt/vmwareapi/error_util.py:76 +#: nova/virt/vmwareapi/error_util.py:75 msgid "fault_list must be a list" msgstr "" -#: nova/virt/vmwareapi/error_util.py:122 +#: nova/virt/vmwareapi/error_util.py:121 #, python-format msgid "Error(s) %s occurred in the call to RetrievePropertiesEx" msgstr "" -#: nova/virt/vmwareapi/error_util.py:136 +#: nova/virt/vmwareapi/error_util.py:135 msgid "VMware Driver fault." 
msgstr "" -#: nova/virt/vmwareapi/error_util.py:142 +#: nova/virt/vmwareapi/error_util.py:141 msgid "VMware Driver configuration fault." msgstr "" -#: nova/virt/vmwareapi/error_util.py:146 +#: nova/virt/vmwareapi/error_util.py:145 msgid "No default value for use_linked_clone found." msgstr "" -#: nova/virt/vmwareapi/error_util.py:150 +#: nova/virt/vmwareapi/error_util.py:149 #, python-format msgid "Missing parameter : %(param)s" msgstr "" -#: nova/virt/vmwareapi/error_util.py:154 +#: nova/virt/vmwareapi/error_util.py:153 msgid "No root disk defined." msgstr "" -#: nova/virt/vmwareapi/error_util.py:158 +#: nova/virt/vmwareapi/error_util.py:157 msgid "Resource already exists." msgstr "" -#: nova/virt/vmwareapi/error_util.py:163 +#: nova/virt/vmwareapi/error_util.py:162 msgid "Cannot delete file." msgstr "" -#: nova/virt/vmwareapi/error_util.py:168 +#: nova/virt/vmwareapi/error_util.py:167 msgid "File already exists." msgstr "" -#: nova/virt/vmwareapi/error_util.py:173 +#: nova/virt/vmwareapi/error_util.py:172 msgid "File fault." msgstr "" -#: nova/virt/vmwareapi/error_util.py:178 +#: nova/virt/vmwareapi/error_util.py:177 msgid "File locked." msgstr "" -#: nova/virt/vmwareapi/error_util.py:183 +#: nova/virt/vmwareapi/error_util.py:182 msgid "File not found." msgstr "" -#: nova/virt/vmwareapi/error_util.py:188 +#: nova/virt/vmwareapi/error_util.py:187 msgid "Invalid property." msgstr "" -#: nova/virt/vmwareapi/error_util.py:193 +#: nova/virt/vmwareapi/error_util.py:192 msgid "No Permission." msgstr "" -#: nova/virt/vmwareapi/error_util.py:198 +#: nova/virt/vmwareapi/error_util.py:197 msgid "Not Authenticated." msgstr "" -#: nova/virt/vmwareapi/error_util.py:203 +#: nova/virt/vmwareapi/error_util.py:202 msgid "Invalid Power State." msgstr "" -#: nova/virt/vmwareapi/error_util.py:228 +#: nova/virt/vmwareapi/error_util.py:227 #, python-format msgid "Fault %s not matched." 
msgstr "" -#: nova/virt/vmwareapi/fake.py:243 -#, python-format -msgid "Property %(attr)s not set for the managed object %(name)s" -msgstr "" - -#: nova/virt/vmwareapi/fake.py:967 -msgid "There is no VM registered" -msgstr "" - -#: nova/virt/vmwareapi/fake.py:969 nova/virt/vmwareapi/fake.py:1290 -#, python-format -msgid "Virtual Machine with ref %s is not there" -msgstr "" - -#: nova/virt/vmwareapi/fake.py:1052 -#, python-format -msgid "Logging out a session that is invalid or already logged out: %s" -msgstr "" - -#: nova/virt/vmwareapi/fake.py:1070 -msgid "Session Invalid" -msgstr "" - -#: nova/virt/vmwareapi/fake.py:1287 -msgid "No Virtual Machine has been registered yet" -msgstr "" - #: nova/virt/vmwareapi/imagecache.py:74 #, python-format msgid "Unable to delete %(file)s. Exception: %(ex)s" msgstr "" -#: nova/virt/vmwareapi/imagecache.py:148 +#: nova/virt/vmwareapi/imagecache.py:147 #, python-format msgid "Image %s is no longer used by this node. Pending deletion!" msgstr "" -#: nova/virt/vmwareapi/imagecache.py:153 +#: nova/virt/vmwareapi/imagecache.py:152 #, python-format msgid "Image %s is no longer used. Deleting!" msgstr "" -#: nova/virt/vmwareapi/io_util.py:121 +#: nova/virt/vmwareapi/io_util.py:122 #, python-format msgid "Glance image %s is in killed state" msgstr "" -#: nova/virt/vmwareapi/io_util.py:129 +#: nova/virt/vmwareapi/io_util.py:130 #, python-format msgid "Glance image %(image_id)s is in unknown state - %(state)s" msgstr "" @@ -8681,118 +8724,116 @@ msgstr "" msgid "Unable to retrieve value for %(path)s Reason: %(reason)s" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:195 +#: nova/virt/vmwareapi/vm_util.py:196 #, python-format msgid "%s is not supported." 
msgstr "" -#: nova/virt/vmwareapi/vm_util.py:980 +#: nova/virt/vmwareapi/vm_util.py:989 msgid "No host available on cluster" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:1210 +#: nova/virt/vmwareapi/vm_util.py:1083 #, python-format msgid "Failed to get cluster references %s" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:1222 +#: nova/virt/vmwareapi/vm_util.py:1095 #, python-format msgid "Failed to get resource pool references %s" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:1404 +#: nova/virt/vmwareapi/vm_util.py:1285 msgid "vmwareapi:vm_util:clone_vmref_for_instance, called with vm_ref=None" msgstr "" -#: nova/virt/vmwareapi/vmops.py:131 +#: nova/virt/vmwareapi/vmops.py:132 #, python-format msgid "Extending virtual disk failed with error: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:246 +#: nova/virt/vmwareapi/vmops.py:249 msgid "Image disk size greater than requested disk size" msgstr "" -#: nova/virt/vmwareapi/vmops.py:471 -#, python-format -msgid "Root disk file creation failed - %s" +#: nova/virt/vmwareapi/vmops.py:856 +msgid "instance is not powered on" msgstr "" -#: nova/virt/vmwareapi/vmops.py:813 -msgid "instance is not powered on" +#: nova/virt/vmwareapi/vmops.py:884 +msgid "Instance does not exist on backend" msgstr "" -#: nova/virt/vmwareapi/vmops.py:869 +#: nova/virt/vmwareapi/vmops.py:916 #, python-format msgid "" "In vmwareapi:vmops:_destroy_instance, got this exception while un-" "registering the VM: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:892 -#, python-format +#: nova/virt/vmwareapi/vmops.py:939 msgid "" -"In vmwareapi:vmops:_destroy_instance, got this exception while deleting " -"the VM contents from the disk: %s" +"In vmwareapi:vmops:_destroy_instance, exception while deleting the VM " +"contents from the disk" msgstr "" -#: nova/virt/vmwareapi/vmops.py:926 +#: nova/virt/vmwareapi/vmops.py:972 msgid "pause not supported for vmwareapi" msgstr "" -#: nova/virt/vmwareapi/vmops.py:930 +#: nova/virt/vmwareapi/vmops.py:976 msgid "unpause not 
supported for vmwareapi" msgstr "" -#: nova/virt/vmwareapi/vmops.py:948 +#: nova/virt/vmwareapi/vmops.py:994 msgid "instance is powered off and cannot be suspended." msgstr "" -#: nova/virt/vmwareapi/vmops.py:968 +#: nova/virt/vmwareapi/vmops.py:1014 msgid "instance is not in a suspended state" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1056 +#: nova/virt/vmwareapi/vmops.py:1102 msgid "instance is suspended and cannot be powered off." msgstr "" -#: nova/virt/vmwareapi/vmops.py:1147 +#: nova/virt/vmwareapi/vmops.py:1193 #, python-format msgid "" "In vmwareapi:vmops:confirm_migration, got this exception while destroying" " the VM: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1213 nova/virt/xenapi/vmops.py:1497 +#: nova/virt/vmwareapi/vmops.py:1255 nova/virt/xenapi/vmops.py:1497 #, python-format msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1217 nova/virt/xenapi/vmops.py:1501 +#: nova/virt/vmwareapi/vmops.py:1259 nova/virt/xenapi/vmops.py:1501 msgid "Automatically hard rebooting" msgstr "" -#: nova/virt/vmwareapi/volumeops.py:217 nova/virt/vmwareapi/volumeops.py:251 +#: nova/virt/vmwareapi/volumeops.py:340 nova/virt/vmwareapi/volumeops.py:375 #, python-format msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" msgstr "" -#: nova/virt/vmwareapi/volumeops.py:239 nova/virt/vmwareapi/volumeops.py:414 +#: nova/virt/vmwareapi/volumeops.py:363 nova/virt/vmwareapi/volumeops.py:538 msgid "Unable to find iSCSI Target" msgstr "" -#: nova/virt/vmwareapi/volumeops.py:337 +#: nova/virt/vmwareapi/volumeops.py:461 #, python-format msgid "" "The volume's backing has been relocated to %s. Need to consolidate " "backing disk file." 
msgstr "" -#: nova/virt/vmwareapi/volumeops.py:375 nova/virt/vmwareapi/volumeops.py:422 +#: nova/virt/vmwareapi/volumeops.py:499 nova/virt/vmwareapi/volumeops.py:546 msgid "Unable to find volume" msgstr "" -#: nova/virt/vmwareapi/volumeops.py:395 nova/virt/vmwareapi/volumeops.py:424 +#: nova/virt/vmwareapi/volumeops.py:519 nova/virt/vmwareapi/volumeops.py:548 #: nova/virt/xenapi/volumeops.py:148 #, python-format msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" @@ -8877,16 +8918,16 @@ msgstr "" msgid "Could not determine key: %s" msgstr "" -#: nova/virt/xenapi/driver.py:632 +#: nova/virt/xenapi/driver.py:636 msgid "Host startup on XenServer is not supported." msgstr "" -#: nova/virt/xenapi/fake.py:812 +#: nova/virt/xenapi/fake.py:811 #, python-format msgid "xenapi.fake does not have an implementation for %s" msgstr "" -#: nova/virt/xenapi/fake.py:920 +#: nova/virt/xenapi/fake.py:919 #, python-format msgid "" "xenapi.fake does not have an implementation for %s or it has been called " @@ -9011,224 +9052,224 @@ msgid "" "%(version)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:325 nova/virt/xenapi/vm_utils.py:340 +#: nova/virt/xenapi/vm_utils.py:326 nova/virt/xenapi/vm_utils.py:341 msgid "VM already halted, skipping shutdown..." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:392 +#: nova/virt/xenapi/vm_utils.py:393 #, python-format msgid "VBD %s already detached" msgstr "" -#: nova/virt/xenapi/vm_utils.py:395 +#: nova/virt/xenapi/vm_utils.py:396 #, python-format msgid "" "VBD %(vbd_ref)s uplug failed with \"%(err)s\", attempt " "%(num_attempt)d/%(max_attempts)d" msgstr "" -#: nova/virt/xenapi/vm_utils.py:402 +#: nova/virt/xenapi/vm_utils.py:403 #, python-format msgid "Unable to unplug VBD %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:405 +#: nova/virt/xenapi/vm_utils.py:406 #, python-format msgid "Reached maximum number of retries trying to unplug VBD %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:417 +#: nova/virt/xenapi/vm_utils.py:418 #, python-format msgid "Unable to destroy VBD %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:470 +#: nova/virt/xenapi/vm_utils.py:471 #, python-format msgid "Unable to destroy VDI %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:516 +#: nova/virt/xenapi/vm_utils.py:517 msgid "SR not present and could not be introduced" msgstr "" -#: nova/virt/xenapi/vm_utils.py:700 +#: nova/virt/xenapi/vm_utils.py:701 #, python-format msgid "No primary VDI found for %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:792 +#: nova/virt/xenapi/vm_utils.py:793 #, python-format msgid "" "Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s" " is of type %(type)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:871 +#: nova/virt/xenapi/vm_utils.py:872 #, python-format msgid "Multiple base images for image: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:926 +#: nova/virt/xenapi/vm_utils.py:927 #, python-format msgid "" "VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor " "size of %(new_disk_size)d bytes." msgstr "" -#: nova/virt/xenapi/vm_utils.py:937 nova/virt/xenapi/vmops.py:1037 +#: nova/virt/xenapi/vm_utils.py:938 nova/virt/xenapi/vmops.py:1037 msgid "Can't resize a disk to 0 GB." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:989 +#: nova/virt/xenapi/vm_utils.py:990 msgid "Disk must have only one partition." msgstr "" -#: nova/virt/xenapi/vm_utils.py:994 +#: nova/virt/xenapi/vm_utils.py:995 #, python-format msgid "Disk contains a filesystem we are unable to resize: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:999 +#: nova/virt/xenapi/vm_utils.py:1000 msgid "The only partition should be partition 1." msgstr "" -#: nova/virt/xenapi/vm_utils.py:1010 +#: nova/virt/xenapi/vm_utils.py:1011 #, python-format msgid "Attempted auto_configure_disk failed because: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1261 +#: nova/virt/xenapi/vm_utils.py:1262 #, python-format msgid "" "Fast cloning is only supported on default local SR of type ext. SR on " "this system was found to be of type %s. Ignoring the cow flag." msgstr "" -#: nova/virt/xenapi/vm_utils.py:1336 +#: nova/virt/xenapi/vm_utils.py:1337 #, python-format msgid "Unrecognized cache_images value '%s', defaulting to True" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1412 +#: nova/virt/xenapi/vm_utils.py:1413 #, python-format msgid "Invalid value '%s' for torrent_images" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1435 +#: nova/virt/xenapi/vm_utils.py:1436 #, python-format msgid "Invalid value '%d' for image_compression_level" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1461 +#: nova/virt/xenapi/vm_utils.py:1462 #, python-format msgid "" "Download handler '%(handler)s' raised an exception, falling back to " "default handler '%(default_handler)s'" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1517 +#: nova/virt/xenapi/vm_utils.py:1518 #, python-format msgid "Image size %(size)d exceeded flavor allowed size %(allowed_size)d" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1568 +#: nova/virt/xenapi/vm_utils.py:1569 #, python-format msgid "" "Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " "bytes" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1610 +#: nova/virt/xenapi/vm_utils.py:1611 msgid 
"Failed to fetch glance image" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1818 +#: nova/virt/xenapi/vm_utils.py:1819 #, python-format msgid "Unable to parse rrd of %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1848 +#: nova/virt/xenapi/vm_utils.py:1849 #, python-format msgid "Retry SR scan due to error: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1881 +#: nova/virt/xenapi/vm_utils.py:1882 #, python-format msgid "Flag sr_matching_filter '%s' does not respect formatting convention" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1902 +#: nova/virt/xenapi/vm_utils.py:1903 msgid "" "XenAPI is unable to find a Storage Repository to install guest instances " "on. Please check your configuration (e.g. set a default SR for the pool) " "and/or configure the flag 'sr_matching_filter'." msgstr "" -#: nova/virt/xenapi/vm_utils.py:1915 +#: nova/virt/xenapi/vm_utils.py:1916 msgid "Cannot find SR of content-type ISO" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1968 +#: nova/virt/xenapi/vm_utils.py:1969 #, python-format msgid "" "Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " "%(server)s." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2096 +#: nova/virt/xenapi/vm_utils.py:2097 #, python-format msgid "VHD coalesce attempts exceeded (%d), giving up..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2131 +#: nova/virt/xenapi/vm_utils.py:2132 #, python-format msgid "Timeout waiting for device %s to be created" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2151 +#: nova/virt/xenapi/vm_utils.py:2152 #, python-format msgid "Disconnecting stale VDI %s from compute domU" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2309 +#: nova/virt/xenapi/vm_utils.py:2310 msgid "" "Shrinking the filesystem down with resize2fs has failed, please check if " "you have enough free space on your disk." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:2444 +#: nova/virt/xenapi/vm_utils.py:2445 msgid "Manipulating interface files directly" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2453 +#: nova/virt/xenapi/vm_utils.py:2454 #, python-format msgid "Failed to mount filesystem (expected for non-linux instances): %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2564 +#: nova/virt/xenapi/vm_utils.py:2566 msgid "This domU must be running on the host specified by connection_url" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2633 +#: nova/virt/xenapi/vm_utils.py:2635 msgid "Failed to transfer vhd to new host" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2659 +#: nova/virt/xenapi/vm_utils.py:2661 msgid "ipxe_boot_menu_url not set, user will have to enter URL manually..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2665 +#: nova/virt/xenapi/vm_utils.py:2667 msgid "ipxe_network_name not set, user will have to enter IP manually..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2676 +#: nova/virt/xenapi/vm_utils.py:2678 #, python-format msgid "" "Unable to find network matching '%(network_name)s', user will have to " "enter IP manually..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2700 +#: nova/virt/xenapi/vm_utils.py:2702 #, python-format msgid "ISO creation tool '%s' does not exist." 
msgstr "" diff --git a/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po b/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po index de1d48a079..da03d9857d 100644 --- a/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:08+0000\n" +"POT-Creation-Date: 2014-07-21 06:04+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/nova/" @@ -44,6 +44,11 @@ msgstr "" msgid "Keystone failure: %s" msgstr "" +#: nova/compute/manager.py:5416 +msgid "" +"Periodic sync_power_state task had an error while processing an instance." +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "Falha ao notificar células de falha da instância" @@ -58,7 +63,7 @@ msgstr "Exceção original sendo descartada: %s" msgid "Unexpected exception occurred %d time(s)... retrying." msgstr "Exceção não esperada ocorreu %d vez(es)... tentando novamente." 
-#: nova/openstack/common/lockutils.py:120 +#: nova/openstack/common/lockutils.py:119 #, python-format msgid "Could not release the acquired lock `%s`" msgstr "" @@ -71,22 +76,22 @@ msgstr "em uma chamada de laço de duração fixa" msgid "in dynamic looping call" msgstr "em chamada de laço dinâmico" -#: nova/openstack/common/periodic_task.py:179 +#: nova/openstack/common/periodic_task.py:202 #, python-format msgid "Error during %(full_task_name)s: %(e)s" msgstr "Erro durante %(full_task_name)s: %(e)s" -#: nova/openstack/common/policy.py:511 +#: nova/openstack/common/policy.py:507 #, python-format msgid "Failed to understand rule %s" msgstr "Falha ao interpretar regra %s" -#: nova/openstack/common/policy.py:521 +#: nova/openstack/common/policy.py:517 #, python-format msgid "No handler for matches of kind %s" msgstr "Nenhum manipulador para correspondências do tipo %s" -#: nova/openstack/common/policy.py:791 +#: nova/openstack/common/policy.py:787 #, python-format msgid "Failed to understand rule %r" msgstr "Falha ao interpretar regra %r" @@ -116,137 +121,133 @@ msgstr "Exceção de BD incluída." msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:625 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:749 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:873 +#: nova/virt/libvirt/driver.py:937 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:889 -msgid "During wait destroy, instance disappeared." -msgstr "" - -#: nova/virt/libvirt/driver.py:951 +#: nova/virt/libvirt/driver.py:1015 #, python-format msgid "Error from libvirt during undefine. 
Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:977 +#: nova/virt/libvirt/driver.py:1041 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1389 +#: nova/virt/libvirt/driver.py:1445 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1414 +#: nova/virt/libvirt/driver.py:1470 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1663 +#: nova/virt/libvirt/driver.py:1719 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1749 +#: nova/virt/libvirt/driver.py:1827 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1755 +#: nova/virt/libvirt/driver.py:1833 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1804 +#: nova/virt/libvirt/driver.py:1882 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:1951 +#: nova/virt/libvirt/driver.py:2028 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421 +#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2542 +#: nova/virt/libvirt/driver.py:2619 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2693 +#: nova/virt/libvirt/driver.py:2770 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2786 +#: nova/virt/libvirt/driver.py:2863 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." 
msgstr "" -#: nova/virt/libvirt/driver.py:3553 +#: nova/virt/libvirt/driver.py:3645 #, python-format msgid "An error occurred while trying to define a domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3562 +#: nova/virt/libvirt/driver.py:3654 #, python-format msgid "An error occurred while trying to launch a defined domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3571 +#: nova/virt/libvirt/driver.py:3663 #, python-format msgid "An error occurred while enabling hairpin mode on domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3589 +#: nova/virt/libvirt/driver.py:3681 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3904 +#: nova/virt/libvirt/driver.py:3988 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." msgstr "" -#: nova/virt/libvirt/driver.py:4481 +#: nova/virt/libvirt/driver.py:4667 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5231 +#: nova/virt/libvirt/driver.py:5419 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:202 +#: nova/virt/libvirt/imagebackend.py:201 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:230 +#: nova/virt/libvirt/imagebackend.py:229 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:501 +#: nova/virt/libvirt/imagebackend.py:505 #, python-format msgid "error opening rbd image %s" msgstr "" @@ -266,20 +267,20 @@ msgstr "" msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "" -#: nova/virt/libvirt/lvm.py:201 +#: nova/virt/libvirt/lvm.py:200 #, python-format msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: 
nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572 -#: nova/virt/libvirt/vif.py:596 +#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 +#: nova/virt/libvirt/vif.py:562 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676 -#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717 -#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762 -#: nova/virt/libvirt/vif.py:784 +#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 +#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 +#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 +#: nova/virt/libvirt/vif.py:737 msgid "Failed while unplugging vif" msgstr "" @@ -288,12 +289,18 @@ msgstr "" msgid "Unknown content in connection_info/access_mode: %s" msgstr "" -#: nova/virt/libvirt/volume.py:666 +#: nova/virt/libvirt/volume.py:669 #, python-format msgid "Couldn't unmount the NFS share %s" msgstr "" -#: nova/virt/libvirt/volume.py:815 +#: nova/virt/libvirt/volume.py:818 #, python-format msgid "Couldn't unmount the GlusterFS share %s" msgstr "" + +#: nova/virt/vmwareapi/vmops.py:500 +#, python-format +msgid "" +"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s" +msgstr "" diff --git a/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po b/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po index 29c655d5b6..9694439001 100644 --- a/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" -"PO-Revision-Date: 2014-06-30 05:01+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"PO-Revision-Date: 2014-07-16 14:42+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/nova/" "language/pt_BR/)\n" @@ -19,27 +19,33 @@ msgstr "" "Generated-By: Babel 1.3\n" 
"Plural-Forms: nplurals=2; plural=(n > 1);\n" +#: nova/compute/manager.py:5422 +#, python-format +msgid "" +"During sync_power_state the instance has a pending task (%(task)s). Skip." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "Backdoor de Eventlet escutando na porta %(port)s pelo processo %(pid)d" -#: nova/openstack/common/lockutils.py:83 +#: nova/openstack/common/lockutils.py:82 #, python-format msgid "Created lock path: %s" msgstr "Criado caminho de lock: %s" -#: nova/openstack/common/lockutils.py:250 +#: nova/openstack/common/lockutils.py:251 #, python-format msgid "Failed to remove file %(file)s" msgstr "" -#: nova/openstack/common/periodic_task.py:125 +#: nova/openstack/common/periodic_task.py:126 #, python-format msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "Ignorando tarefa periódica %(task)s porque seu intervalo é negativo" -#: nova/openstack/common/periodic_task.py:130 +#: nova/openstack/common/periodic_task.py:131 #, python-format msgid "Skipping periodic task %(task)s because it is disabled" msgstr "Ignorando tarefa periódica %(task)s porque ela está desativada" @@ -101,97 +107,102 @@ msgstr "Excluindo linha duplicada com ID: %(id)s da tabela: %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/libvirt/driver.py:894 +#: nova/virt/firewall.py:446 +#, python-format +msgid "instance chain %s disappeared during refresh, skipping" +msgstr "" + +#: nova/virt/libvirt/driver.py:839 +#, python-format +msgid "" +"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:958 msgid "Instance destroyed successfully." msgstr "Instância destruída com êxito." -#: nova/virt/libvirt/driver.py:904 +#: nova/virt/libvirt/driver.py:968 msgid "Instance may be started again." 
msgstr "A instância pode ser iniciada novamente." -#: nova/virt/libvirt/driver.py:914 +#: nova/virt/libvirt/driver.py:978 msgid "Going to destroy instance again." msgstr "Destruindo a instância novamente." -#: nova/virt/libvirt/driver.py:1518 +#: nova/virt/libvirt/driver.py:1574 msgid "Beginning live snapshot process" msgstr "Começando o processo de captura instantânea em tempo real" -#: nova/virt/libvirt/driver.py:1521 +#: nova/virt/libvirt/driver.py:1577 msgid "Beginning cold snapshot process" msgstr "Iniciando processo de captura instantânea a frio" -#: nova/virt/libvirt/driver.py:1550 +#: nova/virt/libvirt/driver.py:1606 msgid "Snapshot extracted, beginning image upload" msgstr "Captura instantânea extraída, iniciando upload da imagem" -#: nova/virt/libvirt/driver.py:1562 +#: nova/virt/libvirt/driver.py:1618 msgid "Snapshot image upload complete" msgstr "Upload da imagem de captura instantânea concluído" -#: nova/virt/libvirt/driver.py:1972 +#: nova/virt/libvirt/driver.py:2049 msgid "Instance soft rebooted successfully." msgstr "Reinicialização virtual da instância bem-sucedida." -#: nova/virt/libvirt/driver.py:2015 +#: nova/virt/libvirt/driver.py:2092 msgid "Instance shutdown successfully." msgstr "A instância foi encerrada com êxito." -#: nova/virt/libvirt/driver.py:2023 +#: nova/virt/libvirt/driver.py:2100 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "A instância pode ter sido reinicializada durante a reinicialização virtual, " "portanto retorne agora." -#: nova/virt/libvirt/driver.py:2091 +#: nova/virt/libvirt/driver.py:2168 msgid "Instance rebooted successfully." msgstr "Instância reinicializada com êxito." -#: nova/virt/libvirt/driver.py:2259 +#: nova/virt/libvirt/driver.py:2336 msgid "Instance spawned successfully." msgstr "Feito spawn da instância com êxito." 
-#: nova/virt/libvirt/driver.py:2275 +#: nova/virt/libvirt/driver.py:2352 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "dados: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341 +#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Log do console truncado retornado, %d bytes ignorados" -#: nova/virt/libvirt/driver.py:2568 +#: nova/virt/libvirt/driver.py:2645 msgid "Creating image" msgstr "Criando imagem" -#: nova/virt/libvirt/driver.py:2677 +#: nova/virt/libvirt/driver.py:2754 msgid "Using config drive" msgstr "Usando unidade de configuração" -#: nova/virt/libvirt/driver.py:2686 +#: nova/virt/libvirt/driver.py:2763 #, python-format msgid "Creating config drive at %(path)s" msgstr "Criando unidade de configuração em %(path)s" -#: nova/virt/libvirt/driver.py:3223 +#: nova/virt/libvirt/driver.py:3315 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821 -#: nova/virt/libvirt/driver.py:3849 -#, python-format -msgid "libvirt can't find a domain with id: %s" -msgstr "" - -#: nova/virt/libvirt/driver.py:4109 +#: nova/virt/libvirt/driver.py:4193 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4115 +#: nova/virt/libvirt/driver.py:4199 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -200,26 +211,26 @@ msgstr "" "Não foi possível localizar o domínio em libvirt para a instância %s. 
Não é " "possível obter estatísticas do bloco para o dispositivo" -#: nova/virt/libvirt/driver.py:4330 +#: nova/virt/libvirt/driver.py:4441 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:4986 +#: nova/virt/libvirt/driver.py:5174 msgid "Instance running successfully." msgstr "Instância executando com êxito." -#: nova/virt/libvirt/driver.py:5226 +#: nova/virt/libvirt/driver.py:5414 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5238 +#: nova/virt/libvirt/driver.py:5426 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5241 +#: nova/virt/libvirt/driver.py:5429 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -232,7 +243,7 @@ msgstr "Chamada setup_basic_filtering em nwfilter" msgid "Ensuring static filters" msgstr "Assegurando filtros estáticos" -#: nova/virt/libvirt/firewall.py:306 +#: nova/virt/libvirt/firewall.py:304 msgid "Attempted to unfilter instance which is not filtered" msgstr "Tentou cancelar a filtragem da instância que não foi filtrada" @@ -293,11 +304,11 @@ msgstr "Arquivos base corrompidos: %s" msgid "Removable base files: %s" msgstr "Arquivos base removíveis: %s" -#: nova/virt/libvirt/utils.py:530 +#: nova/virt/libvirt/utils.py:536 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1352 +#: nova/virt/xenapi/vm_utils.py:1353 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po b/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po index 07779ffe21..0eebfc183b 100644 --- a/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:08+0000\n" +"POT-Creation-Date: 2014-07-21 06:04+0000\n" 
"PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Chinese (China) (http://www.transifex.com/projects/p/nova/" @@ -44,6 +44,11 @@ msgstr "" msgid "Keystone failure: %s" msgstr "" +#: nova/compute/manager.py:5416 +msgid "" +"Periodic sync_power_state task had an error while processing an instance." +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "未能通知单元有关实例故障的事项" @@ -58,7 +63,7 @@ msgstr "正在删除原始异常:%s" msgid "Unexpected exception occurred %d time(s)... retrying." msgstr "意外的异常已发生 %d 次...正在重试。" -#: nova/openstack/common/lockutils.py:120 +#: nova/openstack/common/lockutils.py:119 #, python-format msgid "Could not release the acquired lock `%s`" msgstr "" @@ -71,22 +76,22 @@ msgstr "在固定时段内循环调用" msgid "in dynamic looping call" msgstr "在动态循环调用中" -#: nova/openstack/common/periodic_task.py:179 +#: nova/openstack/common/periodic_task.py:202 #, python-format msgid "Error during %(full_task_name)s: %(e)s" msgstr "在 %(full_task_name)s 期间发生错误:%(e)s" -#: nova/openstack/common/policy.py:511 +#: nova/openstack/common/policy.py:507 #, python-format msgid "Failed to understand rule %s" msgstr "未能理解规则 %s" -#: nova/openstack/common/policy.py:521 +#: nova/openstack/common/policy.py:517 #, python-format msgid "No handler for matches of kind %s" msgstr "对于类型为 %s 的匹配项,不存在任何处理程序" -#: nova/openstack/common/policy.py:791 +#: nova/openstack/common/policy.py:787 #, python-format msgid "Failed to understand rule %r" msgstr "未能理解规则 %r " @@ -116,137 +121,133 @@ msgstr "数据库异常被包裹。" msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:625 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." 
msgstr "" -#: nova/virt/libvirt/driver.py:749 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:873 +#: nova/virt/libvirt/driver.py:937 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:889 -msgid "During wait destroy, instance disappeared." -msgstr "" - -#: nova/virt/libvirt/driver.py:951 +#: nova/virt/libvirt/driver.py:1015 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:977 +#: nova/virt/libvirt/driver.py:1041 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1389 +#: nova/virt/libvirt/driver.py:1445 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1414 +#: nova/virt/libvirt/driver.py:1470 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1663 +#: nova/virt/libvirt/driver.py:1719 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1749 +#: nova/virt/libvirt/driver.py:1827 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1755 +#: nova/virt/libvirt/driver.py:1833 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1804 +#: nova/virt/libvirt/driver.py:1882 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:1951 +#: nova/virt/libvirt/driver.py:2028 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421 +#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2542 +#: nova/virt/libvirt/driver.py:2619 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2693 +#: nova/virt/libvirt/driver.py:2770 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2786 +#: nova/virt/libvirt/driver.py:2863 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3553 +#: nova/virt/libvirt/driver.py:3645 #, python-format msgid "An error occurred while trying to define a domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3562 +#: nova/virt/libvirt/driver.py:3654 #, python-format msgid "An error occurred while trying to launch a defined domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3571 +#: nova/virt/libvirt/driver.py:3663 #, python-format msgid "An error occurred while enabling hairpin mode on domain with xml: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3589 +#: nova/virt/libvirt/driver.py:3681 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3904 +#: nova/virt/libvirt/driver.py:3988 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4481 +#: nova/virt/libvirt/driver.py:4667 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5231 +#: nova/virt/libvirt/driver.py:5419 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:202 +#: nova/virt/libvirt/imagebackend.py:201 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:230 +#: nova/virt/libvirt/imagebackend.py:229 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:501 +#: nova/virt/libvirt/imagebackend.py:505 #, python-format msgid "error opening rbd image %s" msgstr "" @@ -266,20 +267,20 @@ msgstr "" msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "" -#: nova/virt/libvirt/lvm.py:201 +#: nova/virt/libvirt/lvm.py:200 #, python-format msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572 -#: nova/virt/libvirt/vif.py:596 +#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 +#: nova/virt/libvirt/vif.py:562 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676 -#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717 -#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762 -#: nova/virt/libvirt/vif.py:784 +#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 +#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 +#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 +#: nova/virt/libvirt/vif.py:737 msgid "Failed while unplugging vif" msgstr "" @@ -288,12 +289,18 @@ msgstr "" msgid "Unknown content in connection_info/access_mode: %s" msgstr "" -#: nova/virt/libvirt/volume.py:666 +#: nova/virt/libvirt/volume.py:669 #, python-format msgid "Couldn't 
unmount the NFS share %s" msgstr "" -#: nova/virt/libvirt/volume.py:815 +#: nova/virt/libvirt/volume.py:818 #, python-format msgid "Couldn't unmount the GlusterFS share %s" msgstr "" + +#: nova/virt/vmwareapi/vmops.py:500 +#, python-format +msgid "" +"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s" +msgstr "" diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po b/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po index ce4fc2c650..9b2f86ba87 100644 --- a/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" -"PO-Revision-Date: 2014-06-30 05:01+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"PO-Revision-Date: 2014-07-16 14:42+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Chinese (China) (http://www.transifex.com/projects/p/nova/" "language/zh_CN/)\n" @@ -19,27 +19,33 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=1; plural=0;\n" +#: nova/compute/manager.py:5422 +#, python-format +msgid "" +"During sync_power_state the instance has a pending task (%(task)s). Skip." 
+msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: nova/openstack/common/lockutils.py:83 +#: nova/openstack/common/lockutils.py:82 #, python-format msgid "Created lock path: %s" msgstr "已创建锁路径:%s" -#: nova/openstack/common/lockutils.py:250 +#: nova/openstack/common/lockutils.py:251 #, python-format msgid "Failed to remove file %(file)s" msgstr "" -#: nova/openstack/common/periodic_task.py:125 +#: nova/openstack/common/periodic_task.py:126 #, python-format msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "正在跳过周期性任务 %(task)s,因为其时间间隔为负" -#: nova/openstack/common/periodic_task.py:130 +#: nova/openstack/common/periodic_task.py:131 #, python-format msgid "Skipping periodic task %(task)s because it is disabled" msgstr "正在跳过周期性任务 %(task)s,因为它已禁用" @@ -101,121 +107,126 @@ msgstr "正在从表 %(table)s 中删除具有id %(id)s 的重复行" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/libvirt/driver.py:894 +#: nova/virt/firewall.py:446 +#, python-format +msgid "instance chain %s disappeared during refresh, skipping" +msgstr "" + +#: nova/virt/libvirt/driver.py:839 +#, python-format +msgid "" +"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:958 msgid "Instance destroyed successfully." msgstr "实例销毁成功。" -#: nova/virt/libvirt/driver.py:904 +#: nova/virt/libvirt/driver.py:968 msgid "Instance may be started again." msgstr "可再次启动实例。" -#: nova/virt/libvirt/driver.py:914 +#: nova/virt/libvirt/driver.py:978 msgid "Going to destroy instance again." 
msgstr "将再次销毁实例。" -#: nova/virt/libvirt/driver.py:1518 +#: nova/virt/libvirt/driver.py:1574 msgid "Beginning live snapshot process" msgstr "正在开始实时快照流程" -#: nova/virt/libvirt/driver.py:1521 +#: nova/virt/libvirt/driver.py:1577 msgid "Beginning cold snapshot process" msgstr "正在结束冷快照流程" -#: nova/virt/libvirt/driver.py:1550 +#: nova/virt/libvirt/driver.py:1606 msgid "Snapshot extracted, beginning image upload" msgstr "已抽取快照,正在开始映像上载" -#: nova/virt/libvirt/driver.py:1562 +#: nova/virt/libvirt/driver.py:1618 msgid "Snapshot image upload complete" msgstr "快照映像上载完成" -#: nova/virt/libvirt/driver.py:1972 +#: nova/virt/libvirt/driver.py:2049 msgid "Instance soft rebooted successfully." msgstr "已成功执行实例软重新引导。" -#: nova/virt/libvirt/driver.py:2015 +#: nova/virt/libvirt/driver.py:2092 msgid "Instance shutdown successfully." msgstr "已成功关闭实例。" -#: nova/virt/libvirt/driver.py:2023 +#: nova/virt/libvirt/driver.py:2100 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "在软重新引导期间,可能已重新引导实例,因此会立即返回。" -#: nova/virt/libvirt/driver.py:2091 +#: nova/virt/libvirt/driver.py:2168 msgid "Instance rebooted successfully." msgstr "实例成功重启。" -#: nova/virt/libvirt/driver.py:2259 +#: nova/virt/libvirt/driver.py:2336 msgid "Instance spawned successfully." 
msgstr "实例成功生产。" -#: nova/virt/libvirt/driver.py:2275 +#: nova/virt/libvirt/driver.py:2352 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "data:%(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341 +#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "已返回截断的控制台日志,忽略了 %d 个字节" -#: nova/virt/libvirt/driver.py:2568 +#: nova/virt/libvirt/driver.py:2645 msgid "Creating image" msgstr "正在创建镜像" -#: nova/virt/libvirt/driver.py:2677 +#: nova/virt/libvirt/driver.py:2754 msgid "Using config drive" msgstr "正在使用配置驱动器" -#: nova/virt/libvirt/driver.py:2686 +#: nova/virt/libvirt/driver.py:2763 #, python-format msgid "Creating config drive at %(path)s" msgstr "正在 %(path)s 处创建配置驱动器" -#: nova/virt/libvirt/driver.py:3223 +#: nova/virt/libvirt/driver.py:3315 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821 -#: nova/virt/libvirt/driver.py:3849 -#, python-format -msgid "libvirt can't find a domain with id: %s" -msgstr "" - -#: nova/virt/libvirt/driver.py:4109 +#: nova/virt/libvirt/driver.py:4193 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4115 +#: nova/virt/libvirt/driver.py:4199 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" msgstr "对于实例 %s,在 libvirt 中找不到域。无法获取设备的块统计信息" -#: nova/virt/libvirt/driver.py:4330 +#: nova/virt/libvirt/driver.py:4441 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:4986 +#: nova/virt/libvirt/driver.py:5174 msgid "Instance running successfully." 
msgstr "实例正在成功运行。" -#: nova/virt/libvirt/driver.py:5226 +#: nova/virt/libvirt/driver.py:5414 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5238 +#: nova/virt/libvirt/driver.py:5426 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5241 +#: nova/virt/libvirt/driver.py:5429 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -228,7 +239,7 @@ msgstr "在 nwfilter 里调用 setup_basic_filtering" msgid "Ensuring static filters" msgstr "正在确保静态过滤器" -#: nova/virt/libvirt/firewall.py:306 +#: nova/virt/libvirt/firewall.py:304 msgid "Attempted to unfilter instance which is not filtered" msgstr "试图不过滤没有过滤的实例" @@ -287,11 +298,11 @@ msgstr "损坏的基文件:%s" msgid "Removable base files: %s" msgstr "可删除的基文件:%s" -#: nova/virt/libvirt/utils.py:530 +#: nova/virt/libvirt/utils.py:536 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1352 +#: nova/virt/xenapi/vm_utils.py:1353 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po b/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po index 3366c8bcfe..0c21ef667d 100644 --- a/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-06-30 06:07+0000\n" +"POT-Creation-Date: 2014-07-21 06:03+0000\n" "PO-Revision-Date: 2014-06-18 19:31+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Chinese (Taiwan) (http://www.transifex.com/projects/p/nova/" @@ -19,27 +19,33 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=1; plural=0;\n" +#: nova/compute/manager.py:5422 +#, python-format +msgid "" +"During sync_power_state the instance has a pending task (%(task)s). Skip." 
+msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "" -#: nova/openstack/common/lockutils.py:83 +#: nova/openstack/common/lockutils.py:82 #, python-format msgid "Created lock path: %s" msgstr "" -#: nova/openstack/common/lockutils.py:250 +#: nova/openstack/common/lockutils.py:251 #, python-format msgid "Failed to remove file %(file)s" msgstr "" -#: nova/openstack/common/periodic_task.py:125 +#: nova/openstack/common/periodic_task.py:126 #, python-format msgid "Skipping periodic task %(task)s because its interval is negative" msgstr "正在跳過定期作業 %(task)s,因為其間隔為負數" -#: nova/openstack/common/periodic_task.py:130 +#: nova/openstack/common/periodic_task.py:131 #, python-format msgid "Skipping periodic task %(task)s because it is disabled" msgstr "正在跳過定期作業 %(task)s,因為它已停用" @@ -101,121 +107,126 @@ msgstr "" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/libvirt/driver.py:894 +#: nova/virt/firewall.py:446 +#, python-format +msgid "instance chain %s disappeared during refresh, skipping" +msgstr "" + +#: nova/virt/libvirt/driver.py:839 +#, python-format +msgid "" +"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:958 msgid "Instance destroyed successfully." msgstr "" -#: nova/virt/libvirt/driver.py:904 +#: nova/virt/libvirt/driver.py:968 msgid "Instance may be started again." msgstr "" -#: nova/virt/libvirt/driver.py:914 +#: nova/virt/libvirt/driver.py:978 msgid "Going to destroy instance again." 
msgstr "" -#: nova/virt/libvirt/driver.py:1518 +#: nova/virt/libvirt/driver.py:1574 msgid "Beginning live snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1521 +#: nova/virt/libvirt/driver.py:1577 msgid "Beginning cold snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1550 +#: nova/virt/libvirt/driver.py:1606 msgid "Snapshot extracted, beginning image upload" msgstr "" -#: nova/virt/libvirt/driver.py:1562 +#: nova/virt/libvirt/driver.py:1618 msgid "Snapshot image upload complete" msgstr "" -#: nova/virt/libvirt/driver.py:1972 +#: nova/virt/libvirt/driver.py:2049 msgid "Instance soft rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2015 +#: nova/virt/libvirt/driver.py:2092 msgid "Instance shutdown successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2023 +#: nova/virt/libvirt/driver.py:2100 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" -#: nova/virt/libvirt/driver.py:2091 +#: nova/virt/libvirt/driver.py:2168 msgid "Instance rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2259 +#: nova/virt/libvirt/driver.py:2336 msgid "Instance spawned successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:2275 +#: nova/virt/libvirt/driver.py:2352 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341 +#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" -#: nova/virt/libvirt/driver.py:2568 +#: nova/virt/libvirt/driver.py:2645 msgid "Creating image" msgstr "" -#: nova/virt/libvirt/driver.py:2677 +#: nova/virt/libvirt/driver.py:2754 msgid "Using config drive" msgstr "" -#: nova/virt/libvirt/driver.py:2686 +#: nova/virt/libvirt/driver.py:2763 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/libvirt/driver.py:3223 +#: nova/virt/libvirt/driver.py:3315 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821 -#: nova/virt/libvirt/driver.py:3849 -#, python-format -msgid "libvirt can't find a domain with id: %s" -msgstr "" - -#: nova/virt/libvirt/driver.py:4109 +#: nova/virt/libvirt/driver.py:4193 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4115 +#: nova/virt/libvirt/driver.py:4199 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" msgstr "" -#: nova/virt/libvirt/driver.py:4330 +#: nova/virt/libvirt/driver.py:4441 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:4986 +#: nova/virt/libvirt/driver.py:5174 msgid "Instance running successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:5226 +#: nova/virt/libvirt/driver.py:5414 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5238 +#: nova/virt/libvirt/driver.py:5426 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5241 +#: nova/virt/libvirt/driver.py:5429 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -228,7 +239,7 @@ msgstr "" msgid "Ensuring static filters" msgstr "" -#: nova/virt/libvirt/firewall.py:306 +#: nova/virt/libvirt/firewall.py:304 msgid "Attempted to unfilter instance which is not filtered" msgstr "" @@ -287,11 +298,11 @@ msgstr "已毀損的基本檔案:%s" msgid "Removable base files: %s" msgstr "可移除的基本檔案:%s" -#: nova/virt/libvirt/utils.py:530 +#: nova/virt/libvirt/utils.py:536 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1352 +#: nova/virt/xenapi/vm_utils.py:1353 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " From cddb01e2a4249509f11a91b11f44d590e77d4c04 Mon Sep 17 00:00:00 2001 From: Lucian Petrut Date: Wed, 19 Mar 2014 04:46:56 +0200 Subject: [PATCH 101/486] Add differencing vhdx resize support in Hyper-V Driver Differencing VHDX images can be resized, unlike differencing VHD images. Even so, the Nova Hyper-V driver currently does not support this. This feature is required for resizing existing instances which use CoW VHDX images and also in order to resize the root disk image when spawning a new instance. The methods which get the internal maximum size of a vhd/vhdx image have been updated to lookup the parent disk and get the according size instead of raising an exception in case of differencing images. 
Change-Id: I867a9b35e5273b96afc443e5c436a15d0c45161b Implements: blueprint add-differencing-vhdx-resize-support --- nova/tests/virt/hyperv/test_hypervapi.py | 20 ++++++-- nova/tests/virt/hyperv/test_vhdutils.py | 40 +++++++++++++-- nova/tests/virt/hyperv/test_vhdutilsv2.py | 28 ++++++++--- nova/virt/hyperv/vhdutils.py | 11 +++-- nova/virt/hyperv/vhdutilsv2.py | 10 ++-- nova/virt/hyperv/vmops.py | 60 ++++++++++++++++------- 6 files changed, 125 insertions(+), 44 deletions(-) diff --git a/nova/tests/virt/hyperv/test_hypervapi.py b/nova/tests/virt/hyperv/test_hypervapi.py index 28fa5f7300..a37ed6b63d 100644 --- a/nova/tests/virt/hyperv/test_hypervapi.py +++ b/nova/tests/virt/hyperv/test_hypervapi.py @@ -1022,15 +1022,25 @@ def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None, m.AndReturn(self._test_instance_dir) self._setup_get_cached_image_mocks(cow, vhd_format) + m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str)) + m.AndReturn({'MaxInternalSize': 1024, 'FileSize': 1024, + 'Type': 2}) if cow: - vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str), - mox.IsA(str)) + m = vhdutils.VHDUtils.get_vhd_format(mox.IsA(str)) + m.AndReturn(vhd_format) + if vhd_format == constants.DISK_FORMAT_VHD: + vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str), + mox.IsA(str)) + else: + m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size( + mox.IsA(str), mox.IsA(object)) + m.AndReturn(1025) + vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str), + mox.IsA(str), + mox.IsA(int)) else: fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str)) - m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str)) - m.AndReturn({'MaxInternalSize': 1024, 'FileSize': 1024, - 'Type': 2}) m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size( mox.IsA(str), mox.IsA(object)) m.AndReturn(1025) diff --git a/nova/tests/virt/hyperv/test_vhdutils.py b/nova/tests/virt/hyperv/test_vhdutils.py index c08a8902e6..e41353329a 100644 --- a/nova/tests/virt/hyperv/test_vhdutils.py +++ 
b/nova/tests/virt/hyperv/test_vhdutils.py @@ -24,6 +24,7 @@ class VHDUtilsTestCase(test.NoDBTestCase): """Unit tests for the Hyper-V VHDUtils class.""" _FAKE_VHD_PATH = "C:\\fake_path.vhdx" + _FAKE_PARENT_PATH = "C:\\fake_parent_path.vhdx" _FAKE_FORMAT = 3 _FAKE_MAK_INTERNAL_SIZE = 1000 _FAKE_JOB_PATH = 'fake_job_path' @@ -51,6 +52,26 @@ def test_create_dynamic_vhd(self): Path=self._FAKE_VHD_PATH, MaxInternalSize=self._FAKE_MAK_INTERNAL_SIZE) + def test_create_differencing_vhd(self): + mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0] + mock_img_svc.CreateDifferencingVirtualHardDisk.return_value = ( + self._FAKE_JOB_PATH, self._FAKE_RET_VAL) + + self._vhdutils.create_differencing_vhd(self._FAKE_VHD_PATH, + self._FAKE_PARENT_PATH) + + mock_img_svc.CreateDifferencingVirtualHardDisk.assert_called_once_with( + Path=self._FAKE_VHD_PATH, + ParentPath=self._FAKE_PARENT_PATH) + + def test_create_differencing_vhd_with_new_size(self): + fake_new_size = 1024 + self.assertRaises(vmutils.HyperVException, + self._vhdutils.create_differencing_vhd, + self._FAKE_VHD_PATH, + self._FAKE_PARENT_PATH, + fake_new_size) + def test_get_internal_vhd_size_by_file_size_fixed(self): vhdutil = vhdutils.VHDUtils() root_vhd_size = 1 * 1024 ** 3 @@ -76,15 +97,24 @@ def test_get_internal_vhd_size_by_file_size_dynamic(self): expected_vhd_size = 20 * 1024 ** 3 - 43008 self.assertEqual(expected_vhd_size, real_size) - def test_get_internal_vhd_size_by_file_size_unsupported(self): + def test_get_internal_vhd_size_by_file_size_differencing(self): + # For differencing images, the internal size of the parent vhd + # is returned vhdutil = vhdutils.VHDUtils() root_vhd_size = 20 * 1024 ** 3 vhdutil.get_vhd_info = mock.MagicMock() - vhdutil.get_vhd_info.return_value = {'Type': 5} + vhdutil.get_vhd_parent_path = mock.MagicMock() + vhdutil.get_vhd_parent_path.return_value = self._FAKE_VHD_PATH + vhdutil.get_vhd_info.side_effect = [ + {'Type': 4}, {'Type': constants.VHD_TYPE_DYNAMIC}] - 
self.assertRaises(vmutils.HyperVException, - vhdutil.get_internal_vhd_size_by_file_size, - None, root_vhd_size) + vhdutil._get_vhd_dynamic_blk_size = mock.MagicMock() + vhdutil._get_vhd_dynamic_blk_size.return_value = 2097152 + + real_size = vhdutil.get_internal_vhd_size_by_file_size(None, + root_vhd_size) + expected_vhd_size = 20 * 1024 ** 3 - 43008 + self.assertEqual(expected_vhd_size, real_size) def test_get_vhd_format_vhdx(self): with mock.patch('nova.virt.hyperv.vhdutils.open', diff --git a/nova/tests/virt/hyperv/test_vhdutilsv2.py b/nova/tests/virt/hyperv/test_vhdutilsv2.py index 4058654c9d..d813057724 100644 --- a/nova/tests/virt/hyperv/test_vhdutilsv2.py +++ b/nova/tests/virt/hyperv/test_vhdutilsv2.py @@ -35,6 +35,11 @@ class VHDUtilsV2TestCase(test.NoDBTestCase): _FAKE_LOG_SIZE = 1048576 _FAKE_LOGICAL_SECTOR_SIZE = 4096 _FAKE_METADATA_SIZE = 1048576 + _FAKE_VHD_INFO = {'ParentPath': _FAKE_PARENT_VHD_PATH, + 'Format': _FAKE_FORMAT, + 'BlockSize': _FAKE_BLOCK_SIZE, + 'LogicalSectorSize': _FAKE_LOGICAL_SECTOR_SIZE, + 'Type': _FAKE_TYPE} def setUp(self): self._vhdutils = vhdutilsv2.VHDUtilsV2() @@ -166,13 +171,16 @@ def test_resize_vhd(self): self.mock_get.assert_called_once_with(self._FAKE_VHD_PATH, self._FAKE_MAK_INTERNAL_SIZE) - def test_get_vhdx_internal_size(self): - self._vhdutils.get_vhd_info = mock.MagicMock( - return_value={'ParentPath': self._FAKE_PARENT_VHD_PATH, - 'Format': self._FAKE_FORMAT, - 'BlockSize': self._FAKE_BLOCK_SIZE, - 'LogicalSectorSize': self._FAKE_LOGICAL_SECTOR_SIZE, - 'Type': self._FAKE_TYPE}) + def _test_get_vhdx_internal_size(self, vhd_type): + self._vhdutils.get_vhd_info = mock.MagicMock() + self._vhdutils.get_vhd_parent_path = mock.Mock( + return_value=self._FAKE_PARENT_VHD_PATH) + + if vhd_type == 4: + self._vhdutils.get_vhd_info.side_effect = [ + {'Type': vhd_type}, self._FAKE_VHD_INFO] + else: + self._vhdutils.get_vhd_info.return_value = self._FAKE_VHD_INFO self._vhdutils._get_vhdx_log_size = mock.MagicMock( 
return_value=self._FAKE_LOG_SIZE) self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock( @@ -189,6 +197,12 @@ def test_get_vhdx_internal_size(self): self.assertEqual(self._FAKE_MAK_INTERNAL_SIZE - self._FAKE_BLOCK_SIZE, internal_size) + def test_get_vhdx_internal_size_dynamic(self): + self._test_get_vhdx_internal_size(3) + + def test_get_vhdx_internal_size_differencing(self): + self._test_get_vhdx_internal_size(4) + def test_get_vhdx_current_header(self): VHDX_HEADER_OFFSETS = [64 * 1024, 128 * 1024] fake_sequence_numbers = ['\x01\x00\x00\x00\x00\x00\x00\x00', diff --git a/nova/virt/hyperv/vhdutils.py b/nova/virt/hyperv/vhdutils.py index 801533ba80..af611efb6a 100644 --- a/nova/virt/hyperv/vhdutils.py +++ b/nova/virt/hyperv/vhdutils.py @@ -72,7 +72,10 @@ def create_dynamic_vhd(self, path, max_internal_size, format): Path=path, MaxInternalSize=max_internal_size) self._vmutils.check_ret_val(ret_val, job_path) - def create_differencing_vhd(self, path, parent_path): + def create_differencing_vhd(self, path, parent_path, size=None): + if size is not None: + raise vmutils.HyperVException(_('VHD differencing disks cannot be ' + 'resized')) image_man_svc = self._conn.Msvm_ImageManagementService()[0] (job_path, ret_val) = image_man_svc.CreateDifferencingVirtualHardDisk( @@ -148,9 +151,9 @@ def get_internal_vhd_size_by_file_size(self, vhd_path, new_vhd_file_size): (hs + ddhs + fs)) * bs / (bes + bs) return max_internal_size else: - raise vmutils.HyperVException(_("The %(vhd_type)s type VHD " - "is not supported") % - {"vhd_type": vhd_type}) + vhd_parent = self.get_vhd_parent_path(vhd_path) + return self.get_internal_vhd_size_by_file_size(vhd_parent, + new_vhd_file_size) def _get_vhd_dynamic_blk_size(self, vhd_path): blk_size_offset = VHD_BLK_SIZE_OFFSET diff --git a/nova/virt/hyperv/vhdutilsv2.py b/nova/virt/hyperv/vhdutilsv2.py index 8d865aac24..9c26861cc1 100644 --- a/nova/virt/hyperv/vhdutilsv2.py +++ b/nova/virt/hyperv/vhdutilsv2.py @@ -67,11 +67,12 @@ def 
create_dynamic_vhd(self, path, max_internal_size, format): self._create_vhd(self._VHD_TYPE_DYNAMIC, vhd_format, path, max_internal_size=max_internal_size) - def create_differencing_vhd(self, path, parent_path): + def create_differencing_vhd(self, path, parent_path, size=None): parent_vhd_info = self.get_vhd_info(parent_path) self._create_vhd(self._VHD_TYPE_DIFFERENCING, parent_vhd_info["Format"], - path, parent_path=parent_path) + path, parent_path=parent_path, + max_internal_size=size) def _create_vhd(self, vhd_type, format, path, max_internal_size=None, parent_path=None): @@ -132,8 +133,9 @@ def get_internal_vhd_size_by_file_size(self, vhd_path, vhd_info = self.get_vhd_info(vhd_path) vhd_type = vhd_info['Type'] if vhd_type == self._VHD_TYPE_DIFFERENCING: - raise vmutils.HyperVException(_("Differencing VHDX images " - "are not supported")) + vhd_parent = self.get_vhd_parent_path(vhd_path) + return self.get_internal_vhd_size_by_file_size(vhd_parent, + new_vhd_file_size) else: try: with open(vhd_path, 'rb') as f: diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py index 177696d6cb..6f12429770 100644 --- a/nova/virt/hyperv/vmops.py +++ b/nova/virt/hyperv/vmops.py @@ -136,9 +136,12 @@ def get_info(self, instance): def _create_root_vhd(self, context, instance): base_vhd_path = self._imagecache.get_cached_image(context, instance) + base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path) + base_vhd_size = base_vhd_info['MaxInternalSize'] format_ext = base_vhd_path.split('.')[-1] root_vhd_path = self._pathutils.get_root_vhd_path(instance['name'], format_ext) + root_vhd_size = instance['root_gb'] * units.Gi try: if CONF.use_cow_images: @@ -147,8 +150,24 @@ def _create_root_vhd(self, context, instance): {'base_vhd_path': base_vhd_path, 'root_vhd_path': root_vhd_path}, instance=instance) - self._vhdutils.create_differencing_vhd(root_vhd_path, - base_vhd_path) + vhd_type = self._vhdutils.get_vhd_format(base_vhd_path) + if vhd_type == 
constants.DISK_FORMAT_VHDX: + # Differencing vhdx images can be resized, so we use + # the flavor size when creating the root image + root_vhd_internal_size = ( + self._vhdutils.get_internal_vhd_size_by_file_size( + base_vhd_path, root_vhd_size)) + if not self._is_resize_needed(root_vhd_path, base_vhd_size, + root_vhd_internal_size, + instance): + root_vhd_internal_size = None + + self._vhdutils.create_differencing_vhd( + root_vhd_path, base_vhd_path, root_vhd_internal_size) + else: + # The base image had already been resized + self._vhdutils.create_differencing_vhd(root_vhd_path, + base_vhd_path) else: LOG.debug("Copying VHD image %(base_vhd_path)s to target: " "%(root_vhd_path)s", @@ -157,27 +176,13 @@ def _create_root_vhd(self, context, instance): instance=instance) self._pathutils.copyfile(base_vhd_path, root_vhd_path) - base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path) - base_vhd_size = base_vhd_info['MaxInternalSize'] - root_vhd_size = instance['root_gb'] * units.Gi - root_vhd_internal_size = ( self._vhdutils.get_internal_vhd_size_by_file_size( root_vhd_path, root_vhd_size)) - if root_vhd_internal_size < base_vhd_size: - error_msg = _("Cannot resize a VHD to a smaller size, the" - " original size is %(base_vhd_size)s, the" - " newer size is %(root_vhd_size)s" - ) % {'base_vhd_size': base_vhd_size, - 'root_vhd_size': root_vhd_internal_size} - raise vmutils.HyperVException(error_msg) - elif root_vhd_internal_size > base_vhd_size: - LOG.debug("Resizing VHD %(root_vhd_path)s to new " - "size %(root_vhd_size)s", - {'root_vhd_size': root_vhd_internal_size, - 'root_vhd_path': root_vhd_path}, - instance=instance) + if self._is_resize_needed(root_vhd_path, base_vhd_size, + root_vhd_internal_size, + instance): self._vhdutils.resize_vhd(root_vhd_path, root_vhd_internal_size, is_file_max_size=False) @@ -188,6 +193,23 @@ def _create_root_vhd(self, context, instance): return root_vhd_path + def _is_resize_needed(self, vhd_path, old_size, new_size, instance): + if 
new_size < old_size: + error_msg = _("Cannot resize a VHD to a smaller size, the" + " original size is %(old_size)s, the" + " newer size is %(new_size)s" + ) % {'old_size': old_size, + 'new_size': new_size} + raise vmutils.VHDResizeException(error_msg) + elif new_size > old_size: + LOG.debug("Resizing VHD %(vhd_path)s to new " + "size %(new_size)s" % + {'new_size': new_size, + 'vhd_path': vhd_path}, + instance=instance) + return True + return False + def create_ephemeral_vhd(self, instance): eph_vhd_size = instance.get('ephemeral_gb', 0) * units.Gi if eph_vhd_size: From d5a70de4793a1e44056c55121505edc63fd36969 Mon Sep 17 00:00:00 2001 From: "Leandro I. Costantino" Date: Sat, 8 Feb 2014 12:23:30 -0300 Subject: [PATCH 102/486] Add APIv2 support to make host optional on evacuate In the event of an unrecoverable hardware failure, support needs to relocate an instance to another compute so it can be rebuilt. The changes involved in this patch are: [*] Add a new v2 extension to determine that the host argument on evacuate is now optional.(Extended_evacuate_find_host) [*] Doc regeneration. DocImpact: The evacuate target host is now optional. If 'host' field is not sent in the request, the scheduler will determine the target host. This will include nova client changes ( on the proper commit ) to support this new optional parameter. Implements: blueprint find-host-and-evacuate-instance Change-Id: Ib34fb3120263b746ad2f8fe89c14137e36a07a53 Co-Authored-By: Juan M. 
Olle Co-Authored-By: Andres Buraschi Co-Authored-By: Anuj Mathur Co-Authored-By: Navneet Kumar Co-Authored-By: Claxton Correya --- .../all_extensions/extensions-get-resp.json | 8 ++ .../all_extensions/extensions-get-resp.xml | 5 + .../server-evacuate-find-host-req.json | 6 + .../server-evacuate-find-host-req.xml | 4 + .../server-evacuate-find-host-resp.json | 3 + .../server-evacuate-find-host-resp.xml | 2 + .../server-post-req.json | 16 +++ .../server-post-req.xml | 19 +++ .../server-post-resp.json | 16 +++ .../server-post-resp.xml | 6 + .../api/openstack/compute/contrib/evacuate.py | 27 +++-- .../contrib/extended_evacuate_find_host.py | 26 ++++ .../compute/contrib/test_evacuate.py | 5 + .../test_extended_evacuate_find_host.py | 114 ++++++++++++++++++ .../extensions-get-resp.json.tpl | 8 ++ .../extensions-get-resp.xml.tpl | 3 + .../server-evacuate-find-host-req.json.tpl | 6 + .../server-evacuate-find-host-req.xml.tpl | 5 + .../server-evacuate-find-host-resp.json.tpl | 3 + .../server-evacuate-find-host-resp.xml.tpl | 2 + .../server-post-req.json.tpl | 16 +++ .../server-post-req.xml.tpl | 19 +++ .../server-post-resp.json.tpl | 16 +++ .../server-post-resp.xml.tpl | 6 + nova/tests/integrated/test_api_samples.py | 47 ++++++++ 25 files changed, 379 insertions(+), 9 deletions(-) create mode 100644 doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json create mode 100644 doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml create mode 100644 doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json create mode 100644 doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml create mode 100644 doc/api_samples/os-extended-evacuate-find-host/server-post-req.json create mode 100644 doc/api_samples/os-extended-evacuate-find-host/server-post-req.xml create mode 100644 doc/api_samples/os-extended-evacuate-find-host/server-post-resp.json create mode 100644 
doc/api_samples/os-extended-evacuate-find-host/server-post-resp.xml create mode 100644 nova/api/openstack/compute/contrib/extended_evacuate_find_host.py create mode 100644 nova/tests/api/openstack/compute/contrib/test_extended_evacuate_find_host.py create mode 100644 nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json index 99229da36b..4d4967accf 100644 --- a/doc/api_samples/all_extensions/extensions-get-resp.json +++ b/doc/api_samples/all_extensions/extensions-get-resp.json @@ -272,6 +272,14 @@ "namespace": "http://docs.openstack.org/compute/ext/evacuate/api/v2", "updated": "2013-01-06T00:00:00Z" }, + { + "alias": "os-extended-evacuate-find-host", + "description": "Enables server evacuation without target host. 
Scheduler will select\n one to target.\n ", + "links": [], + "name": "ExtendedEvacuateFindHost", + "namespace": "http://docs.openstack.org/compute/ext/extended_evacuate_find_host/api/v2", + "updated": "2014-02-12T00:00:00Z" + }, { "alias": "os-extended-floating-ips", "description": "Adds optional fixed_address to the add floating IP command.", diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml index eb4e6e32d3..d69e8ab40a 100644 --- a/doc/api_samples/all_extensions/extensions-get-resp.xml +++ b/doc/api_samples/all_extensions/extensions-get-resp.xml @@ -118,6 +118,11 @@ Enables server evacuation. + + Enables server evacuation without target host. Scheduler will select + one to target. + + Adds optional fixed_address to the add floating IP command. diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json new file mode 100644 index 0000000000..e9ee83481c --- /dev/null +++ b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json @@ -0,0 +1,6 @@ +{ + "evacuate": { + "adminPass": "MySecretPass", + "onSharedStorage": "False" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml new file mode 100644 index 0000000000..4faf14a785 --- /dev/null +++ b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml @@ -0,0 +1,4 @@ + + \ No newline at end of file diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json new file mode 100644 index 0000000000..6cd942395f --- /dev/null +++ 
b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json @@ -0,0 +1,3 @@ +{ + "adminPass": "MySecretPass" +} \ No newline at end of file diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml new file mode 100644 index 0000000000..5823886702 --- /dev/null +++ b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml @@ -0,0 +1,2 @@ + +MySecretPass \ No newline at end of file diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-post-req.json b/doc/api_samples/os-extended-evacuate-find-host/server-post-req.json new file mode 100644 index 0000000000..d88eb41222 --- /dev/null +++ b/doc/api_samples/os-extended-evacuate-find-host/server-post-req.json @@ -0,0 +1,16 @@ +{ + "server" : { + "name" : "new-server-test", + "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/openstack/flavors/1", + "metadata" : { + "My Server Name" : "Apache1" + }, + "personality" : [ + { + "path" : "/etc/banner.txt", + "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" + } + ] + } +} \ No newline at end of file diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-post-req.xml b/doc/api_samples/os-extended-evacuate-find-host/server-post-req.xml new file mode 100644 index 0000000000..0a3c8bb530 --- /dev/null +++ b/doc/api_samples/os-extended-evacuate-find-host/server-post-req.xml @@ -0,0 +1,19 @@ + + + + Apache1 + + + + 
ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp + dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k + IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs + c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g + QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo + ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv + dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy + c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 + b25zLiINCg0KLVJpY2hhcmQgQmFjaA== + + + \ No newline at end of file diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-post-resp.json b/doc/api_samples/os-extended-evacuate-find-host/server-post-resp.json new file mode 100644 index 0000000000..e07dceaeaa --- /dev/null +++ b/doc/api_samples/os-extended-evacuate-find-host/server-post-resp.json @@ -0,0 +1,16 @@ +{ + "server": { + "adminPass": "y6hsKno56L6R", + "id": "1c650ba2-6a76-41d1-805c-64f4e312200e", + "links": [ + { + "href": "http://openstack.example.com/v2/openstack/servers/1c650ba2-6a76-41d1-805c-64f4e312200e", + "rel": "self" + }, + { + "href": "http://openstack.example.com/openstack/servers/1c650ba2-6a76-41d1-805c-64f4e312200e", + "rel": "bookmark" + } + ] + } +} \ No newline at end of file diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-post-resp.xml b/doc/api_samples/os-extended-evacuate-find-host/server-post-resp.xml new file mode 100644 index 0000000000..ad40d9e731 --- /dev/null +++ b/doc/api_samples/os-extended-evacuate-find-host/server-post-resp.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/nova/api/openstack/compute/contrib/evacuate.py b/nova/api/openstack/compute/contrib/evacuate.py index cd6f8f4ccb..ba5e62ca4a 100644 --- a/nova/api/openstack/compute/contrib/evacuate.py +++ b/nova/api/openstack/compute/contrib/evacuate.py @@ -28,15 +28,17 @@ class Controller(wsgi.Controller): - def __init__(self, *args, **kwargs): + def __init__(self, ext_mgr, *args, **kwargs): super(Controller, self).__init__(*args, **kwargs) self.compute_api = 
compute.API() self.host_api = compute.HostAPI() + self.ext_mgr = ext_mgr @wsgi.action('evacuate') def _evacuate(self, req, id, body): """Permit admins to evacuate a server from a failed host to a new one. + If host is empty, the scheduler will select one. """ context = req.environ["nova.context"] authorize(context) @@ -45,12 +47,18 @@ def _evacuate(self, req, id, body): raise exc.HTTPBadRequest(_("Malformed request body")) evacuate_body = body["evacuate"] + host = evacuate_body.get("host") + + if (not host and + not self.ext_mgr.is_loaded('os-extended-evacuate-find-host')): + msg = _("host must be specified.") + raise exc.HTTPBadRequest(explanation=msg) + try: - host = evacuate_body["host"] on_shared_storage = strutils.bool_from_string( evacuate_body["onSharedStorage"]) except (TypeError, KeyError): - msg = _("host and onSharedStorage must be specified.") + msg = _("onSharedStorage must be specified.") raise exc.HTTPBadRequest(explanation=msg) password = None @@ -65,11 +73,12 @@ def _evacuate(self, req, id, body): elif not on_shared_storage: password = utils.generate_password() - try: - self.host_api.service_get_by_compute_host(context, host) - except exception.NotFound: - msg = _("Compute host %s not found.") % host - raise exc.HTTPNotFound(explanation=msg) + if host is not None: + try: + self.host_api.service_get_by_compute_host(context, host) + except exception.NotFound: + msg = _("Compute host %s not found.") % host + raise exc.HTTPNotFound(explanation=msg) try: instance = self.compute_api.get(context, id, want_objects=True) @@ -99,6 +108,6 @@ class Evacuate(extensions.ExtensionDescriptor): updated = "2013-01-06T00:00:00Z" def get_controller_extensions(self): - controller = Controller() + controller = Controller(self.ext_mgr) extension = extensions.ControllerExtension(self, 'servers', controller) return [extension] diff --git a/nova/api/openstack/compute/contrib/extended_evacuate_find_host.py b/nova/api/openstack/compute/contrib/extended_evacuate_find_host.py 
new file mode 100644 index 0000000000..2dfe3faff5 --- /dev/null +++ b/nova/api/openstack/compute/contrib/extended_evacuate_find_host.py @@ -0,0 +1,26 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from nova.api.openstack import extensions + + +class Extended_evacuate_find_host(extensions.ExtensionDescriptor): + """Enables server evacuation without target host. Scheduler will select + one to target. + """ + name = "ExtendedEvacuateFindHost" + alias = "os-extended-evacuate-find-host" + namespace = ("http://docs.openstack.org/compute/ext/" + "extended_evacuate_find_host/api/v2") + updated = "2014-02-12T00:00:00Z" diff --git a/nova/tests/api/openstack/compute/contrib/test_evacuate.py b/nova/tests/api/openstack/compute/contrib/test_evacuate.py index 5ef88da3a7..ed548e7e77 100644 --- a/nova/tests/api/openstack/compute/contrib/test_evacuate.py +++ b/nova/tests/api/openstack/compute/contrib/test_evacuate.py @@ -69,6 +69,11 @@ def setUp(self): for _method in self._methods: self.stubs.Set(compute_api.API, _method, fake_compute_api) + self.flags( + osapi_compute_extension=[ + 'nova.api.openstack.compute.contrib.select_extensions'], + osapi_compute_ext_list=['Evacuate']) + def _get_admin_context(self, user_id='fake', project_id='fake'): ctxt = context.get_admin_context() ctxt.user_id = user_id diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_evacuate_find_host.py b/nova/tests/api/openstack/compute/contrib/test_extended_evacuate_find_host.py new file 
mode 100644 index 0000000000..187ff5a056 --- /dev/null +++ b/nova/tests/api/openstack/compute/contrib/test_extended_evacuate_find_host.py @@ -0,0 +1,114 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +import mock +import webob + +from nova.compute import vm_states +from nova import context +from nova.objects import instance as instance_obj +from nova.openstack.common import jsonutils +from nova import test +from nova.tests.api.openstack import fakes +from nova.tests import fake_instance + + +class ExtendedEvacuateFindHostTest(test.NoDBTestCase): + + def setUp(self): + super(ExtendedEvacuateFindHostTest, self).setUp() + self.flags( + osapi_compute_extension=[ + 'nova.api.openstack.compute.contrib.select_extensions'], + osapi_compute_ext_list=['Extended_evacuate_find_host', + 'Evacuate']) + self.UUID = uuid.uuid4() + + def _get_admin_context(self, user_id='fake', project_id='fake'): + ctxt = context.get_admin_context() + ctxt.user_id = user_id + ctxt.project_id = project_id + return ctxt + + def _fake_compute_api(*args, **kwargs): + return True + + def _fake_compute_api_get(self, context, instance_id, **kwargs): + instance = fake_instance.fake_db_instance(id=1, uuid=uuid, + task_state=None, + host='host1', + vm_state=vm_states.ACTIVE) + instance = instance_obj.Instance._from_db_object(context, + instance_obj.Instance(), + instance) + return instance + + def _fake_service_get_by_compute_host(self, context, host): 
+ return {'host_name': host, + 'service': 'compute', + 'zone': 'nova' + } + + @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host') + @mock.patch('nova.compute.api.API.get') + @mock.patch('nova.compute.api.API.evacuate') + def test_evacuate_instance_with_no_target(self, evacuate_mock, + api_get_mock, + service_get_mock): + service_get_mock.side_effects = self._fake_service_get_by_compute_host + api_get_mock.side_effects = self._fake_compute_api_get + evacuate_mock.side_effects = self._fake_compute_api + + ctxt = self._get_admin_context() + app = fakes.wsgi_app(fake_auth_context=ctxt) + req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID) + req.method = 'POST' + req.body = jsonutils.dumps({ + 'evacuate': { + 'onSharedStorage': 'False', + 'adminPass': 'MyNewPass' + } + }) + req.content_type = 'application/json' + res = req.get_response(app) + self.assertEqual(200, res.status_int) + evacuate_mock.assert_called_once_with(mock.ANY, mock.ANY, None, + mock.ANY, mock.ANY) + + @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host') + @mock.patch('nova.compute.api.API.get') + def test_no_target_fails_if_extension_not_loaded(self, api_get_mock, + service_get_mock): + self.flags( + osapi_compute_extension=[ + 'nova.api.openstack.compute.contrib.select_extensions'], + osapi_compute_ext_list=['Evacuate']) + service_get_mock.side_effects = self._fake_service_get_by_compute_host + api_get_mock.side_effects = self._fake_compute_api_get + + ctxt = self._get_admin_context() + app = fakes.wsgi_app(fake_auth_context=ctxt) + req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID) + req.method = 'POST' + req.body = jsonutils.dumps({ + 'evacuate': { + 'onSharedStorage': 'False', + 'adminPass': 'MyNewPass' + } + }) + req.content_type = 'application/json' + res = req.get_response(app) + self.assertEqual(400, res.status_int) diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl 
b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl index c408950225..ac203b83d4 100644 --- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl +++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl @@ -671,6 +671,14 @@ "name": "ServerGroups", "namespace": "http://docs.openstack.org/compute/ext/servergroups/api/v2", "updated": "%(isotime)s" + }, + { + "alias": "os-extended-evacuate-find-host", + "description": "%(text)s", + "links": [], + "name": "ExtendedEvacuateFindHost", + "namespace": "http://docs.openstack.org/compute/ext/extended_evacuate_find_host/api/v2", + "updated": "%(isotime)s" } ] } diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl index 08c9b9f3c8..308d32136b 100644 --- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl @@ -251,4 +251,7 @@ %(text)s + + %(text)s + diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl new file mode 100644 index 0000000000..5e2c2e6ef0 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl @@ -0,0 +1,6 @@ +{ + "evacuate": { + "adminPass": "%(adminPass)s", + "onSharedStorage": "%(onSharedStorage)s" + } +} diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl new file mode 100644 index 0000000000..a86c9e5c8a --- /dev/null +++ 
b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl @@ -0,0 +1,5 @@ + + + diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl new file mode 100644 index 0000000000..0da07da5b8 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl @@ -0,0 +1,3 @@ +{ + "adminPass": "%(password)s" +} diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl new file mode 100644 index 0000000000..b3b95fdde4 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl @@ -0,0 +1,2 @@ + +%(password)s \ No newline at end of file diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl new file mode 100644 index 0000000000..d3916d1aa6 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl @@ -0,0 +1,16 @@ +{ + "server" : { + "name" : "new-server-test", + "imageRef" : "%(host)s/openstack/images/%(image_id)s", + "flavorRef" : "%(host)s/openstack/flavors/1", + "metadata" : { + "My Server Name" : "Apache1" + }, + "personality" : [ + { + "path" : "/etc/banner.txt", + "contents" : 
"ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" + } + ] + } +} diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl new file mode 100644 index 0000000000..f926149842 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl @@ -0,0 +1,19 @@ + + + + Apache1 + + + + ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp + dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k + IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs + c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g + QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo + ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv + dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy + c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 + b25zLiINCg0KLVJpY2hhcmQgQmFjaA== + + + diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl new file mode 100644 index 0000000000..d5f030c873 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl @@ -0,0 +1,16 @@ +{ + "server": { + "adminPass": "%(password)s", + "id": "%(id)s", + "links": [ + { + "href": "%(host)s/v2/openstack/servers/%(uuid)s", + "rel": "self" + }, + { + "href": "%(host)s/openstack/servers/%(uuid)s", + "rel": "bookmark" + } + ] + } +} diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl 
b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl new file mode 100644 index 0000000000..3bb13e69bd --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl @@ -0,0 +1,6 @@ + + + + + + diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index 29aa19af93..86494c6813 100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -3238,6 +3238,53 @@ class EvacuateXmlTest(EvacuateJsonTest): ctype = 'xml' +class EvacuateFindHostSampleJsonTest(ServersSampleBase): + extends_name = ("nova.api.openstack.compute.contrib" + ".evacuate.Evacuate") + + extension_name = ("nova.api.openstack.compute.contrib" + ".extended_evacuate_find_host.Extended_evacuate_find_host") + + @mock.patch('nova.compute.manager.ComputeManager._check_instance_exists') + @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host') + @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance') + def test_server_evacuate(self, rebuild_mock, service_get_mock, + check_instance_mock): + self.uuid = self._post_server() + + req_subs = { + "adminPass": "MySecretPass", + "onSharedStorage": 'False' + } + + check_instance_mock.return_value = False + + def fake_service_get_by_compute_host(self, context, host): + return { + 'host_name': host, + 'service': 'compute', + 'zone': 'nova' + } + service_get_mock.side_effect = fake_service_get_by_compute_host + with mock.patch.object(service_group_api.API, 'service_is_up', + return_value=False): + response = self._do_post('servers/%s/action' % self.uuid, + 'server-evacuate-find-host-req', req_subs) + subs = self._get_regexes() + self._verify_response('server-evacuate-find-host-resp', subs, + response, 200) + rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY, + orig_image_ref=mock.ANY, image_ref=mock.ANY, + injected_files=mock.ANY, new_pass="MySecretPass", + 
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY, + on_shared_storage=False, preserve_ephemeral=mock.ANY, + host=None) + + +class EvacuateFindHostSampleXmlTests(EvacuateFindHostSampleJsonTest): + ctype = "xml" + + class FloatingIpDNSJsonTest(ApiSampleTestBaseV2): extension_name = ("nova.api.openstack.compute.contrib.floating_ip_dns." "Floating_ip_dns") From 642f1b2e83769580ba4537d49c8bb7207f33c4d3 Mon Sep 17 00:00:00 2001 From: Qin Zhao Date: Wed, 2 Jul 2014 14:29:16 +0800 Subject: [PATCH 103/486] Sync loopingcall from oslo This change syncs: bc48099 Log the function name of looping call ab5d5f1 Use timestamp in loopingcall e377393 Changes calcuation of variable delay Change-Id: Ie93695b2b76bc258922495911bc40c5aaf414a91 --- nova/openstack/common/loopingcall.py | 40 +++++++++++++++------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/nova/openstack/common/loopingcall.py b/nova/openstack/common/loopingcall.py index d072d24ada..82411af926 100644 --- a/nova/openstack/common/loopingcall.py +++ b/nova/openstack/common/loopingcall.py @@ -16,31 +16,36 @@ # under the License. import sys +import time from eventlet import event from eventlet import greenthread from nova.openstack.common.gettextutils import _LE, _LW from nova.openstack.common import log as logging -from nova.openstack.common import timeutils LOG = logging.getLogger(__name__) +# NOTE(zyluo): This lambda function was declared to avoid mocking collisions +# with time.time() called in the standard logging module +# during unittests. +_ts = lambda: time.time() + class LoopingCallDone(Exception): - """Exception to break out and stop a LoopingCall. + """Exception to break out and stop a LoopingCallBase. - The poll-function passed to LoopingCall can raise this exception to + The poll-function passed to LoopingCallBase can raise this exception to break out of the loop normally. This is somewhat analogous to StopIteration. 
An optional return-value can be included as the argument to the exception; - this return-value will be returned by LoopingCall.wait() + this return-value will be returned by LoopingCallBase.wait() """ def __init__(self, retvalue=True): - """:param retvalue: Value that LoopingCall.wait() should return.""" + """:param retvalue: Value that LoopingCallBase.wait() should return.""" self.retvalue = retvalue @@ -72,16 +77,17 @@ def _inner(): try: while self._running: - start = timeutils.utcnow() + start = _ts() self.f(*self.args, **self.kw) - end = timeutils.utcnow() + end = _ts() if not self._running: break - delay = interval - timeutils.delta_seconds(start, end) - if delay <= 0: - LOG.warn(_LW('task run outlasted interval by %s sec') % - -delay) - greenthread.sleep(delay if delay > 0 else 0) + delay = end - start - interval + if delay > 0: + LOG.warn(_LW('task %(func_name)s run outlasted ' + 'interval by %(delay).2f sec'), + {'func_name': repr(self.f), 'delay': delay}) + greenthread.sleep(-delay if delay < 0 else 0) except LoopingCallDone as e: self.stop() done.send(e.retvalue) @@ -98,11 +104,6 @@ def _inner(): return self.done -# TODO(mikal): this class name is deprecated in Havana and should be removed -# in the I release -LoopingCall = FixedIntervalLoopingCall - - class DynamicLoopingCall(LoopingCallBase): """A looping call which sleeps until the next known event. @@ -126,8 +127,9 @@ def _inner(): if periodic_interval_max is not None: idle = min(idle, periodic_interval_max) - LOG.debug('Dynamic looping call sleeping for %.02f ' - 'seconds', idle) + LOG.debug('Dynamic looping call %(func_name)s sleeping ' + 'for %(idle).02f seconds', + {'func_name': repr(self.f), 'idle': idle}) greenthread.sleep(idle) except LoopingCallDone as e: self.stop() From bf02f134d711a040b117788009843e7edc9f3040 Mon Sep 17 00:00:00 2001 From: "Daniel P. 
Berrange" Date: Fri, 27 Jun 2014 13:03:20 +0100 Subject: [PATCH 104/486] libvirt: define XML schema for recording nova instance metadata Define an XML schema to be used in the libvirt guest config as a metadata block to provide information about the instance that is being run. When querying the libvirt guest XML this is visible thus: instance-00000004 a173eb64-f8e6-419b-a2b7-2d3283ecb26c demo 2014-06-27 11:22:25+GMT 2048 20 0 1 admin admin ...snip... Blueprint: libvirt-driver-domain-metadata Change-Id: I19432cb93bc1471118d9d323c712cdd9756a5926 --- nova/tests/virt/libvirt/test_config.py | 52 ++++++++++ nova/virt/libvirt/config.py | 129 +++++++++++++++++++++++-- 2 files changed, 173 insertions(+), 8 deletions(-) diff --git a/nova/tests/virt/libvirt/test_config.py b/nova/tests/virt/libvirt/test_config.py index 8d14eb9f8d..e0363ed3e3 100644 --- a/nova/tests/virt/libvirt/test_config.py +++ b/nova/tests/virt/libvirt/test_config.py @@ -1735,3 +1735,55 @@ def test_config_cputune_timeslice(self): 50000 25000 """) + + +class LibvirtConfigGuestMetadataNovaTest(LibvirtConfigBaseTest): + + def test_config_metadata(self): + meta = config.LibvirtConfigGuestMetaNovaInstance() + meta.package = "2014.2.3" + meta.name = "moonbuggy" + meta.creationTime = 1234567890 + meta.roottype = "image" + meta.rootid = "fe55c69a-8b2e-4bbc-811a-9ad2023a0426" + + owner = config.LibvirtConfigGuestMetaNovaOwner() + owner.userid = "3472c2a6-de91-4fb5-b618-42bc781ef670" + owner.username = "buzz" + owner.projectid = "f241e906-010e-4917-ae81-53f4fb8aa021" + owner.projectname = "moonshot" + + meta.owner = owner + + flavor = config.LibvirtConfigGuestMetaNovaFlavor() + flavor.name = "m1.lowgravity" + flavor.vcpus = 8 + flavor.memory = 2048 + flavor.swap = 10 + flavor.disk = 50 + flavor.ephemeral = 10 + + meta.flavor = flavor + + xml = meta.to_xml() + self.assertXmlEqual(xml, """ + + + moonbuggy + 2009-02-13 23:31:30 + + 2048 + 50 + 10 + 10 + 8 + + + buzz + moonshot + + + + """) diff --git 
a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py index 74d8816a10..7ff03ff6b5 100644 --- a/nova/virt/libvirt/config.py +++ b/nova/virt/libvirt/config.py @@ -23,6 +23,8 @@ helpers for populating up config object instances. """ +import time + from nova import exception from nova.openstack.common import log as logging from nova.openstack.common import units @@ -33,6 +35,9 @@ LOG = logging.getLogger(__name__) +# Namespace to use for Nova specific metadata items in XML +NOVA_NS = "http://openstack.org/xmlns/libvirt/nova/1.0" + class LibvirtConfigObject(object): @@ -43,18 +48,20 @@ def __init__(self, **kwargs): self.ns_prefix = kwargs.get('ns_prefix') self.ns_uri = kwargs.get('ns_uri') - @staticmethod - def _text_node(name, value): - child = etree.Element(name) + def _new_node(self, name): + if self.ns_uri is None: + return etree.Element(name) + else: + return etree.Element("{" + self.ns_uri + "}" + name, + nsmap={self.ns_prefix: self.ns_uri}) + + def _text_node(self, name, value): + child = self._new_node(name) child.text = str(value) return child def format_dom(self): - if self.ns_uri is None: - return etree.Element(self.root_name) - else: - return etree.Element("{" + self.ns_uri + "}" + self.root_name, - nsmap={self.ns_prefix: self.ns_uri}) + return self._new_node(self.root_name) def parse_str(self, xmlstr): self.parse_dom(etree.fromstring(xmlstr)) @@ -1186,6 +1193,7 @@ def __init__(self, **kwargs): self.os_smbios = None self.os_mach_type = None self.devices = [] + self.metadata = [] def _format_basic_props(self, root): root.append(self._text_node("uuid", self.uuid)) @@ -1198,6 +1206,12 @@ def _format_basic_props(self, root): else: root.append(self._text_node("vcpu", self.vcpus)) + if len(self.metadata) > 0: + metadata = etree.Element("metadata") + for m in self.metadata: + metadata.append(m.format_dom()) + root.append(metadata) + def _format_os(self, root): os = etree.Element("os") type_node = self._text_node("type", self.os_type) @@ -1435,3 +1449,102 @@ 
class LibvirtConfigGuestMetaNovaInstance(LibvirtConfigObject):
    """Nova instance metadata for the libvirt guest XML.

    Serialized under the http://openstack.org/xmlns/libvirt/nova/1.0
    namespace as <nova:instance> inside the guest's <metadata> block.
    """

    def __init__(self):
        super(LibvirtConfigGuestMetaNovaInstance,
              self).__init__(root_name="instance",
                             ns_prefix="nova",
                             ns_uri=NOVA_NS)

        # All fields are optional; format_dom() only emits the ones set.
        self.package = None        # nova package version string
        self.flavor = None         # LibvirtConfigGuestMetaNovaFlavor
        self.name = None           # instance display name
        self.creationTime = None   # creation time as a UNIX timestamp
        self.owner = None          # LibvirtConfigGuestMetaNovaOwner
        self.roottype = None       # root disk source type (e.g. "image")
        self.rootid = None         # root disk source id

    def format_dom(self):
        """Serialize the set fields into a <nova:instance> element."""
        meta = super(LibvirtConfigGuestMetaNovaInstance, self).format_dom()

        # BUG FIX: <package> was emitted unconditionally, so leaving
        # package as None crashed lxml (attribute values must be
        # strings).  Guard it like every other optional field.
        if self.package is not None:
            pkg = self._new_node("package")
            pkg.set("version", self.package)
            meta.append(pkg)
        if self.name is not None:
            meta.append(self._text_node("name", self.name))
        if self.creationTime is not None:
            # Render the timestamp as a human readable UTC string.
            timestr = time.strftime("%Y-%m-%d %H:%M:%S",
                                    time.gmtime(self.creationTime))
            meta.append(self._text_node("creationTime", timestr))
        if self.flavor is not None:
            meta.append(self.flavor.format_dom())
        if self.owner is not None:
            meta.append(self.owner.format_dom())

        # Only emit <root> when both halves of the pair are known.
        if self.roottype is not None and self.rootid is not None:
            root = self._new_node("root")
            root.set("type", self.roottype)
            root.set("uuid", str(self.rootid))
            meta.append(root)

        return meta


class LibvirtConfigGuestMetaNovaFlavor(LibvirtConfigObject):
    """Flavor details for the <nova:flavor> metadata element."""

    def __init__(self):
        super(LibvirtConfigGuestMetaNovaFlavor,
              self).__init__(root_name="flavor",
                             ns_prefix="nova",
                             ns_uri=NOVA_NS)

        self.name = None       # flavor name, emitted as an attribute
        self.memory = None     # flavor memory size
        self.disk = None       # flavor root disk size
        self.swap = None       # flavor swap size
        self.ephemeral = None  # flavor ephemeral disk size
        self.vcpus = None      # flavor vcpu count

    def format_dom(self):
        """Serialize the set fields into a <nova:flavor> element."""
        meta = super(LibvirtConfigGuestMetaNovaFlavor, self).format_dom()
        # BUG FIX: guard the name attribute for consistency with the
        # other optional fields (lxml rejects a None attribute value).
        if self.name is not None:
            meta.set("name", self.name)
        if self.memory is not None:
            meta.append(self._text_node("memory", str(self.memory)))
        if self.disk is not None:
            meta.append(self._text_node("disk", str(self.disk)))
        if self.swap is not None:
            meta.append(self._text_node("swap", str(self.swap)))
        if self.ephemeral is not None:
            meta.append(self._text_node("ephemeral", str(self.ephemeral)))
        if self.vcpus is not None:
            meta.append(self._text_node("vcpus", str(self.vcpus)))
        return meta


class LibvirtConfigGuestMetaNovaOwner(LibvirtConfigObject):
    """Owner (user/project) details for the <nova:owner> element."""

    def __init__(self):
        super(LibvirtConfigGuestMetaNovaOwner,
              self).__init__(root_name="owner",
                             ns_prefix="nova",
                             ns_uri=NOVA_NS)

        self.userid = None       # owner user uuid
        self.username = None     # owner user display name
        self.projectid = None    # owner project uuid
        self.projectname = None  # owner project display name

    def format_dom(self):
        """Serialize the set fields into a <nova:owner> element.

        User and project are each emitted only when both the id and the
        name halves of the pair are known.
        """
        meta = super(LibvirtConfigGuestMetaNovaOwner, self).format_dom()
        if self.userid is not None and self.username is not None:
            user = self._text_node("user", self.username)
            user.set("uuid", self.userid)
            meta.append(user)
        if self.projectid is not None and self.projectname is not None:
            project = self._text_node("project", self.projectname)
            project.set("uuid", self.projectid)
            meta.append(project)
        return meta
Co-Authored-By: Andrea Rosa Co-Authored-By: Paul Murray This is part of: blueprint extensible-resource-tracking Change-Id: I64108338e3c958ba1276aaf113a68861cbe286f5 --- nova/compute/claims.py | 39 +-- nova/compute/resource_tracker.py | 37 ++- nova/compute/resources/__init__.py | 133 ++++++++ nova/compute/resources/base.py | 93 ++++++ nova/compute/resources/vcpu.py | 83 +++++ nova/compute/stats.py | 20 +- nova/tests/compute/fake_resource_tracker.py | 2 + nova/tests/compute/test_claims.py | 48 ++- nova/tests/compute/test_resource_tracker.py | 42 ++- nova/tests/compute/test_resources.py | 344 ++++++++++++++++++++ nova/tests/compute/test_stats.py | 3 - setup.cfg | 2 + 12 files changed, 762 insertions(+), 84 deletions(-) create mode 100644 nova/compute/resources/__init__.py create mode 100644 nova/compute/resources/base.py create mode 100644 nova/compute/resources/vcpu.py create mode 100644 nova/tests/compute/test_resources.py diff --git a/nova/compute/claims.py b/nova/compute/claims.py index 046d171692..4f5356ce78 100644 --- a/nova/compute/claims.py +++ b/nova/compute/claims.py @@ -42,10 +42,6 @@ def disk_gb(self): def memory_mb(self): return 0 - @property - def vcpus(self): - return 0 - def __enter__(self): return self @@ -57,8 +53,8 @@ def abort(self): pass def __str__(self): - return "[Claim: %d MB memory, %d GB disk, %d VCPUS]" % (self.memory_mb, - self.disk_gb, self.vcpus) + return "[Claim: %d MB memory, %d GB disk]" % (self.memory_mb, + self.disk_gb) class Claim(NopClaim): @@ -102,10 +98,6 @@ def disk_gb(self): def memory_mb(self): return self.instance['memory_mb'] + self.overhead['memory_mb'] - @property - def vcpus(self): - return self.instance['vcpus'] - def abort(self): """Compute operation requiring claimed resources has failed or been aborted. 
@@ -130,18 +122,16 @@ def _claim_test(self, resources, limits=None): # unlimited: memory_mb_limit = limits.get('memory_mb') disk_gb_limit = limits.get('disk_gb') - vcpu_limit = limits.get('vcpu') msg = _("Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d " - "GB, VCPUs %(vcpus)d") - params = {'memory_mb': self.memory_mb, 'disk_gb': self.disk_gb, - 'vcpus': self.vcpus} + "GB") + params = {'memory_mb': self.memory_mb, 'disk_gb': self.disk_gb} LOG.audit(msg % params, instance=self.instance) reasons = [self._test_memory(resources, memory_mb_limit), self._test_disk(resources, disk_gb_limit), - self._test_cpu(resources, vcpu_limit), self._test_pci()] + reasons = reasons + self._test_ext_resources(limits) reasons = [r for r in reasons if r is not None] if len(reasons) > 0: raise exception.ComputeResourcesUnavailable(reason= @@ -176,14 +166,9 @@ def _test_pci(self): if not can_claim: return _('Claim pci failed.') - def _test_cpu(self, resources, limit): - type_ = _("CPUs") - unit = "VCPUs" - total = resources['vcpus'] - used = resources['vcpus_used'] - requested = self.vcpus - - return self._test(type_, unit, total, used, requested, limit) + def _test_ext_resources(self, limits): + return self.tracker.ext_resources_handler.test_resources( + self.instance, limits) def _test(self, type_, unit, total, used, requested, limit): """Test if the given type of resource needed for a claim can be safely @@ -235,10 +220,6 @@ def disk_gb(self): def memory_mb(self): return self.instance_type['memory_mb'] + self.overhead['memory_mb'] - @property - def vcpus(self): - return self.instance_type['vcpus'] - def _test_pci(self): pci_requests = pci_request.get_instance_pci_requests( self.instance, 'new_') @@ -248,6 +229,10 @@ def _test_pci(self): if not claim: return _('Claim pci failed.') + def _test_ext_resources(self, limits): + return self.tracker.ext_resources_handler.test_resources( + self.instance_type, limits) + def abort(self): """Compute operation requiring claimed resources 
has failed or been aborted. diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py index fb65f77c3a..d1eb96cf72 100644 --- a/nova/compute/resource_tracker.py +++ b/nova/compute/resource_tracker.py @@ -24,6 +24,7 @@ from nova.compute import claims from nova.compute import flavors from nova.compute import monitors +from nova.compute import resources as ext_resources from nova.compute import task_states from nova.compute import vm_states from nova import conductor @@ -46,7 +47,10 @@ help='Amount of memory in MB to reserve for the host'), cfg.StrOpt('compute_stats_class', default='nova.compute.stats.Stats', - help='Class that will manage stats for the local compute host') + help='Class that will manage stats for the local compute host'), + cfg.ListOpt('compute_resources', + default=['vcpu'], + help='The names of the extra resources to track.'), ] CONF = cfg.CONF @@ -75,6 +79,8 @@ def __init__(self, host, driver, nodename): self.conductor_api = conductor.API() monitor_handler = monitors.ResourceMonitorHandler() self.monitors = monitor_handler.choose_monitors(self) + self.ext_resources_handler = \ + ext_resources.ResourceHandler(CONF.compute_resources) self.notifier = rpc.get_notifier() self.old_resources = {} @@ -229,12 +235,10 @@ def drop_resize_claim(self, instance, instance_type=None, prefix='new_'): instance_type = self._get_instance_type(ctxt, instance, prefix) if instance_type['id'] == itype['id']: - self.stats.update_stats_for_migration(itype, sign=-1) if self.pci_tracker: self.pci_tracker.update_pci_for_migration(instance, sign=-1) self._update_usage(self.compute_node, itype, sign=-1) - self.compute_node['stats'] = jsonutils.dumps(self.stats) ctxt = context.get_admin_context() self._update(ctxt, self.compute_node) @@ -377,9 +381,20 @@ def _sync_compute_node(self, context, resources): LOG.info(_('Compute_service record updated for %(host)s:%(node)s') % {'host': self.host, 'node': self.nodename}) + def _write_ext_resources(self, 
resources): + resources['stats'] = {} + resources['stats'].update(self.stats) + self.ext_resources_handler.write_resources(resources) + def _create(self, context, values): """Create the compute node in the DB.""" # initialize load stats from existing instances: + self._write_ext_resources(values) + # NOTE(pmurray): the stats field is stored as a json string. The + # json conversion will be done automatically by the ComputeNode object + # so this can be removed when using ComputeNode. + values['stats'] = jsonutils.dumps(values['stats']) + self.compute_node = self.conductor_api.compute_node_create(context, values) @@ -449,10 +464,17 @@ def _resource_change(self, resources): def _update(self, context, values): """Persist the compute node updates to the DB.""" + self._write_ext_resources(values) + # NOTE(pmurray): the stats field is stored as a json string. The + # json conversion will be done automatically by the ComputeNode object + # so this can be removed when using ComputeNode. + values['stats'] = jsonutils.dumps(values['stats']) + if not self._resource_change(values): return if "service" in self.compute_node: del self.compute_node['service'] + self.compute_node = self.conductor_api.compute_node_update( context, self.compute_node, values) if self.pci_tracker: @@ -475,7 +497,7 @@ def _update_usage(self, resources, usage, sign=1): resources['local_gb_used']) resources['running_vms'] = self.stats.num_instances - resources['vcpus_used'] = self.stats.num_vcpus_used + self.ext_resources_handler.update_from_instance(usage, sign) def _update_usage_from_migration(self, context, instance, resources, migration): @@ -518,11 +540,9 @@ def _update_usage_from_migration(self, context, instance, resources, migration['old_instance_type_id']) if itype: - self.stats.update_stats_for_migration(itype) if self.pci_tracker: self.pci_tracker.update_pci_for_migration(instance) self._update_usage(resources, itype) - resources['stats'] = jsonutils.dumps(self.stats) if self.pci_tracker: 
resources['pci_stats'] = jsonutils.dumps( self.pci_tracker.stats) @@ -595,7 +615,6 @@ def _update_usage_from_instance(self, resources, instance): self._update_usage(resources, instance, sign=sign) resources['current_workload'] = self.stats.calculate_workload() - resources['stats'] = jsonutils.dumps(self.stats) if self.pci_tracker: resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats) else: @@ -615,7 +634,6 @@ def _update_usage_from_instances(self, resources, instances): # set some initial values, reserve room for host/hypervisor: resources['local_gb_used'] = CONF.reserved_host_disk_mb / 1024 resources['memory_mb_used'] = CONF.reserved_host_memory_mb - resources['vcpus_used'] = 0 resources['free_ram_mb'] = (resources['memory_mb'] - resources['memory_mb_used']) resources['free_disk_gb'] = (resources['local_gb'] - @@ -623,6 +641,9 @@ def _update_usage_from_instances(self, resources, instances): resources['current_workload'] = 0 resources['running_vms'] = 0 + # Reset values for extended resources + self.ext_resources_handler.reset_resources(resources, self.driver) + for instance in instances: if instance['vm_state'] == vm_states.DELETED: continue diff --git a/nova/compute/resources/__init__.py b/nova/compute/resources/__init__.py new file mode 100644 index 0000000000..cb023ea523 --- /dev/null +++ b/nova/compute/resources/__init__.py @@ -0,0 +1,133 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
class ResourceHandler(object):
    """Loads compute resource plugins and fans calls out to them.

    A thin dispatcher around a stevedore NamedExtensionManager: every
    public method forwards to the same-named hook on each loaded plugin
    via map_method(), and becomes a no-op (or returns an empty list)
    when no plugins are loaded.
    """

    def __init__(self, names, propagate_map_exceptions=False):
        """Initialise the resource handler by loading the plugins.

        Plugins are looked up by name in the RESOURCE_NAMESPACE
        entry-point namespace and instantiated on load.  Any requested
        plugin that cannot be located is logged as a warning.

        :param names: the list of plugins to load by name
        :param propagate_map_exceptions: True indicates exceptions in
            the plugins should be raised (useful in testing so failures
            surface); False indicates they should be handled and logged
            at error level.
        """
        self._mgr = stevedore.NamedExtensionManager(
            namespace=RESOURCE_NAMESPACE,
            names=names,
            propagate_map_exceptions=propagate_map_exceptions,
            invoke_on_load=True)
        self._log_missing_plugins(names)

    def _log_missing_plugins(self, names):
        # Warn once for every requested plugin stevedore failed to load.
        loaded = self._mgr.names()
        for name in names:
            if name not in loaded:
                LOG.warn(_LW('Compute resource plugin %s was not loaded') %
                         name)

    def reset_resources(self, resources, driver):
        """Reset every plugin to its initial state.

        The resources data is the initial state gathered from the
        hypervisor; the driver is passed along in case a plugin needs
        additional information from it.

        :param resources: the resources reported by the hypervisor
        :param driver: the driver for the hypervisor
        :returns: None
        """
        if not self._mgr.extensions:
            return
        self._mgr.map_method('reset', resources, driver)

    def test_resources(self, usage, limits):
        """Ask every plugin whether it can support the given usage.

        Each plugin returns None when it has sufficient resource
        available, or a human readable string explaining why it does
        not.

        :param usage: the additional resource usage
        :param limits: limits used for the calculation
        :returns: a list of return values from the plugins
        """
        if not self._mgr.extensions:
            return []
        return self._mgr.map_method('test', usage, limits)

    def update_from_instance(self, usage, sign=1):
        """Apply (or revert) an instance's allocation in every plugin.

        :param usage: the resource usage of the instance
        :param sign: 1 indicates the instance is being added to the
            current usage, -1 indicates it is being removed
        :returns: None
        """
        if not self._mgr.extensions:
            return
        hook = 'add_instance' if sign == 1 else 'remove_instance'
        self._mgr.map_method(hook, usage)

    def write_resources(self, resources):
        """Let every plugin write its resource data into *resources*.

        :param resources: the compute node resources
        :returns: None
        """
        if not self._mgr.extensions:
            return
        self._mgr.map_method('write', resources)

    def report_free_resources(self):
        """Have every plugin log its free resource information.

        :returns: None
        """
        if not self._mgr.extensions:
            return
        self._mgr.map_method('report_free')
@six.add_metaclass(abc.ABCMeta)
class Resource(object):
    """This base class defines the interface used for compute resource
    plugins. It is not necessary to use this base class, but all compute
    resource plugins must implement the abstract methods found here.
    An instance of the plugin object is instantiated when it is loaded
    by calling __init__() with no parameters.
    """

    @abc.abstractmethod
    def reset(self, resources, driver):
        """Set the resource to an initial state based on the resource
        view discovered from the hypervisor.

        :param resources: the resources reported by the hypervisor
        :param driver: the virt driver, available for extra queries

        :returns: None
        """
        pass

    @abc.abstractmethod
    def test(self, usage, limits):
        """Test to see if we have sufficient resources to allocate for
        an instance with the given resource usage.

        :param usage: the resource usage of the instances
        :param limits: limits to apply

        :returns: None if the test passes or a string describing the reason
            why the test failed
        """
        pass

    @abc.abstractmethod
    def add_instance(self, usage):
        """Update resource information adding allocation according to the
        given resource usage.

        :param usage: the resource usage of the instance being added

        :returns: None
        """
        pass

    @abc.abstractmethod
    def remove_instance(self, usage):
        """Update resource information removing allocation according to the
        given resource usage.

        :param usage: the resource usage of the instance being removed

        :returns: None

        """
        pass

    @abc.abstractmethod
    def write(self, resources):
        """Write resource data to populate resources.

        :param resources: the resources data to be populated

        :returns: None
        """
        pass

    @abc.abstractmethod
    def report_free(self):
        """Log free resources.

        This method logs how much free resource is held by
        the resource plugin.

        :returns: None
        """
        pass
class VCPU(base.Resource):
    """VCPU compute resource plugin.

    This is effectively a simple counter based on the vcpu requirement
    of each instance.
    """

    def __init__(self):
        # Initialize to a 'zero' resource; reset() will be called with
        # the real values discovered from the hypervisor.
        self._total = 0
        self._used = 0

    def reset(self, resources, driver):
        """Reset to the hypervisor-reported vcpu total, nothing used."""
        self._total = int(resources['vcpus'])
        self._used = 0

    def _get_requested(self, usage):
        # Number of vcpus requested by the instance (0 when absent).
        return int(usage.get('vcpus', 0))

    def _get_limit(self, limits):
        # Return the vcpu limit as an int, or None when no limit applies.
        if limits and 'vcpu' in limits:
            return int(limits.get('vcpu'))
        return None

    def test(self, usage, limits):
        """Check whether *usage* fits under the vcpu limit.

        :param usage: the resource usage of the instance
        :param limits: limits to apply
        :returns: None when the claim fits (or no limit is set),
            otherwise a human readable string explaining the failure
        """
        requested = self._get_requested(usage)
        limit = self._get_limit(limits)

        # IDIOM FIX: pass logging arguments lazily so the message is
        # only formatted when debug logging is actually enabled.
        LOG.debug('Total CPUs: %(total)d VCPUs, used: %(used).02f VCPUs',
                  {'total': self._total, 'used': self._used})

        if limit is None:
            # Treat resource as unlimited.
            LOG.debug('CPUs limit not specified, defaulting to unlimited')
            return None

        free = limit - self._used

        # Oversubscribed resource policy info:
        LOG.debug('CPUs limit: %(limit).02f VCPUs, free: %(free).02f VCPUs',
                  {'limit': limit, 'free': free})

        if requested > free:
            return ('Free CPUs %(free).02f VCPUs < '
                    'requested %(requested)d VCPUs' %
                    {'free': free, 'requested': requested})
        return None

    def add_instance(self, usage):
        """Account for an instance being added."""
        # CONSISTENCY: use the shared helper instead of re-inlining
        # int(usage.get('vcpus', 0)) as the original did.
        self._used += self._get_requested(usage)

    def remove_instance(self, usage):
        """Account for an instance being removed."""
        self._used -= self._get_requested(usage)

    def write(self, resources):
        """Publish totals into the compute node resources dict."""
        resources['vcpus'] = self._total
        resources['vcpus_used'] = self._used

    def report_free(self):
        """Log the number of free vcpus."""
        LOG.debug('Free VCPUs: %s', self._total - self._used)
+++ b/nova/compute/stats.py @@ -73,10 +73,6 @@ def num_os_type(self, os_type): key = "num_os_type_%s" % os_type return self.get(key, 0) - @property - def num_vcpus_used(self): - return self.get("num_vcpus_used", 0) - def update_stats_for_instance(self, instance): """Update stats after an instance is changed.""" @@ -91,14 +87,12 @@ def update_stats_for_instance(self, instance): self._decrement("num_task_%s" % old_state['task_state']) self._decrement("num_os_type_%s" % old_state['os_type']) self._decrement("num_proj_%s" % old_state['project_id']) - x = self.get("num_vcpus_used", 0) - self["num_vcpus_used"] = x - old_state['vcpus'] else: # new instance self._increment("num_instances") # Now update stats from the new instance state: - (vm_state, task_state, os_type, project_id, vcpus) = \ + (vm_state, task_state, os_type, project_id) = \ self._extract_state_from_instance(instance) if vm_state == vm_states.DELETED: @@ -110,16 +104,10 @@ def update_stats_for_instance(self, instance): self._increment("num_task_%s" % task_state) self._increment("num_os_type_%s" % os_type) self._increment("num_proj_%s" % project_id) - x = self.get("num_vcpus_used", 0) - self["num_vcpus_used"] = x + vcpus # save updated I/O workload in stats: self["io_workload"] = self.io_workload - def update_stats_for_migration(self, instance_type, sign=1): - x = self.get("num_vcpus_used", 0) - self["num_vcpus_used"] = x + (sign * instance_type['vcpus']) - def _decrement(self, key): x = self.get(key, 0) self[key] = x - 1 @@ -136,10 +124,8 @@ def _extract_state_from_instance(self, instance): task_state = instance['task_state'] os_type = instance['os_type'] project_id = instance['project_id'] - vcpus = instance['vcpus'] self.states[uuid] = dict(vm_state=vm_state, task_state=task_state, - os_type=os_type, project_id=project_id, - vcpus=vcpus) + os_type=os_type, project_id=project_id) - return (vm_state, task_state, os_type, project_id, vcpus) + return (vm_state, task_state, os_type, project_id) diff --git 
class FakeResourceHandler(object):
    """Stand-in for the resource tracker's extension resource handler.

    Records whether test_resources() was consulted and whether the
    usage object it received was the fake instance type (i.e. its
    'name' entry is 'fakeitype'), so tests can assert which object the
    claim passed through.
    """

    test_called = False
    # BUG FIX: the original declared the default as 'usage_is_instance',
    # a name nothing ever reads, while test_resources() sets (and the
    # tests read) 'usage_is_itype' -- reading it before any call raised
    # AttributeError.  Declare the attribute that is actually used.
    usage_is_itype = False

    def test_resources(self, usage, limits):
        # Record that the claim consulted the extension handler.
        self.test_called = True
        # BUG FIX: the original used "is 'fakeitype'", an identity check
        # against a string literal that only worked by accident of
        # CPython string interning; equality is the correct comparison.
        self.usage_is_itype = usage.get('name') == 'fakeitype'
        return []
limits=limits, overhead=overhead, memory_mb=2040) - def test_cpu_insufficient(self): - limits = {'vcpu': 16} - self.assertRaises(exception.ComputeResourcesUnavailable, - self._claim, limits=limits, vcpus=17) - def test_memory_oversubscription(self): self._claim(memory_mb=4096) @@ -162,21 +161,6 @@ def test_disk_and_memory_insufficient(self): self._claim, limits=limits, root_gb=10, ephemeral_gb=40, memory_mb=16384) - def test_disk_and_cpu_insufficient(self): - limits = {'disk_gb': 45, 'vcpu': 16} - self.assertRaisesRegexp(re.compile("disk.*vcpus", re.IGNORECASE), - exception.ComputeResourcesUnavailable, - self._claim, limits=limits, root_gb=10, ephemeral_gb=40, - vcpus=17) - - def test_disk_and_cpu_and_memory_insufficient(self): - limits = {'disk_gb': 45, 'vcpu': 16, 'memory_mb': 8192} - pat = "memory.*disk.*vcpus" - self.assertRaisesRegexp(re.compile(pat, re.IGNORECASE), - exception.ComputeResourcesUnavailable, - self._claim, limits=limits, root_gb=10, ephemeral_gb=40, - vcpus=17, memory_mb=16384) - def test_pci_pass(self): dev_dict = { 'compute_node_id': 1, @@ -224,6 +208,11 @@ def test_pci_pass_no_requests(self): self._set_pci_request(claim) claim._test_pci() + def test_ext_resources(self): + self._claim() + self.assertTrue(self.tracker.ext_resources_handler.test_called) + self.assertFalse(self.tracker.ext_resources_handler.usage_is_itype) + def test_abort(self): claim = self._abort() self.assertTrue(claim.tracker.icalled) @@ -260,6 +249,11 @@ def _set_pci_request(self, claim): claim.instance.update( system_metadata={'new_pci_requests': jsonutils.dumps(request)}) + def test_ext_resources(self): + self._claim() + self.assertTrue(self.tracker.ext_resources_handler.test_called) + self.assertTrue(self.tracker.ext_resources_handler.usage_is_itype) + def test_abort(self): claim = self._abort() self.assertTrue(claim.tracker.rcalled) diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py index 364cfd6e2d..06112e245a 100644 
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
    """Tests for _write_ext_resources on the resource tracker."""

    def setUp(self):
        super(TrackerExtraResourcesTestCase, self).setUp()
        self.driver = self._driver()

    def _driver(self):
        # Plain fake driver; no PCI or other extras needed here.
        return FakeVirtDriver()

    def test_set_empty_ext_resources(self):
        """_write_ext_resources always creates a 'stats' entry."""
        resources = self.driver.get_available_resource(self.tracker.nodename)
        self.assertNotIn('stats', resources)
        self.tracker._write_ext_resources(resources)
        self.assertIn('stats', resources)

    def test_set_extra_resources(self):
        """Plugin-written entries end up in resources['stats']."""
        def fake_write_resources(resources):
            resources['stats']['resA'] = '123'
            resources['stats']['resB'] = 12

        self.stubs.Set(self.tracker.ext_resources_handler,
                       'write_resources',
                       fake_write_resources)

        resources = self.driver.get_available_resource(self.tracker.nodename)
        self.tracker._write_ext_resources(resources)

        # NOTE(review): sorted() over a dict yields its keys, so this
        # only asserts the key sets match, not the values -- confirm
        # whether a full dict comparison was intended.
        expected = {"resA": "123", "resB": 12}
        self.assertEqual(sorted(expected),
                         sorted(resources['stats']))
class FakeResourceHandler(resources.ResourceHandler):
    """ResourceHandler whose plugins are supplied directly by the test,
    bypassing stevedore entry-point discovery.
    """

    def __init__(self, extensions):
        self._mgr = \
            named.NamedExtensionManager.make_test_instance(extensions)


class FakeResource(base.Resource):
    """Minimal concrete Resource used to exercise the handler.

    Tracks a single counter; the amount an instance requests is read
    from usage['extra_specs'][self.resource_name].  Subclasses set
    resource_name in reset().
    """

    def __init__(self):
        self.total_res = 0
        self.used_res = 0

    def _get_requested(self, usage):
        # Requested amount as an int, or None when usage carries no
        # extra_specs entry for this resource.
        if 'extra_specs' not in usage:
            return
        if self.resource_name not in usage['extra_specs']:
            return
        req = usage['extra_specs'][self.resource_name]
        return int(req)

    def _get_limit(self, limits):
        # Limit as an int, or None when no limit is set for this
        # resource.
        if self.resource_name not in limits:
            return
        limit = limits[self.resource_name]
        return int(limit)

    def reset(self, resources, driver):
        self.total_res = 0
        self.used_res = 0

    def test(self, usage, limits):
        # NOTE(review): a requested amount of 0 is treated the same as
        # "not requested" by the truthiness checks below; likewise a
        # limit of 0 means "unlimited" here.
        requested = self._get_requested(usage)
        if not requested:
            return

        limit = self._get_limit(limits)
        if not limit:
            return

        free = limit - self.used_res
        if requested <= free:
            return
        else:
            return (_('Free %(free)d < requested %(requested)d ') %
                    {'free': free, 'requested': requested})

    def add_instance(self, usage):
        requested = self._get_requested(usage)
        if requested:
            self.used_res += requested

    def remove_instance(self, usage):
        requested = self._get_requested(usage)
        if requested:
            self.used_res -= requested

    def write(self, resources):
        pass

    def report_free(self):
        # Unlike the real plugins this returns the string instead of
        # logging it, so tests can assert on the value.
        return "Free %s" % (self.total_res - self.used_res)
self.total_res = int(CONF.resA) + self.used_res = 0 + self.resource_name = 'resource:resA' + + def write(self, resources): + resources['resA'] = self.total_res + resources['used_resA'] = self.used_res + + +class ResourceB(FakeResource): + + def reset(self, resources, driver): + # ResourceB uses resource details passed in parameter resources + self.total_res = resources['resB'] + self.used_res = 0 + self.resource_name = 'resource:resB' + + def write(self, resources): + resources['resB'] = self.total_res + resources['used_resB'] = self.used_res + + +def fake_flavor_obj(**updates): + flavor = flavor_obj.Flavor() + flavor.id = 1 + flavor.name = 'fakeflavor' + flavor.memory_mb = 8000 + flavor.vcpus = 3 + flavor.root_gb = 11 + flavor.ephemeral_gb = 4 + flavor.swap = 0 + flavor.rxtx_factor = 1.0 + flavor.vcpu_weight = 1 + if updates: + flavor.update(updates) + return flavor + + +class BaseTestCase(test.TestCase): + + def _initialize_used_res_counter(self): + # Initialize the value for the used resource + for ext in self.r_handler._mgr.extensions: + ext.obj.used_res = 0 + + def setUp(self): + super(BaseTestCase, self).setUp() + + # initialize flavors and stub get_by_id to + # get flavors from here + self._flavors = {} + self.ctxt = context.get_admin_context() + + # Create a flavor without extra_specs defined + _flavor_id = 1 + _flavor = fake_flavor_obj(id=_flavor_id) + self._flavors[_flavor_id] = _flavor + + # Create a flavor with extra_specs defined + _flavor_id = 2 + requested_resA = 5 + requested_resB = 7 + requested_resC = 7 + _extra_specs = {'resource:resA': requested_resA, + 'resource:resB': requested_resB, + 'resource:resC': requested_resC} + _flavor = fake_flavor_obj(id=_flavor_id, + extra_specs=_extra_specs) + self._flavors[_flavor_id] = _flavor + + # create fake resource extensions and resource handler + _extensions = [ + extension.Extension('resA', None, ResourceA, ResourceA()), + extension.Extension('resB', None, ResourceB, ResourceB()), + ] + self.r_handler = 
FakeResourceHandler(_extensions) + + # Resources details can be passed to each plugin or can be specified as + # configuration options + driver_resources = {'resB': 5} + CONF.resA = '10' + + # initialise the resources + self.r_handler.reset_resources(driver_resources, None) + + def test_update_from_instance_with_extra_specs(self): + # Flavor with extra_specs + _flavor_id = 2 + sign = 1 + self.r_handler.update_from_instance(self._flavors[_flavor_id], sign) + + expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA'] + expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB'] + self.assertEqual(int(expected_resA), + self.r_handler._mgr['resA'].obj.used_res) + self.assertEqual(int(expected_resB), + self.r_handler._mgr['resB'].obj.used_res) + + def test_update_from_instance_without_extra_specs(self): + # Flavor id without extra spec + _flavor_id = 1 + self._initialize_used_res_counter() + self.r_handler.resource_list = [] + sign = 1 + self.r_handler.update_from_instance(self._flavors[_flavor_id], sign) + self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res) + self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res) + + def test_write_resources(self): + self._initialize_used_res_counter() + extra_resources = {} + expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0} + self.r_handler.write_resources(extra_resources) + self.assertEqual(expected, extra_resources) + + def test_test_resources_without_extra_specs(self): + limits = {} + # Flavor id without extra_specs + flavor = self._flavors[1] + result = self.r_handler.test_resources(flavor, limits) + self.assertEqual([None, None], result) + + def test_test_resources_with_limits_for_different_resource(self): + limits = {'resource:resC': 20} + # Flavor id with extra_specs + flavor = self._flavors[2] + result = self.r_handler.test_resources(flavor, limits) + self.assertEqual([None, None], result) + + def test_passing_test_resources(self): + limits = {'resource:resA': 10, 
'resource:resB': 20} + # Flavor id with extra_specs + flavor = self._flavors[2] + self._initialize_used_res_counter() + result = self.r_handler.test_resources(flavor, limits) + self.assertEqual([None, None], result) + + def test_failing_test_resources_for_single_resource(self): + limits = {'resource:resA': 4, 'resource:resB': 20} + # Flavor id with extra_specs + flavor = self._flavors[2] + self._initialize_used_res_counter() + result = self.r_handler.test_resources(flavor, limits) + expected = ['Free 4 < requested 5 ', None] + self.assertEqual(sorted(expected), + sorted(result)) + + def test_empty_resource_handler(self): + """An empty resource handler has no resource extensions, + should have no effect, and should raise no exceptions. + """ + empty_r_handler = FakeResourceHandler([]) + + resources = {} + empty_r_handler.reset_resources(resources, None) + + flavor = self._flavors[1] + sign = 1 + empty_r_handler.update_from_instance(flavor, sign) + + limits = {} + test_result = empty_r_handler.test_resources(flavor, limits) + self.assertEqual([], test_result) + + sign = -1 + empty_r_handler.update_from_instance(flavor, sign) + + extra_resources = {} + expected_extra_resources = extra_resources + empty_r_handler.write_resources(extra_resources) + self.assertEqual(expected_extra_resources, extra_resources) + + empty_r_handler.report_free_resources() + + def test_vcpu_resource_load(self): + # load the vcpu example + names = ['vcpu'] + real_r_handler = resources.ResourceHandler(names) + ext_names = real_r_handler._mgr.names() + self.assertEqual(names, ext_names) + + # check the extension loaded is the one we expect + # and an instance of the object has been created + ext = real_r_handler._mgr['vcpu'] + self.assertIsInstance(ext.obj, vcpu.VCPU) + + +class TestVCPU(test.TestCase): + + def setUp(self): + super(TestVCPU, self).setUp() + self._vcpu = vcpu.VCPU() + self._vcpu._total = 10 + self._vcpu._used = 0 + self._flavor = fake_flavor_obj(vcpus=5) + self._big_flavor = 
fake_flavor_obj(vcpus=20) + self._instance = fake_instance_obj(None) + + def test_reset(self): + # set vcpu values to something different to test reset + self._vcpu._total = 10 + self._vcpu._used = 5 + + driver_resources = {'vcpus': 20} + self._vcpu.reset(driver_resources, None) + self.assertEqual(20, self._vcpu._total) + self.assertEqual(0, self._vcpu._used) + + def test_add_and_remove_instance(self): + self._vcpu.add_instance(self._flavor) + self.assertEqual(10, self._vcpu._total) + self.assertEqual(5, self._vcpu._used) + + self._vcpu.remove_instance(self._flavor) + self.assertEqual(10, self._vcpu._total) + self.assertEqual(0, self._vcpu._used) + + def test_test_pass_limited(self): + result = self._vcpu.test(self._flavor, {'vcpu': 10}) + self.assertIsNone(result, 'vcpu test failed when it should pass') + + def test_test_pass_unlimited(self): + result = self._vcpu.test(self._big_flavor, {}) + self.assertIsNone(result, 'vcpu test failed when it should pass') + + def test_test_fail(self): + result = self._vcpu.test(self._flavor, {'vcpu': 2}) + expected = _('Free CPUs 2.00 VCPUs < requested 5 VCPUs') + self.assertEqual(expected, result) + + def test_write(self): + resources = {'stats': {}} + self._vcpu.write(resources) + expected = { + 'vcpus': 10, + 'vcpus_used': 0, + 'stats': { + 'num_vcpus': 10, + 'num_vcpus_used': 0 + } + } + self.assertEqual(sorted(expected), + sorted(resources)) diff --git a/nova/tests/compute/test_stats.py b/nova/tests/compute/test_stats.py index 1864ac7950..c90314b0fc 100644 --- a/nova/tests/compute/test_stats.py +++ b/nova/tests/compute/test_stats.py @@ -136,8 +136,6 @@ def test_add_stats_for_instance(self): self.assertEqual(1, self.stats["num_vm_None"]) self.assertEqual(2, self.stats["num_vm_" + vm_states.BUILDING]) - self.assertEqual(10, self.stats.num_vcpus_used) - def test_calculate_workload(self): self.stats._increment("num_task_None") self.stats._increment("num_task_" + task_states.SCHEDULING) @@ -191,7 +189,6 @@ def 
test_update_stats_for_instance_deleted(self): self.assertEqual(0, self.stats.num_instances_for_project("1234")) self.assertEqual(0, self.stats.num_os_type("Linux")) self.assertEqual(0, self.stats["num_vm_" + vm_states.BUILDING]) - self.assertEqual(0, self.stats.num_vcpus_used) def test_io_workload(self): vms = [vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED] diff --git a/setup.cfg b/setup.cfg index cb8c651ff2..50c185cf30 100644 --- a/setup.cfg +++ b/setup.cfg @@ -27,6 +27,8 @@ packages = nova [entry_points] +nova.compute.resources = + vcpu = nova.compute.resources.vcpu:VCPU nova.image.download.modules = file = nova.image.download.file console_scripts = From a60949888f550dc29c9f382f97589c6b1b978cf5 Mon Sep 17 00:00:00 2001 From: Arx Cruz Date: Wed, 14 May 2014 19:02:50 -0300 Subject: [PATCH 106/486] Inject expected results for IBM Power when testing bus devices Same as in https://review.openstack.org/#/c/93621 this patch inject expected values when the test is running on IBM Power platform, since the ide bus isn't supported on IBM Power, and libvirt translate all ide calls directly to scsi when it's running in power platform Change-Id: Ibebc96e71a8ba21ae8634e9e22de1cd28f3a7990 --- nova/tests/virt/libvirt/test_blockinfo.py | 28 +++++++++++++++++++++-- nova/tests/virt/libvirt/test_driver.py | 12 +++++++++- 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/nova/tests/virt/libvirt/test_blockinfo.py b/nova/tests/virt/libvirt/test_blockinfo.py index 62f8589362..88032a19d9 100644 --- a/nova/tests/virt/libvirt/test_blockinfo.py +++ b/nova/tests/virt/libvirt/test_blockinfo.py @@ -261,6 +261,9 @@ def test_get_disk_mapping_simple_swap(self): def test_get_disk_mapping_simple_configdrive(self): # A simple disk mapping setup, but with configdrive added + # It's necessary to check if the architecture is power, because + # power doesn't have support to ide, and so libvirt translate + # all ide calls to scsi self.flags(force_config_drive=True) @@ -270,18 +273,32 
@@ def test_get_disk_mapping_simple_configdrive(self): mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide") + # The last device is selected for this. on x86 is the last ide + # device (hdd). Since power only support scsi, the last device + # is sdz + + bus_ppc = ("scsi", "sdz") + expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc} + + bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}), + ("ide", "hdd")) + expect = { 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, - 'disk.config': {'bus': 'ide', 'dev': 'hdd', 'type': 'cdrom'}, + 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'} } + self.assertEqual(expect, mapping) def test_get_disk_mapping_cdrom_configdrive(self): # A simple disk mapping setup, with configdrive added as cdrom + # It's necessary to check if the architecture is power, because + # power doesn't have support to ide, and so libvirt translate + # all ide calls to scsi self.flags(force_config_drive=True) self.flags(config_drive_format='iso9660') @@ -292,14 +309,21 @@ def test_get_disk_mapping_cdrom_configdrive(self): mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide") + bus_ppc = ("scsi", "sdz") + expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc} + + bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}), + ("ide", "hdd")) + expect = { 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, - 'disk.config': {'bus': 'ide', 'dev': 'hdd', 'type': 'cdrom'}, + 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'}, 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'} } + self.assertEqual(expect, mapping) def test_get_disk_mapping_disk_configdrive(self): diff --git a/nova/tests/virt/libvirt/test_driver.py 
b/nova/tests/virt/libvirt/test_driver.py index 3a7903ba66..f251957eef 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -1223,6 +1223,10 @@ def test_get_guest_config_with_block_device(self): self.assertTrue(info['block_device_mapping'][1].save.called) def test_get_guest_config_with_configdrive(self): + # It's necessary to check if the architecture is power, because + # power doesn't have support to ide, and so libvirt translate + # all ide calls to scsi + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = db.instance_create(self.context, self.test_instance) @@ -1233,9 +1237,15 @@ def test_get_guest_config_with_configdrive(self): instance_ref) cfg = conn._get_guest_config(instance_ref, [], {}, disk_info) + # The last device is selected for this. on x86 is the last ide + # device (hdd). Since power only support scsi, the last device + # is sdz + + expect = {"ppc": "sdz", "ppc64": "sdz"} + disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd") self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) - self.assertEqual(cfg.devices[2].target_dev, 'hdd') + self.assertEqual(cfg.devices[2].target_dev, disk) def test_get_guest_config_with_virtio_scsi_bus(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) From e25ca687095f3dc22b2d681cb45b7be840d0080d Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Wed, 16 Jul 2014 12:38:52 -0400 Subject: [PATCH 107/486] Gate on F402/pep8 rename instances of loop variables to prevent them from shadowing imports. 
This strict check was added in hacking 0.9 Change-Id: Ib5e162b85e3c6931f213731c3febd3dd8ba0b4b1 --- .../openstack/compute/contrib/used_limits.py | 8 +- nova/db/sqlalchemy/api.py | 4 +- nova/tests/api/ec2/test_api.py | 16 ++-- nova/tests/api/ec2/test_cinder_cloud.py | 4 +- .../api/openstack/compute/test_servers.py | 2 +- nova/tests/compute/test_compute.py | 5 +- nova/tests/virt/libvirt/test_driver.py | 8 +- nova/tests/virt/test_hardware.py | 90 +++++++++---------- nova/tests/virt/xenapi/test_xenapi.py | 12 +-- nova/virt/libvirt/driver.py | 56 ++++++------ tox.ini | 3 +- 11 files changed, 104 insertions(+), 104 deletions(-) diff --git a/nova/api/openstack/compute/contrib/used_limits.py b/nova/api/openstack/compute/contrib/used_limits.py index 12b34cd265..4cfd3948dc 100644 --- a/nova/api/openstack/compute/contrib/used_limits.py +++ b/nova/api/openstack/compute/contrib/used_limits.py @@ -61,11 +61,11 @@ def index(self, req, resp_obj): 'totalSecurityGroupsUsed': 'security_groups', } used_limits = {} - for display_name, quota in quota_map.iteritems(): - if quota in quotas: - reserved = (quotas[quota]['reserved'] + for display_name, key in quota_map.iteritems(): + if key in quotas: + reserved = (quotas[key]['reserved'] if self._reserved(req) else 0) - used_limits[display_name] = quotas[quota]['in_use'] + reserved + used_limits[display_name] = quotas[key]['in_use'] + reserved resp_obj.obj['limits']['absolute'].update(used_limits) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index f6e4cdf8e0..d2400f012c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -2807,8 +2807,8 @@ def quota_get_all_by_project_and_user(context, project_id, user_id): all() result = {'project_id': project_id, 'user_id': user_id} - for quota in user_quotas: - result[quota.resource] = quota.hard_limit + for user_quota in user_quotas: + result[user_quota.resource] = user_quota.hard_limit return result diff --git a/nova/tests/api/ec2/test_api.py 
b/nova/tests/api/ec2/test_api.py index c5c3a63457..924497a802 100644 --- a/nova/tests/api/ec2/test_api.py +++ b/nova/tests/api/ec2/test_api.py @@ -398,26 +398,26 @@ def test_group_name_valid_chars_security_group(self): (True, "test name", bad_amazon_ec2), (False, bad_strict_ec2, "test desc"), ] - for test in test_raise: + for t in test_raise: self.expect_http() self.mox.ReplayAll() - self.flags(ec2_strict_validation=test[0]) + self.flags(ec2_strict_validation=t[0]) self.assertRaises(boto_exc.EC2ResponseError, self.ec2.create_security_group, - test[1], - test[2]) + t[1], + t[2]) test_accept = [ (False, bad_amazon_ec2, "test desc"), (False, "test name", bad_amazon_ec2), ] - for test in test_accept: + for t in test_accept: self.expect_http() self.mox.ReplayAll() - self.flags(ec2_strict_validation=test[0]) - self.ec2.create_security_group(test[1], test[2]) + self.flags(ec2_strict_validation=t[0]) + self.ec2.create_security_group(t[1], t[2]) self.expect_http() self.mox.ReplayAll() - self.ec2.delete_security_group(test[1]) + self.ec2.delete_security_group(t[1]) def test_group_name_valid_length_security_group(self): """Test that we sanely handle invalid security group names. 
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py index 13155700cd..bb48b68312 100644 --- a/nova/tests/api/ec2/test_cinder_cloud.py +++ b/nova/tests/api/ec2/test_cinder_cloud.py @@ -515,9 +515,9 @@ def _setUpBlockDeviceMapping(self): def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes): for vol in volumes: self.volume_api.delete(self.context, vol['id']) - for uuid in (inst1['uuid'], inst2['uuid']): + for instance_uuid in (inst1['uuid'], inst2['uuid']): for bdm in db.block_device_mapping_get_all_by_instance( - self.context, uuid): + self.context, instance_uuid): db.block_device_mapping_destroy(self.context, bdm['id']) db.instance_destroy(self.context, inst2['uuid']) db.instance_destroy(self.context, inst1['uuid']) diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index 513df07f3a..a65041c86f 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -2774,7 +2774,7 @@ def _instance_destroy(*args, **kwargs): self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm) self.stubs.Set(objects.Instance, 'destroy', _instance_destroy) - for _ in xrange(len(bdm_exceptions)): + for _unused in xrange(len(bdm_exceptions)): params = {'block_device_mapping_v2': [bdm.copy()]} self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_extra, params) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 1f3173c637..52bd3b2e18 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -6158,8 +6158,9 @@ def fetch_instance_migration_status(instance_uuid): self.compute._poll_unconfirmed_resizes(ctxt) - for uuid, status in expected_migration_status.iteritems(): - self.assertEqual(status, fetch_instance_migration_status(uuid)) + for instance_uuid, status in expected_migration_status.iteritems(): + self.assertEqual(status, + 
fetch_instance_migration_status(instance_uuid)) def test_instance_build_timeout_mixed_instances(self): # Tests that instances which failed to build within the configured diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index a917cb52f2..2a7c42fa72 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -3851,8 +3851,8 @@ def connection_supports_direct_io_stub(dirpath): network_info, disk_info, image_meta) tree = etree.fromstring(xml) disks = tree.findall('./devices/disk/driver') - for disk in disks: - self.assertEqual(disk.get("cache"), "none") + for guest_disk in disks: + self.assertEqual(guest_disk.get("cache"), "none") directio_supported = False @@ -3865,8 +3865,8 @@ def connection_supports_direct_io_stub(dirpath): network_info, disk_info, image_meta) tree = etree.fromstring(xml) disks = tree.findall('./devices/disk/driver') - for disk in disks: - self.assertEqual(disk.get("cache"), "writethrough") + for guest_disk in disks: + self.assertEqual(guest_disk.get("cache"), "writethrough") def _check_xml_and_disk_bus(self, image_meta, block_device_info, wantConfig): diff --git a/nova/tests/virt/test_hardware.py b/nova/tests/virt/test_hardware.py index 9a08cd6dfc..6436556e0a 100644 --- a/nova/tests/virt/test_hardware.py +++ b/nova/tests/virt/test_hardware.py @@ -294,24 +294,24 @@ def test_validate_config(self): }, ] - for test in testdata: - if type(test["expect"]) == tuple: + for topo_test in testdata: + if type(topo_test["expect"]) == tuple: (preferred, maximum) = hw.VirtCPUTopology.get_topology_constraints( - test["flavor"], - test["image"]) - - self.assertEqual(test["expect"][0], preferred.sockets) - self.assertEqual(test["expect"][1], preferred.cores) - self.assertEqual(test["expect"][2], preferred.threads) - self.assertEqual(test["expect"][3], maximum.sockets) - self.assertEqual(test["expect"][4], maximum.cores) - self.assertEqual(test["expect"][5], maximum.threads) + 
topo_test["flavor"], + topo_test["image"]) + + self.assertEqual(topo_test["expect"][0], preferred.sockets) + self.assertEqual(topo_test["expect"][1], preferred.cores) + self.assertEqual(topo_test["expect"][2], preferred.threads) + self.assertEqual(topo_test["expect"][3], maximum.sockets) + self.assertEqual(topo_test["expect"][4], maximum.cores) + self.assertEqual(topo_test["expect"][5], maximum.threads) else: - self.assertRaises(test["expect"], + self.assertRaises(topo_test["expect"], hw.VirtCPUTopology.get_topology_constraints, - test["flavor"], - test["image"]) + topo_test["flavor"], + topo_test["image"]) def test_possible_configs(self): testdata = [ @@ -400,28 +400,28 @@ def test_possible_configs(self): }, ] - for test in testdata: - if type(test["expect"]) == list: + for topo_test in testdata: + if type(topo_test["expect"]) == list: actual = [] for topology in hw.VirtCPUTopology.get_possible_topologies( - test["vcpus"], - hw.VirtCPUTopology(test["maxsockets"], - test["maxcores"], - test["maxthreads"]), - test["allow_threads"]): + topo_test["vcpus"], + hw.VirtCPUTopology(topo_test["maxsockets"], + topo_test["maxcores"], + topo_test["maxthreads"]), + topo_test["allow_threads"]): actual.append([topology.sockets, topology.cores, topology.threads]) - self.assertEqual(test["expect"], actual) + self.assertEqual(topo_test["expect"], actual) else: - self.assertRaises(test["expect"], + self.assertRaises(topo_test["expect"], hw.VirtCPUTopology.get_possible_topologies, - test["vcpus"], - hw.VirtCPUTopology(test["maxsockets"], - test["maxcores"], - test["maxthreads"]), - test["allow_threads"]) + topo_test["vcpus"], + hw.VirtCPUTopology(topo_test["maxsockets"], + topo_test["maxcores"], + topo_test["maxthreads"]), + topo_test["allow_threads"]) def test_sorting_configs(self): testdata = [ @@ -492,26 +492,26 @@ def test_sorting_configs(self): }, ] - for test in testdata: + for topo_test in testdata: actual = [] possible = hw.VirtCPUTopology.get_possible_topologies( - 
test["vcpus"], - hw.VirtCPUTopology(test["maxsockets"], - test["maxcores"], - test["maxthreads"]), - test["allow_threads"]) + topo_test["vcpus"], + hw.VirtCPUTopology(topo_test["maxsockets"], + topo_test["maxcores"], + topo_test["maxthreads"]), + topo_test["allow_threads"]) tops = hw.VirtCPUTopology.sort_possible_topologies( possible, - hw.VirtCPUTopology(test["sockets"], - test["cores"], - test["threads"])) + hw.VirtCPUTopology(topo_test["sockets"], + topo_test["cores"], + topo_test["threads"])) for topology in tops: actual.append([topology.sockets, topology.cores, topology.threads]) - self.assertEqual(test["expect"], actual) + self.assertEqual(topo_test["expect"], actual) def test_best_config(self): testdata = [ @@ -626,12 +626,12 @@ def test_best_config(self): }, ] - for test in testdata: + for topo_test in testdata: topology = hw.VirtCPUTopology.get_desirable_configs( - test["flavor"], - test["image"], - test["allow_threads"])[0] + topo_test["flavor"], + topo_test["image"], + topo_test["allow_threads"])[0] - self.assertEqual(test["expect"][0], topology.sockets) - self.assertEqual(test["expect"][1], topology.cores) - self.assertEqual(test["expect"][2], topology.threads) + self.assertEqual(topo_test["expect"][0], topology.sockets) + self.assertEqual(topo_test["expect"][1], topology.cores) + self.assertEqual(topo_test["expect"][2], topology.threads) diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py index 6b923e55b1..bee3642ceb 100644 --- a/nova/tests/virt/xenapi/test_xenapi.py +++ b/nova/tests/virt/xenapi/test_xenapi.py @@ -2867,9 +2867,9 @@ def _create_service_entries(context, values={'avail_zone1': ['fake_host1', 'fake_host2'], 'avail_zone2': ['fake_host3'], }): for avail_zone, hosts in values.iteritems(): - for host in hosts: + for service_host in hosts: db.service_create(context, - {'host': host, + {'host': service_host, 'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0}) @@ -3063,8 +3063,8 @@ def 
_aggregate_setup(self, aggr_name='fake_aggregate', if metadata: aggregate.metadata.update(metadata) aggregate.create(self.context) - for host in hosts: - aggregate.add_host(host) + for aggregate_host in hosts: + aggregate.add_host(aggregate_host) return aggregate def test_add_host_to_aggregate_invalid_changing_status(self): @@ -3104,9 +3104,9 @@ def test_remove_host_from_aggregate_error(self): metadata = {pool_states.POOL_FLAG: "XenAPI", pool_states.KEY: pool_states.ACTIVE} db.aggregate_metadata_add(self.context, aggr['id'], metadata) - for host in values[fake_zone]: + for aggregate_host in values[fake_zone]: aggr = self.api.add_host_to_aggregate(self.context, - aggr['id'], host) + aggr['id'], aggregate_host) # let's mock the fact that the aggregate is in error! expected = self.api.remove_host_from_aggregate(self.context, aggr['id'], diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 1a61ff0291..b233c1fadb 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -1740,27 +1740,27 @@ def _volume_snapshot_create(self, context, instance, domain, network_disks_to_snap = [] # network disks (netfs, gluster, etc.) 
disks_to_skip = [] # local disks not snapshotted - for disk in device_info.devices: - if (disk.root_name != 'disk'): + for guest_disk in device_info.devices: + if (guest_disk.root_name != 'disk'): continue - if (disk.target_dev is None): + if (guest_disk.target_dev is None): continue - if (disk.serial is None or disk.serial != volume_id): - disks_to_skip.append(disk.target_dev) + if (guest_disk.serial is None or guest_disk.serial != volume_id): + disks_to_skip.append(guest_disk.target_dev) continue # disk is a Cinder volume with the correct volume_id disk_info = { - 'dev': disk.target_dev, - 'serial': disk.serial, - 'current_file': disk.source_path, - 'source_protocol': disk.source_protocol, - 'source_name': disk.source_name, - 'source_hosts': disk.source_hosts, - 'source_ports': disk.source_ports + 'dev': guest_disk.target_dev, + 'serial': guest_disk.serial, + 'current_file': guest_disk.source_path, + 'source_protocol': guest_disk.source_protocol, + 'source_name': guest_disk.source_name, + 'source_hosts': guest_disk.source_hosts, + 'source_ports': guest_disk.source_ports } # Determine path for new_file based on current path @@ -1952,15 +1952,15 @@ def _volume_snapshot_delete(self, context, instance, volume_id, device_info = vconfig.LibvirtConfigGuest() device_info.parse_dom(xml_doc) - for disk in device_info.devices: - if (disk.root_name != 'disk'): + for guest_disk in device_info.devices: + if (guest_disk.root_name != 'disk'): continue - if (disk.target_dev is None or disk.serial is None): + if (guest_disk.target_dev is None or guest_disk.serial is None): continue - if disk.serial == volume_id: - my_dev = disk.target_dev + if guest_disk.serial == volume_id: + my_dev = guest_disk.target_dev if my_dev is None: msg = _('Disk with id: %s ' @@ -3348,20 +3348,20 @@ def _get_guest_config(self, instance, network_info, image_meta, tmhpet.present = False clk.add_timer(tmhpet) - for cfg in self._get_guest_storage_config(instance, + for config in 
self._get_guest_storage_config(instance, image_meta, disk_info, rescue, block_device_info, flavor): - guest.add_device(cfg) + guest.add_device(config) for vif in network_info: - cfg = self.vif_driver.get_config(instance, + config = self.vif_driver.get_config(instance, vif, image_meta, flavor) - guest.add_device(cfg) + guest.add_device(config) if ((CONF.libvirt.virt_type == "qemu" or CONF.libvirt.virt_type == "kvm")): @@ -5349,17 +5349,17 @@ def get_io_devices(xml_doc): # get io status xml = domain.XMLDesc(0) dom_io = get_io_devices(xml) - for disk in dom_io["volumes"]: + for guest_disk in dom_io["volumes"]: try: # blockStats might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt - stats = domain.blockStats(disk) - output[disk + "_read_req"] = stats[0] - output[disk + "_read"] = stats[1] - output[disk + "_write_req"] = stats[2] - output[disk + "_write"] = stats[3] - output[disk + "_errors"] = stats[4] + stats = domain.blockStats(guest_disk) + output[guest_disk + "_read_req"] = stats[0] + output[guest_disk + "_read"] = stats[1] + output[guest_disk + "_write_req"] = stats[2] + output[guest_disk + "_write"] = stats[3] + output[guest_disk + "_errors"] = stats[4] except libvirt.libvirtError: pass for interface in dom_io["ifaces"]: diff --git a/tox.ini b/tox.ini index 06dc18e388..85e452a02d 100644 --- a/tox.ini +++ b/tox.ini @@ -56,10 +56,9 @@ sitepackages = False # E125 is deliberately excluded. 
See https://github.com/jcrocholl/pep8/issues/126 # The rest of the ignores are TODOs # New from hacking 0.9: E129, E131, E265, H407, H405, H904 -# Stricter in hacking 0.9: F402 # E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301 -ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,E265,F402,H405,H904 +ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,E265,H405,H904 exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools [hacking] From bb7f20565bdcc6d5c4a7426b97384e26c12e5d25 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 20 May 2014 15:01:18 -0700 Subject: [PATCH 108/486] Fix nova/pci direct use of object modules This replaces all uses of nova.objects.. with nova.objects. within nova/pci and nova/tests/pci. Change-Id: I72e763fb5032a5dc2cf649e5e29213ea71578c8d Partial-Blueprint: object-subclassing --- nova/pci/pci_manager.py | 12 +++++------- nova/tests/pci/test_pci_manager.py | 5 ++--- nova/tests/pci/test_pci_stats.py | 8 ++++---- nova/tests/pci/test_pci_whitelist.py | 10 +++++----- 4 files changed, 16 insertions(+), 19 deletions(-) diff --git a/nova/pci/pci_manager.py b/nova/pci/pci_manager.py index e99117b881..f5f57af11c 100644 --- a/nova/pci/pci_manager.py +++ b/nova/pci/pci_manager.py @@ -21,8 +21,7 @@ from nova import context from nova import exception from nova.i18n import _ -from nova.objects import instance -from nova.objects import pci_device as pci_device_obj +from nova import objects from nova.openstack.common import log as logging from nova.pci import pci_device from nova.pci import pci_request @@ -58,8 +57,7 @@ def __init__(self, node_id=None): self.stats = pci_stats.PciDeviceStats() if node_id: self.pci_devs = list( - pci_device_obj.PciDeviceList.get_by_compute_node( - context, node_id)) + objects.PciDeviceList.get_by_compute_node(context, node_id)) else: self.pci_devs = [] self._initial_instance_usage() @@ -189,7 +187,7 @@ def set_hvdevs(self, devices): for dev in [dev for 
dev in devices if dev['address'] in new_addrs - exist_addrs]: dev['compute_node_id'] = self.node_id - dev_obj = pci_device_obj.PciDevice.create(dev) + dev_obj = objects.PciDevice.create(dev) self.pci_devs.append(dev_obj) self.stats.add_device(dev_obj) @@ -314,9 +312,9 @@ def set_compute_node_id(self, node_id): def get_instance_pci_devs(inst): """Get the devices assigned to the instances.""" - if isinstance(inst, instance.Instance): + if isinstance(inst, objects.Instance): return inst.pci_devices else: ctxt = context.get_admin_context() - return pci_device_obj.PciDeviceList.get_by_instance_uuid( + return objects.PciDeviceList.get_by_instance_uuid( ctxt, inst['uuid']) diff --git a/nova/tests/pci/test_pci_manager.py b/nova/tests/pci/test_pci_manager.py index a1e24d0720..734e89d07a 100644 --- a/nova/tests/pci/test_pci_manager.py +++ b/nova/tests/pci/test_pci_manager.py @@ -21,7 +21,6 @@ from nova import db from nova import exception from nova import objects -from nova.objects import pci_device as pci_device_obj from nova.pci import pci_device from nova.pci import pci_manager from nova.pci import pci_request @@ -75,7 +74,7 @@ class PciDevTrackerTestCase(test.TestCase): def _create_fake_instance(self): self.inst = objects.Instance() self.inst.uuid = 'fake-inst-uuid' - self.inst.pci_devices = pci_device_obj.PciDeviceList() + self.inst.pci_devices = objects.PciDeviceList() self.inst.vm_state = vm_states.ACTIVE self.inst.task_state = None @@ -334,7 +333,7 @@ def test_get_devs_object(self): def _fake_obj_load_attr(foo, attrname): if attrname == 'pci_devices': self.load_attr_called = True - foo.pci_devices = pci_device_obj.PciDeviceList() + foo.pci_devices = objects.PciDeviceList() inst = fakes.stub_instance(id='1') ctxt = context.get_admin_context() diff --git a/nova/tests/pci/test_pci_stats.py b/nova/tests/pci/test_pci_stats.py index 9104e2ea9e..bf27a68114 100644 --- a/nova/tests/pci/test_pci_stats.py +++ b/nova/tests/pci/test_pci_stats.py @@ -14,7 +14,7 @@ # under the 
License. from nova import exception -from nova.objects import pci_device +from nova import objects from nova.openstack.common import jsonutils from nova.pci import pci_stats as pci from nova import test @@ -51,9 +51,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase): def _create_fake_devs(self): - self.fake_dev_1 = pci_device.PciDevice.create(fake_pci_1) - self.fake_dev_2 = pci_device.PciDevice.create(fake_pci_2) - self.fake_dev_3 = pci_device.PciDevice.create(fake_pci_3) + self.fake_dev_1 = objects.PciDevice.create(fake_pci_1) + self.fake_dev_2 = objects.PciDevice.create(fake_pci_2) + self.fake_dev_3 = objects.PciDevice.create(fake_pci_3) map(self.pci_stats.add_device, [self.fake_dev_1, self.fake_dev_2, self.fake_dev_3]) diff --git a/nova/tests/pci/test_pci_whitelist.py b/nova/tests/pci/test_pci_whitelist.py index 0b737eb7bb..ae923ef0f6 100644 --- a/nova/tests/pci/test_pci_whitelist.py +++ b/nova/tests/pci/test_pci_whitelist.py @@ -14,7 +14,7 @@ # under the License. from nova import exception -from nova.objects import pci_device +from nova import objects from nova.pci import pci_whitelist from nova import test @@ -59,7 +59,7 @@ def test_whitelist(self): 'product_id': '0001'}]) def test_whitelist_empty(self): - dev = pci_device.PciDevice.create(dev_dict) + dev = objects.PciDevice.create(dev_dict) parsed = pci_whitelist.PciHostDevicesWhiteList() self.assertEqual(parsed.device_assignable(dev), False) @@ -73,13 +73,13 @@ def test_whitelist_multiple(self): {'vendor_id': '8087', 'product_id': '0002'}]) def test_device_assignable(self): - dev = pci_device.PciDevice.create(dev_dict) + dev = objects.PciDevice.create(dev_dict) white_list = '[{"product_id":"0001", "vendor_id":"8086"}]' parsed = pci_whitelist.PciHostDevicesWhiteList([white_list]) self.assertEqual(parsed.device_assignable(dev), True) def test_device_assignable_multiple(self): - dev = pci_device.PciDevice.create(dev_dict) + dev = objects.PciDevice.create(dev_dict) white_list_1 = '[{"product_id":"0001", 
"vendor_id":"8086"}]' white_list_2 = '[{"product_id":"0002", "vendor_id":"8087"}]' parsed = pci_whitelist.PciHostDevicesWhiteList( @@ -93,5 +93,5 @@ def test_get_pci_devices_filter(self): white_list_1 = '[{"product_id":"0001", "vendor_id":"8086"}]' self.flags(pci_passthrough_whitelist=[white_list_1]) pci_filter = pci_whitelist.get_pci_devices_filter() - dev = pci_device.PciDevice.create(dev_dict) + dev = objects.PciDevice.create(dev_dict) self.assertEqual(pci_filter.device_assignable(dev), True) From c03c90ea5d5873ce41ad73603d3792194b6ca2e2 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Sat, 19 Jul 2014 11:43:34 +0200 Subject: [PATCH 109/486] Re-add H803 to flake8 ignore list. I6c2537dd27c947e36ebf37eb3b5c8a1ab8b026a1 mistakenly removed H803 since running flake8 had no hits for this test, but H803 is the period in the commit message which we are explicitly ignoring until the next version of hacking is released (which will remove this check). Change-Id: I22860cc61336ef5bf8ce4509fe130160bdf7e45b --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 85e452a02d..2bcec9b530 100644 --- a/tox.ini +++ b/tox.ini @@ -58,7 +58,7 @@ sitepackages = False # New from hacking 0.9: E129, E131, E265, H407, H405, H904 # E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301 -ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,E265,H405,H904 +ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,E265,H405,H803,H904 exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools [hacking] From 18a2917504cf9be4370243fabc655d0de1656707 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Fri, 2 May 2014 02:07:32 +0800 Subject: [PATCH 110/486] Adjust audit logs to avoid negative mem/cpu info we might have following audit info in the log: AUDIT nova.compute.resource_tracker [-] Free ram (MB): -1559 AUDIT nova.compute.resource_tracker [-] Free disk (GB): 29 AUDIT nova.compute.resource_tracker [-] 
Free VCPUS: -3 which is really confusing to operater and useless This patch adjust the log to print phy info and virtual cpus. DocImpact Change-Id: I2bc63e1aae0787bf976660eed8fd8899bfffe593 Closes-Bug: #1316916 --- nova/compute/resource_tracker.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py index fb65f77c3a..116836d8ad 100644 --- a/nova/compute/resource_tracker.py +++ b/nova/compute/resource_tracker.py @@ -423,17 +423,22 @@ def _report_hypervisor_resource_view(self, resources): LOG.debug("Hypervisor: no assignable PCI devices") def _report_final_resource_view(self, resources): - """Report final calculate of free memory, disk, CPUs, and PCI devices, + """Report final calculate of physical memory, used virtual memory, + disk, usable vCPUs, used virtual CPUs and PCI devices, including instance calculations and in-progress resource claims. These values will be exposed via the compute node table to the scheduler. """ - LOG.audit(_("Free ram (MB): %s") % resources['free_ram_mb']) + LOG.audit(_("Total physical ram (MB): %(pram)s, " + "total allocated virtual ram (MB): %(vram)s"), + {'pram': resources['memory_mb'], + 'vram': resources['memory_mb_used']}) LOG.audit(_("Free disk (GB): %s") % resources['free_disk_gb']) vcpus = resources['vcpus'] if vcpus: - free_vcpus = vcpus - resources['vcpus_used'] - LOG.audit(_("Free VCPUS: %s") % free_vcpus) + LOG.audit(_("Total usable vcpus: %(tcpu)s, " + "total allocated vcpus: %(ucpu)s"), + {'tcpu': vcpus, 'ucpu': resources['vcpus_used']}) else: LOG.audit(_("Free VCPU information unavailable")) From d509d16da00e7e5146651e2a3541c0456c0f5368 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Wed, 18 Jun 2014 12:15:06 +0900 Subject: [PATCH 111/486] Add API schema for v2.1/v3 reset_server_state API By defining the API schema, it is possible to separate the validation code from the API method. The API method can be more simple. 
In addition, a response of API validation error can be consistent for the whole Nova API. Partially implements blueprint v3-api-schema Change-Id: Ib6bf4d1f1d875249d1758d4414377d47e15a3198 --- .../compute/plugins/v3/admin_actions.py | 13 ++++---- .../compute/schemas/v3/reset_server_state.py | 32 +++++++++++++++++++ .../compute/plugins/v3/test_admin_actions.py | 18 +++++------ 3 files changed, 47 insertions(+), 16 deletions(-) create mode 100644 nova/api/openstack/compute/schemas/v3/reset_server_state.py diff --git a/nova/api/openstack/compute/plugins/v3/admin_actions.py b/nova/api/openstack/compute/plugins/v3/admin_actions.py index 0ebb82f32e..3759cc5939 100644 --- a/nova/api/openstack/compute/plugins/v3/admin_actions.py +++ b/nova/api/openstack/compute/plugins/v3/admin_actions.py @@ -16,18 +16,21 @@ from webob import exc from nova.api.openstack import common +from nova.api.openstack.compute.schemas.v3 import reset_server_state from nova.api.openstack import extensions from nova.api.openstack import wsgi +from nova.api import validation from nova import compute from nova.compute import vm_states from nova import exception -from nova.i18n import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) ALIAS = "os-admin-actions" # States usable in resetState action +# NOTE: It is necessary to update the schema of nova/api/openstack/compute/ +# schemas/v3/reset_server_state.py, when updating this state_map. 
state_map = dict(active=vm_states.ACTIVE, error=vm_states.ERROR) @@ -71,18 +74,14 @@ def _inject_network_info(self, req, id, body): @extensions.expected_errors((400, 404)) @wsgi.action('reset_state') + @validation.schema(reset_server_state.reset_state) def _reset_state(self, req, id, body): """Permit admins to reset the state of a server.""" context = req.environ["nova.context"] authorize(context, 'reset_state') # Identify the desired state from the body - try: - state = state_map[body["reset_state"]["state"]] - except (TypeError, KeyError): - msg = _("Desired state must be specified. Valid states " - "are: %s") % ', '.join(sorted(state_map.keys())) - raise exc.HTTPBadRequest(explanation=msg) + state = state_map[body["reset_state"]["state"]] instance = common.get_instance(self.compute_api, context, id, want_objects=True) diff --git a/nova/api/openstack/compute/schemas/v3/reset_server_state.py b/nova/api/openstack/compute/schemas/v3/reset_server_state.py new file mode 100644 index 0000000000..ca8bd09337 --- /dev/null +++ b/nova/api/openstack/compute/schemas/v3/reset_server_state.py @@ -0,0 +1,32 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +reset_state = { + 'type': 'object', + 'properties': { + 'reset_state': { + 'type': 'object', + 'properties': { + 'state': { + 'type': 'string', + 'enum': ['active', 'error'], + }, + }, + 'required': ['state'], + 'additionalProperties': False, + }, + }, + 'required': ['reset_state'], + 'additionalProperties': False, +} diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_admin_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_admin_actions.py index 97265aa12c..728e85a943 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_admin_actions.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_admin_actions.py @@ -196,16 +196,16 @@ def setUp(self): self.context = self.request.environ['nova.context'] def test_no_state(self): - self.assertRaises(webob.exc.HTTPBadRequest, + self.assertRaises(exception.ValidationError, self.admin_api._reset_state, self.request, self.uuid, - {"reset_state": None}) + body={"reset_state": None}) def test_bad_state(self): - self.assertRaises(webob.exc.HTTPBadRequest, + self.assertRaises(exception.ValidationError, self.admin_api._reset_state, self.request, self.uuid, - {"reset_state": {"state": "spam"}}) + body={"reset_state": {"state": "spam"}}) def test_no_instance(self): self.mox.StubOutWithMock(self.compute_api, 'get') @@ -218,7 +218,7 @@ def test_no_instance(self): self.assertRaises(webob.exc.HTTPNotFound, self.admin_api._reset_state, self.request, self.uuid, - {"reset_state": {"state": "active"}}) + body={"reset_state": {"state": "active"}}) def _setup_mock(self, expected): instance = objects.Instance() @@ -248,8 +248,8 @@ def test_reset_active(self): self.mox.ReplayAll() body = {"reset_state": {"state": "active"}} - result = self.admin_api._reset_state(self.request, self.uuid, body) - + result = self.admin_api._reset_state(self.request, self.uuid, + body=body) self.assertEqual(202, result.status_int) def test_reset_error(self): @@ -257,6 +257,6 @@ def test_reset_error(self): task_state=None)) 
self.mox.ReplayAll() body = {"reset_state": {"state": "error"}} - result = self.admin_api._reset_state(self.request, self.uuid, body) - + result = self.admin_api._reset_state(self.request, self.uuid, + body=body) self.assertEqual(202, result.status_int) From 908f2cf457376b50918581cb859cf7f50883d466 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Wed, 16 Jul 2014 09:06:18 +0000 Subject: [PATCH 112/486] Fix unit tests related to cloudpipe_update In some unit tests related to cloudpipe_update, there are two negative factors(ex. bad url and bad body) and current tests don't verify its purposes. This patch removes unnecessary factor. Change-Id: I5e78b47e6f4c7359a7f0c16898a26e3649d92eb2 --- .../api/openstack/compute/contrib/test_cloudpipe_update.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py index 1284ed8f39..403562f02c 100644 --- a/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py +++ b/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py @@ -57,7 +57,7 @@ def test_cloudpipe_configure_project(self): def test_cloudpipe_configure_project_bad_url(self): req = fakes.HTTPRequest.blank( '/v2/fake/os-cloudpipe/configure-projectx') - body = {"vpn_ip": "1.2.3.4", "vpn_port": 222} + body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 'configure-projectx', body) @@ -65,7 +65,7 @@ def test_cloudpipe_configure_project_bad_url(self): def test_cloudpipe_configure_project_bad_data(self): req = fakes.HTTPRequest.blank( '/v2/fake/os-cloudpipe/configure-project') - body = {"vpn_ipxx": "1.2.3.4", "vpn_port": 222} + body = {"configure_project": {"vpn_ipxx": "1.2.3.4", "vpn_port": 222}} self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.update, req, 'configure-project', body) From 
cb1e37caf174ed65c23b5fd51b28586a859291a9 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Wed, 16 Jul 2014 09:12:48 +0000 Subject: [PATCH 113/486] Fix error status code for cloudpipe_update When passing bad body in a request, most APIs return BadRequest response. However, cloudpipe_update API doesn't do it. This patch fixes the error status code and adds a unit test related to this change. Change-Id: I16a48e6913f52baa870e04196529fda619c03098 --- .../api/openstack/compute/contrib/cloudpipe_update.py | 5 +++-- .../compute/contrib/test_cloudpipe_update.py | 11 ++++++++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/compute/contrib/cloudpipe_update.py b/nova/api/openstack/compute/contrib/cloudpipe_update.py index e601eb282a..4ac040fb60 100644 --- a/nova/api/openstack/compute/contrib/cloudpipe_update.py +++ b/nova/api/openstack/compute/contrib/cloudpipe_update.py @@ -51,8 +51,9 @@ def update(self, req, id, body): network.vpn_public_address = vpn_ip network.vpn_public_port = vpn_port network.save() - except (TypeError, KeyError, ValueError): - raise webob.exc.HTTPUnprocessableEntity() + except (TypeError, KeyError, ValueError) as ex: + msg = _("Invalid request body: %s") % unicode(ex) + raise webob.exc.HTTPBadRequest(explanation=msg) return webob.exc.HTTPAccepted() diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py index 403562f02c..b17722902d 100644 --- a/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py +++ b/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py @@ -66,6 +66,15 @@ def test_cloudpipe_configure_project_bad_data(self): req = fakes.HTTPRequest.blank( '/v2/fake/os-cloudpipe/configure-project') body = {"configure_project": {"vpn_ipxx": "1.2.3.4", "vpn_port": 222}} - self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, 
+ 'configure-project', body) + + def test_cloudpipe_configure_project_bad_vpn_port(self): + req = fakes.HTTPRequest.blank( + '/v2/fake/os-cloudpipe/configure-project') + body = {"configure_project": {"vpn_ipxx": "1.2.3.4", + "vpn_port": "foo"}} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 'configure-project', body) From ac236c2cd73c9d7e05ef9fbd22a4f8f099262082 Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Sat, 12 Apr 2014 19:30:12 +0800 Subject: [PATCH 114/486] Check instance state before attach/detach interface Currently there isn't any instance's status checking before attach/detach interface, It will fail when some status didn't support it. This patch add checking for it. This patch allow attach/detach interface for ACTIVE, PAUSED and STOPPED instance. * ACTIVE: The interface is hotplug to instance. * PAUSED: The interface can be hotpluged after instance unpaused. It's fixed by commit: a868fcedf8e46070cae6aa8e59e61934fa23db1c * STOPPED: In this status, the instance is destroyed. It just update the instance configuration with new interface. When the start instance, the instance will be recreated with new configuration. 
Change-Id: I3c038056085be1f655758ed8b6a44bcdbf70cdd5 Closes-bug: #1299333 --- .../compute/contrib/attach_interfaces.py | 7 ++++ .../compute/plugins/v3/attach_interfaces.py | 6 +++ nova/compute/api.py | 6 +++ .../compute/contrib/test_attach_interfaces.py | 39 ++++++++++++++++++ .../plugins/v3/test_attach_interfaces.py | 41 +++++++++++++++++++ nova/tests/compute/test_compute_api.py | 30 ++++++++++++++ nova/tests/fake_policy.py | 3 ++ 7 files changed, 132 insertions(+) diff --git a/nova/api/openstack/compute/contrib/attach_interfaces.py b/nova/api/openstack/compute/contrib/attach_interfaces.py index f3b4761724..01f22c00b5 100644 --- a/nova/api/openstack/compute/contrib/attach_interfaces.py +++ b/nova/api/openstack/compute/contrib/attach_interfaces.py @@ -18,6 +18,7 @@ import webob from webob import exc +from nova.api.openstack import common from nova.api.openstack import extensions from nova import compute from nova import exception @@ -122,6 +123,9 @@ def create(self, req, server_id, body): LOG.exception(e) msg = _("Failed to attach interface") raise webob.exc.HTTPInternalServerError(explanation=msg) + except exception.InstanceInvalidState as state_error: + common.raise_http_conflict_for_instance_invalid_state(state_error, + 'attach_interface') return self.show(req, server_id, vif['id']) @@ -153,6 +157,9 @@ def delete(self, req, server_id, id): except NotImplementedError: msg = _("Network driver does not support this function.") raise webob.exc.HTTPNotImplemented(explanation=msg) + except exception.InstanceInvalidState as state_error: + common.raise_http_conflict_for_instance_invalid_state(state_error, + 'detach_interface') return webob.Response(status_int=202) diff --git a/nova/api/openstack/compute/plugins/v3/attach_interfaces.py b/nova/api/openstack/compute/plugins/v3/attach_interfaces.py index ed805dc574..5932a2b845 100644 --- a/nova/api/openstack/compute/plugins/v3/attach_interfaces.py +++ b/nova/api/openstack/compute/plugins/v3/attach_interfaces.py @@ -123,6 
+123,9 @@ def create(self, req, server_id, body): LOG.exception(e) raise webob.exc.HTTPInternalServerError( explanation=e.format_message()) + except exception.InstanceInvalidState as state_error: + common.raise_http_conflict_for_instance_invalid_state(state_error, + 'attach_interface') return self.show(req, server_id, vif['id']) @@ -149,6 +152,9 @@ def delete(self, req, server_id, id): raise exc.HTTPConflict(explanation=e.format_message()) except NotImplementedError as e: raise webob.exc.HTTPNotImplemented(explanation=e.format_message()) + except exception.InstanceInvalidState as state_error: + common.raise_http_conflict_for_instance_invalid_state(state_error, + 'detach_interface') return webob.Response(status_int=202) diff --git a/nova/compute/api.py b/nova/compute/api.py index 1ab06c35d3..665fa93d32 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -2881,6 +2881,9 @@ def swap_volume(self, context, instance, old_volume, new_volume): @wrap_check_policy @check_instance_lock + @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED, + vm_states.STOPPED], + task_state=[None]) def attach_interface(self, context, instance, network_id, port_id, requested_ip): """Use hotplug to add an network adapter to an instance.""" @@ -2890,6 +2893,9 @@ def attach_interface(self, context, instance, network_id, port_id, @wrap_check_policy @check_instance_lock + @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED, + vm_states.STOPPED], + task_state=[None]) def detach_interface(self, context, instance, port_id): """Detach an network adapter from an instance.""" self.compute_rpcapi.detach_interface(context, instance=instance, diff --git a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py index 92792670c4..c1e9d84ce8 100644 --- a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py +++ b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py 
@@ -282,6 +282,45 @@ def test_attach_interface_with_invalid_data(self): attachments.create, req, FAKE_UUID1, jsonutils.loads(req.body)) + def test_attach_interface_with_invalid_state(self): + def fake_attach_interface_invalid_state(*args, **kwargs): + raise exception.InstanceInvalidState( + instance_uuid='', attr='', state='', + method='attach_interface') + + self.stubs.Set(compute_api.API, 'attach_interface', + fake_attach_interface_invalid_state) + attachments = attach_interfaces.InterfaceAttachmentController() + req = webob.Request.blank('/v2/fake/os-interfaces/attach') + req.method = 'POST' + req.body = jsonutils.dumps({'interfaceAttachment': + {'net_id': FAKE_NET_ID1}}) + req.headers['content-type'] = 'application/json' + req.environ['nova.context'] = self.context + self.assertRaises(exc.HTTPConflict, + attachments.create, req, FAKE_UUID1, + jsonutils.loads(req.body)) + + def test_detach_interface_with_invalid_state(self): + def fake_detach_interface_invalid_state(*args, **kwargs): + raise exception.InstanceInvalidState( + instance_uuid='', attr='', state='', + method='detach_interface') + + self.stubs.Set(compute_api.API, 'detach_interface', + fake_detach_interface_invalid_state) + attachments = attach_interfaces.InterfaceAttachmentController() + req = webob.Request.blank('/v2/fake/os-interfaces/attach') + req.method = 'DELETE' + req.body = jsonutils.dumps({}) + req.headers['content-type'] = 'application/json' + req.environ['nova.context'] = self.context + self.assertRaises(exc.HTTPConflict, + attachments.delete, + req, + FAKE_UUID1, + FAKE_NET_ID1) + class InterfaceAttachTestsWithMock(test.NoDBTestCase): def setUp(self): diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_attach_interfaces.py b/nova/tests/api/openstack/compute/plugins/v3/test_attach_interfaces.py index 51a4212fc2..f0ed6e45a6 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_attach_interfaces.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_attach_interfaces.py 
@@ -376,6 +376,47 @@ def test_attach_interface_instance_with_non_array_fixed_ips(self): param = {'fixed_ips': 'non_array'} self._test_attach_interface_with_invalid_parameter(param) + def test_attach_interface_with_invalid_state(self): + def fake_attach_interface_invalid_state(*args, **kwargs): + raise exception.InstanceInvalidState( + instance_uuid='', attr='', state='', + method='attach_interface') + + self.stubs.Set(compute_api.API, 'attach_interface', + fake_attach_interface_invalid_state) + attachments = attach_interfaces.InterfaceAttachmentController() + req = webob.Request.blank( + '/v3/servers/fake/os-attach-interfaces/attach') + req.method = 'POST' + req.body = jsonutils.dumps({'interface_attachment': + {'net_id': FAKE_NET_ID1}}) + req.headers['content-type'] = 'application/json' + req.environ['nova.context'] = self.context + self.assertRaises(exc.HTTPConflict, + attachments.create, req, FAKE_UUID1, + body=jsonutils.loads(req.body)) + + def test_detach_interface_with_invalid_state(self): + def fake_detach_interface_invalid_state(*args, **kwargs): + raise exception.InstanceInvalidState( + instance_uuid='', attr='', state='', + method='detach_interface') + + self.stubs.Set(compute_api.API, 'detach_interface', + fake_detach_interface_invalid_state) + attachments = attach_interfaces.InterfaceAttachmentController() + req = webob.Request.blank( + '/v3/servers/fake/os-attach-interfaces/delete') + req.method = 'DELETE' + req.body = jsonutils.dumps({}) + req.headers['content-type'] = 'application/json' + req.environ['nova.context'] = self.context + self.assertRaises(exc.HTTPConflict, + attachments.delete, + req, + FAKE_UUID1, + FAKE_NET_ID1) + class InterfaceAttachTestsWithMock(test.NoDBTestCase): def setUp(self): diff --git a/nova/tests/compute/test_compute_api.py b/nova/tests/compute/test_compute_api.py index c1a986e7ff..72a580516c 100644 --- a/nova/tests/compute/test_compute_api.py +++ b/nova/tests/compute/test_compute_api.py @@ -2172,6 +2172,36 @@ def 
do_test(compute_rpcapi_mock, record_mock, instance_save_mock): do_test() + def _test_attach_interface_invalid_state(self, state): + instance = self._create_instance_obj( + params={'vm_state': state}) + self.assertRaises(exception.InstanceInvalidState, + self.compute_api.attach_interface, + self.context, instance, '', '', '', []) + + def test_attach_interface_invalid_state(self): + for state in [vm_states.BUILDING, vm_states.DELETED, + vm_states.ERROR, vm_states.RESCUED, + vm_states.RESIZED, vm_states.SOFT_DELETED, + vm_states.SUSPENDED, vm_states.SHELVED, + vm_states.SHELVED_OFFLOADED]: + self._test_attach_interface_invalid_state(state) + + def _test_detach_interface_invalid_state(self, state): + instance = self._create_instance_obj( + params={'vm_state': state}) + self.assertRaises(exception.InstanceInvalidState, + self.compute_api.detach_interface, + self.context, instance, '', '', '', []) + + def test_detach_interface_invalid_state(self): + for state in [vm_states.BUILDING, vm_states.DELETED, + vm_states.ERROR, vm_states.RESCUED, + vm_states.RESIZED, vm_states.SOFT_DELETED, + vm_states.SUSPENDED, vm_states.SHELVED, + vm_states.SHELVED_OFFLOADED]: + self._test_detach_interface_invalid_state(state) + class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase): def setUp(self): diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py index ede5974b17..9d583c363f 100644 --- a/nova/tests/fake_policy.py +++ b/nova/tests/fake_policy.py @@ -59,6 +59,9 @@ "compute:attach_volume": "", "compute:detach_volume": "", + "compute:attach_interface": "", + "compute:detach_interface": "", + "compute:set_admin_password": "", "compute:rescue": "", From 3883697d31809ab6c5c448bf853bfde69ca53db0 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 20 May 2014 14:55:13 -0700 Subject: [PATCH 115/486] Fix last of direct use of object modules This replaces all uses of nova.objects.. with nova.objects. in the remaining places. 
Implements-Blueprint: object-subclassing Change-Id: Ic7632cca2455a38abcbdb94feb7e39cfb898bb27 --- nova/cells/messaging.py | 3 +- nova/cells/scheduler.py | 3 +- nova/cmd/dhcpbridge.py | 3 +- nova/conductor/manager.py | 7 +- nova/quota.py | 9 ++- nova/scheduler/filter_scheduler.py | 5 +- nova/tests/cells/test_cells_messaging.py | 6 +- nova/tests/fake_instance.py | 7 +- nova/tests/scheduler/test_filter_scheduler.py | 11 ++- nova/tests/virt/baremetal/test_pxe.py | 24 +++---- nova/tests/virt/libvirt/test_driver.py | 71 +++++++++---------- nova/tests/virt/test_virt_drivers.py | 13 ++-- nova/virt/baremetal/pxe.py | 10 +-- nova/virt/firewall.py | 8 +-- nova/virt/libvirt/driver.py | 10 ++- nova/virt/xenapi/host.py | 5 +- 16 files changed, 89 insertions(+), 106 deletions(-) diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py index 55fbfd75f1..64236f4712 100644 --- a/nova/cells/messaging.py +++ b/nova/cells/messaging.py @@ -48,7 +48,6 @@ from nova.network import model as network_model from nova import objects from nova.objects import base as objects_base -from nova.objects import instance_fault as instance_fault_obj from nova.openstack.common import excutils from nova.openstack.common import importutils from nova.openstack.common import jsonutils @@ -1103,7 +1102,7 @@ def instance_fault_create_at_top(self, message, instance_fault, **kwargs): log_str = _("Got message to create instance fault: " "%(instance_fault)s") LOG.debug(log_str, {'instance_fault': instance_fault}) - fault = instance_fault_obj.InstanceFault(context=message.ctxt) + fault = objects.InstanceFault(context=message.ctxt) fault.update(instance_fault) fault.create() diff --git a/nova/cells/scheduler.py b/nova/cells/scheduler.py index d9552f4324..42d4ff9092 100644 --- a/nova/cells/scheduler.py +++ b/nova/cells/scheduler.py @@ -33,7 +33,6 @@ from nova.i18n import _ from nova import objects from nova.objects import base as obj_base -from nova.objects import instance_action as instance_action_obj from 
nova.openstack.common import log as logging from nova.scheduler import utils as scheduler_utils from nova import utils @@ -120,7 +119,7 @@ def _create_instances_here(self, ctxt, instance_uuids, instance_properties, def _create_action_here(self, ctxt, instance_uuids): for instance_uuid in instance_uuids: - instance_action_obj.InstanceAction.action_start( + objects.InstanceAction.action_start( ctxt, instance_uuid, instance_actions.CREATE, diff --git a/nova/cmd/dhcpbridge.py b/nova/cmd/dhcpbridge.py index 2abb6a8ffd..114b7484a2 100644 --- a/nova/cmd/dhcpbridge.py +++ b/nova/cmd/dhcpbridge.py @@ -35,7 +35,6 @@ from nova.network import rpcapi as network_rpcapi from nova import objects from nova.objects import base as objects_base -from nova.objects import network as network_obj from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging @@ -72,7 +71,7 @@ def del_lease(mac, ip_address): def init_leases(network_id): """Get the list of hosts for a network.""" ctxt = context.get_admin_context() - network = network_obj.Network.get_by_id(ctxt, network_id) + network = objects.Network.get_by_id(ctxt, network_id) network_manager = importutils.import_object(CONF.network_manager) return network_manager.get_dhcp_leases(ctxt, network) diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py index 97fb87c849..ba102f4e3d 100644 --- a/nova/conductor/manager.py +++ b/nova/conductor/manager.py @@ -39,7 +39,6 @@ from nova import notifications from nova import objects from nova.objects import base as nova_object -from nova.objects import quotas as quotas_obj from nova.openstack.common import excutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging @@ -503,9 +502,9 @@ def _cold_migrate(self, context, instance, flavor, filter_properties, request_spec = scheduler_utils.build_request_spec( context, image, [instance], instance_type=flavor) - quotas = 
quotas_obj.Quotas.from_reservations(context, - reservations, - instance=instance) + quotas = objects.Quotas.from_reservations(context, + reservations, + instance=instance) try: scheduler_utils.populate_retry(filter_properties, instance['uuid']) hosts = self.scheduler_rpcapi.select_destinations( diff --git a/nova/quota.py b/nova/quota.py index 4e1644a178..98cf7fe9dd 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -24,7 +24,7 @@ from nova import db from nova import exception from nova.i18n import _ -from nova.objects import keypair as keypair_obj +from nova import objects from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils @@ -1405,6 +1405,11 @@ def resources(self): return sorted(self._resources.keys()) +def _keypair_get_count_by_user(*args, **kwargs): + """Helper method to avoid referencing objects.KeyPairList on import.""" + return objects.KeyPairList.get_count_by_user(*args, **kwargs) + + QUOTAS = QuotaEngine() @@ -1426,7 +1431,7 @@ def resources(self): CountableResource('security_group_rules', db.security_group_rule_count_by_group, 'quota_security_group_rules'), - CountableResource('key_pairs', keypair_obj.KeyPairList.get_count_by_user, + CountableResource('key_pairs', _keypair_get_count_by_user, 'quota_key_pairs'), ] diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py index 415f46d32d..35220ea64c 100644 --- a/nova/scheduler/filter_scheduler.py +++ b/nova/scheduler/filter_scheduler.py @@ -26,7 +26,7 @@ from nova.compute import rpcapi as compute_rpcapi from nova import exception from nova.i18n import _ -from nova.objects import instance_group as instance_group_obj +from nova import objects from nova.openstack.common import log as logging from nova.pci import pci_request from nova import rpc @@ -208,8 +208,7 @@ def _setup_instance_group(context, filter_properties): scheduler_hints = filter_properties.get('scheduler_hints') or {} group_hint = 
scheduler_hints.get('group', None) if group_hint: - group = instance_group_obj.InstanceGroup.get_by_hint(context, - group_hint) + group = objects.InstanceGroup.get_by_hint(context, group_hint) policies = set(('anti-affinity', 'affinity')) if any((policy in policies) for policy in group.policies): update_group_hosts = True diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py index 3acd49bdc4..e4fdfde7f3 100644 --- a/nova/tests/cells/test_cells_messaging.py +++ b/nova/tests/cells/test_cells_messaging.py @@ -33,7 +33,6 @@ from nova import objects from nova.objects import base as objects_base from nova.objects import fields as objects_fields -from nova.objects import instance_fault as instance_fault_obj from nova.openstack.common import jsonutils from nova.openstack.common import timeutils from nova.openstack.common import uuidutils @@ -1621,7 +1620,7 @@ def test_instance_fault_create_at_top(self): 'message': 'fake-message', 'details': 'fake-details'} - if_mock = mock.Mock(spec_set=instance_fault_obj.InstanceFault) + if_mock = mock.Mock(spec_set=objects.InstanceFault) def _check_create(): self.assertEqual('fake-message', if_mock.message) @@ -1631,8 +1630,7 @@ def _check_create(): if_mock.create.side_effect = _check_create - with mock.patch.object(instance_fault_obj, - 'InstanceFault') as if_obj_mock: + with mock.patch.object(objects, 'InstanceFault') as if_obj_mock: if_obj_mock.return_value = if_mock self.src_msg_runner.instance_fault_create_at_top( self.ctxt, fake_instance_fault) diff --git a/nova/tests/fake_instance.py b/nova/tests/fake_instance.py index e91cf8009c..b1a080269d 100644 --- a/nova/tests/fake_instance.py +++ b/nova/tests/fake_instance.py @@ -17,7 +17,6 @@ from nova import objects from nova.objects import fields -from nova.objects import instance_fault as inst_fault_obj def fake_db_secgroups(instance, names): @@ -103,6 +102,6 @@ def fake_fault_obj(context, instance_uuid, code=404, } if updates: 
fault.update(updates) - return inst_fault_obj.InstanceFault._from_db_object(context, - inst_fault_obj.InstanceFault(), - fault) + return objects.InstanceFault._from_db_object(context, + objects.InstanceFault(), + fault) diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py index a38138c31c..7551a7eec5 100644 --- a/nova/tests/scheduler/test_filter_scheduler.py +++ b/nova/tests/scheduler/test_filter_scheduler.py @@ -27,7 +27,7 @@ from nova import context from nova import db from nova import exception -from nova.objects import instance_group as instance_group_obj +from nova import objects from nova.pci import pci_request from nova.scheduler import driver from nova.scheduler import filter_scheduler @@ -375,7 +375,7 @@ def _create_server_group(self): instance = fake_instance.fake_instance_obj(self.context, params={'host': 'hostA'}) - group = instance_group_obj.InstanceGroup() + group = objects.InstanceGroup() group.name = 'pele' group.uuid = str(uuid.uuid4()) group.members = [instance.uuid] @@ -393,10 +393,9 @@ def _test_group_details_in_filter_properties(self, group, func, hint): } with contextlib.nested( - mock.patch.object(instance_group_obj.InstanceGroup, func, - return_value=group), - mock.patch.object(instance_group_obj.InstanceGroup, 'get_hosts', - return_value=['hostA']), + mock.patch.object(objects.InstanceGroup, func, return_value=group), + mock.patch.object(objects.InstanceGroup, 'get_hosts', + return_value=['hostA']), ) as (get_group, get_hosts): update_group_hosts = sched._setup_instance_group(self.context, filter_properties) diff --git a/nova/tests/virt/baremetal/test_pxe.py b/nova/tests/virt/baremetal/test_pxe.py index d0c8f52111..73eb764d3a 100644 --- a/nova/tests/virt/baremetal/test_pxe.py +++ b/nova/tests/virt/baremetal/test_pxe.py @@ -25,7 +25,7 @@ from testtools import matchers from nova import exception -from nova.objects import flavor as flavor_obj +from nova import objects from 
nova.openstack.common.db import exception as db_exc from nova.tests.image import fake as fake_image from nova.tests import utils @@ -437,15 +437,15 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase): def test_cache_images(self): self._create_node() - self.mox.StubOutWithMock(flavor_obj.Flavor, 'get_by_id') + self.mox.StubOutWithMock(objects.Flavor, 'get_by_id') self.mox.StubOutWithMock(pxe, "get_tftp_image_info") self.mox.StubOutWithMock(self.driver, "_cache_tftp_images") self.mox.StubOutWithMock(self.driver, "_cache_image") self.mox.StubOutWithMock(self.driver, "_inject_into_image") - flavor_obj.Flavor.get_by_id(self.context, - self.instance['instance_type_id'] - ).AndReturn({}) + objects.Flavor.get_by_id(self.context, + self.instance['instance_type_id'] + ).AndReturn({}) pxe.get_tftp_image_info(self.instance, {}).AndReturn([]) self.driver._cache_tftp_images(self.context, self.instance, []) self.driver._cache_image(self.context, self.instance, []) @@ -501,7 +501,7 @@ def test_activate_bootloader_passes_details(self): pxe_path = pxe.get_pxe_config_file_path(self.instance) pxe.get_image_file_path(self.instance) - self.mox.StubOutWithMock(flavor_obj.Flavor, 'get_by_id') + self.mox.StubOutWithMock(objects.Flavor, 'get_by_id') self.mox.StubOutWithMock(pxe, 'get_tftp_image_info') self.mox.StubOutWithMock(pxe, 'get_partition_sizes') self.mox.StubOutWithMock(bm_utils, 'random_alnum') @@ -509,9 +509,9 @@ def test_activate_bootloader_passes_details(self): self.mox.StubOutWithMock(bm_utils, 'write_to_file') self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise') - flavor_obj.Flavor.get_by_id(self.context, - self.instance['instance_type_id'] - ).AndReturn({}) + objects.Flavor.get_by_id(self.context, + self.instance['instance_type_id'] + ).AndReturn({}) pxe.get_tftp_image_info(self.instance, {}).AndReturn(image_info) pxe.get_partition_sizes(self.instance).AndReturn((0, 0, 0)) bm_utils.random_alnum(32).AndReturn('alnum') @@ -533,7 +533,7 @@ def 
test_activate_bootloader_passes_details(self): def test_activate_and_deactivate_bootloader(self): self._create_node() - flavor = flavor_obj.Flavor( + flavor = objects.Flavor( context=self.context, extra_specs={ 'baremetal:deploy_kernel_id': 'eeee', @@ -541,13 +541,13 @@ def test_activate_and_deactivate_bootloader(self): }) self.instance['uuid'] = 'fake-uuid' - self.mox.StubOutWithMock(flavor_obj.Flavor, 'get_by_id') + self.mox.StubOutWithMock(objects.Flavor, 'get_by_id') self.mox.StubOutWithMock(bm_utils, 'write_to_file') self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise') self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise') self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise') - flavor_obj.Flavor.get_by_id( + objects.Flavor.get_by_id( self.context, self.instance['instance_type_id']).AndReturn( flavor) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 970a3b69a6..79908847ee 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -46,9 +46,6 @@ from nova import exception from nova.network import model as network_model from nova import objects -from nova.objects import flavor as flavor_obj -from nova.objects import pci_device as pci_device_obj -from nova.objects import service as service_obj from nova.openstack.common import fileutils from nova.openstack.common import importutils from nova.openstack.common import jsonutils @@ -634,7 +631,7 @@ def test_set_host_enabled_swallows_exceptions(self): def create_instance_obj(self, context, **params): default_params = self.test_instance - default_params['pci_devices'] = pci_device_obj.PciDeviceList() + default_params['pci_devices'] = objects.PciDeviceList() default_params.update(params) instance = objects.Instance(context, **params) flavor = flavors.get_default_flavor() @@ -902,7 +899,7 @@ def set_close_callback(cb, opaque): mock.patch.object(conn, "_connect", return_value=self.conn), 
mock.patch.object(self.conn, "registerCloseCallback", side_effect=set_close_callback), - mock.patch.object(service_obj.Service, "get_by_compute_host", + mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock)): # verify that the driver registers for the close callback @@ -929,7 +926,7 @@ def test_close_callback_bad_signature(self): mock.patch.object(conn, "_connect", return_value=self.conn), mock.patch.object(self.conn, "registerCloseCallback", side_effect=TypeError('dd')), - mock.patch.object(service_obj.Service, "get_by_compute_host", + mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock)): connection = conn._get_connection() @@ -947,7 +944,7 @@ def test_close_callback_not_defined(self): mock.patch.object(conn, "_connect", return_value=self.conn), mock.patch.object(self.conn, "registerCloseCallback", side_effect=AttributeError('dd')), - mock.patch.object(service_obj.Service, "get_by_compute_host", + mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock)): connection = conn._get_connection() @@ -1552,9 +1549,8 @@ def test_get_guest_config_with_watchdog_action_through_flavor(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) - fake_flavor = flavor_obj.Flavor.get_by_id( - self.context, - self.test_instance['instance_type_id']) + fake_flavor = objects.Flavor.get_by_id( + self.context, self.test_instance['instance_type_id']) fake_flavor.extra_specs = {'hw_watchdog_action': 'none'} instance_ref = db.instance_create(self.context, self.test_instance) @@ -1562,7 +1558,7 @@ def test_get_guest_config_with_watchdog_action_through_flavor(self): disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref) - with mock.patch.object(flavor_obj.Flavor, 'get_by_id', + with mock.patch.object(objects.Flavor, 'get_by_id', return_value=fake_flavor): cfg = conn._get_guest_config(instance_ref, [], {}, disk_info) @@ -1591,9 +1587,8 @@ def 
test_get_guest_config_with_watchdog_action_meta_overrides_flavor(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) - fake_flavor = flavor_obj.Flavor.get_by_id( - self.context, - self.test_instance['instance_type_id']) + fake_flavor = objects.Flavor.get_by_id( + self.context, self.test_instance['instance_type_id']) fake_flavor.extra_specs = {'hw_watchdog_action': 'none'} instance_ref = db.instance_create(self.context, self.test_instance) @@ -1603,7 +1598,7 @@ def test_get_guest_config_with_watchdog_action_meta_overrides_flavor(self): image_meta = {"properties": {"hw_watchdog_action": "pause"}} - with mock.patch.object(flavor_obj.Flavor, 'get_by_id', + with mock.patch.object(objects.Flavor, 'get_by_id', return_value=fake_flavor): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) @@ -1713,7 +1708,7 @@ def test_get_guest_config_with_video_driver_vram(self): agent_enabled=True, group='spice') - instance_type = flavor_obj.Flavor.get_by_id(self.context, 5) + instance_type = objects.Flavor.get_by_id(self.context, 5) instance_type.extra_specs = {'hw_video:ram_max_mb': "100"} conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = db.instance_create(self.context, self.test_instance) @@ -1722,7 +1717,7 @@ def test_get_guest_config_with_video_driver_vram(self): instance_ref) image_meta = {"properties": {"hw_video_model": "qxl", "hw_video_ram": "64"}} - with mock.patch.object(flavor_obj.Flavor, 'get_by_id', + with mock.patch.object(objects.Flavor, 'get_by_id', return_value=instance_type): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) @@ -1772,7 +1767,7 @@ def test_video_driver_ram_above_flavor_limit(self): agent_enabled=True, group='spice') - instance_type = flavor_obj.Flavor.get_by_id(self.context, 5) + instance_type = objects.Flavor.get_by_id(self.context, 5) instance_type.extra_specs = {'hw_video:ram_max_mb': "50"} conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = 
db.instance_create(self.context, self.test_instance) @@ -1781,7 +1776,7 @@ def test_video_driver_ram_above_flavor_limit(self): instance_ref) image_meta = {"properties": {"hw_video_model": "qxl", "hw_video_ram": "64"}} - with mock.patch.object(flavor_obj.Flavor, 'get_by_id', + with mock.patch.object(objects.Flavor, 'get_by_id', return_value=instance_type): self.assertRaises(exception.RequestedVRamTooHigh, conn._get_guest_config, @@ -1824,7 +1819,7 @@ def test_get_guest_config_with_rng_device(self): use_usb_tablet=False, group='libvirt') - fake_flavor = flavor_obj.Flavor.get_by_id( + fake_flavor = objects.Flavor.get_by_id( self.context, self.test_instance['instance_type_id']) fake_flavor.extra_specs = {'hw_rng:allowed': 'True'} @@ -1835,7 +1830,7 @@ def test_get_guest_config_with_rng_device(self): instance_ref) image_meta = {"properties": {"hw_rng_model": "virtio"}} - with mock.patch.object(flavor_obj.Flavor, 'get_by_id', + with mock.patch.object(objects.Flavor, 'get_by_id', return_value=fake_flavor): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) @@ -1892,7 +1887,7 @@ def test_get_guest_config_with_rng_limits(self): use_usb_tablet=False, group='libvirt') - fake_flavor = flavor_obj.Flavor.get_by_id( + fake_flavor = objects.Flavor.get_by_id( self.context, self.test_instance['instance_type_id']) fake_flavor.extra_specs = {'hw_rng:allowed': 'True', @@ -1905,7 +1900,7 @@ def test_get_guest_config_with_rng_limits(self): instance_ref) image_meta = {"properties": {"hw_rng_model": "virtio"}} - with mock.patch.object(flavor_obj.Flavor, 'get_by_id', + with mock.patch.object(objects.Flavor, 'get_by_id', return_value=fake_flavor): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) @@ -1935,7 +1930,7 @@ def test_get_guest_config_with_rng_backend(self): rng_dev_path='/dev/hw_rng', group='libvirt') - fake_flavor = flavor_obj.Flavor.get_by_id( + fake_flavor = objects.Flavor.get_by_id( self.context, self.test_instance['instance_type_id']) 
fake_flavor.extra_specs = {'hw_rng:allowed': 'True'} @@ -1946,7 +1941,7 @@ def test_get_guest_config_with_rng_backend(self): instance_ref) image_meta = {"properties": {"hw_rng_model": "virtio"}} - with contextlib.nested(mock.patch.object(flavor_obj.Flavor, + with contextlib.nested(mock.patch.object(objects.Flavor, 'get_by_id', return_value=fake_flavor), mock.patch('nova.virt.libvirt.driver.os.path.exists', @@ -1979,7 +1974,7 @@ def test_get_guest_config_with_rng_dev_not_present(self): rng_dev_path='/dev/hw_rng', group='libvirt') - fake_flavor = flavor_obj.Flavor.get_by_id( + fake_flavor = objects.Flavor.get_by_id( self.context, self.test_instance['instance_type_id']) fake_flavor.extra_specs = {'hw_rng:allowed': 'True'} @@ -1990,7 +1985,7 @@ def test_get_guest_config_with_rng_dev_not_present(self): instance_ref) image_meta = {"properties": {"hw_rng_model": "virtio"}} - with contextlib.nested(mock.patch.object(flavor_obj.Flavor, + with contextlib.nested(mock.patch.object(objects.Flavor, 'get_by_id', return_value=fake_flavor), mock.patch('nova.virt.libvirt.driver.os.path.exists', @@ -2006,7 +2001,7 @@ def test_get_guest_config_with_cpu_quota(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) - fake_flavor = flavor_obj.Flavor.get_by_id( + fake_flavor = objects.flavor.Flavor.get_by_id( self.context, self.test_instance['instance_type_id']) fake_flavor.extra_specs = {'quota:cpu_shares': '10000', @@ -2017,7 +2012,7 @@ def test_get_guest_config_with_cpu_quota(self): disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref) - with mock.patch.object(flavor_obj.Flavor, 'get_by_id', + with mock.patch.object(objects.flavor.Flavor, 'get_by_id', return_value=fake_flavor): cfg = conn._get_guest_config(instance_ref, [], {}, disk_info) @@ -2029,7 +2024,7 @@ def test_get_guest_config_with_bogus_cpu_quota(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) - fake_flavor = flavor_obj.Flavor.get_by_id( + fake_flavor = 
objects.flavor.Flavor.get_by_id( self.context, self.test_instance['instance_type_id']) fake_flavor.extra_specs = {'quota:cpu_shares': 'fishfood', @@ -2040,7 +2035,7 @@ def test_get_guest_config_with_bogus_cpu_quota(self): disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref) - with mock.patch.object(flavor_obj.Flavor, 'get_by_id', + with mock.patch.object(objects.flavor.Flavor, 'get_by_id', return_value=fake_flavor): self.assertRaises(ValueError, conn._get_guest_config, @@ -2557,7 +2552,7 @@ def get_lib_version_stub(): self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_topology(self): - fake_flavor = flavor_obj.Flavor.get_by_id( + fake_flavor = objects.flavor.Flavor.get_by_id( self.context, self.test_instance['instance_type_id']) fake_flavor.vcpus = 8 @@ -2568,7 +2563,7 @@ def test_get_guest_cpu_topology(self): disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref) - with mock.patch.object(flavor_obj.Flavor, 'get_by_id', + with mock.patch.object(objects.flavor.Flavor, 'get_by_id', return_value=fake_flavor): conf = conn._get_guest_config(instance_ref, _fake_network_info(self.stubs, 1), @@ -5624,7 +5619,7 @@ def test_service_resume_after_broken_connection(self): with contextlib.nested( mock.patch.object(libvirt, 'openAuth', return_value=mock.MagicMock()), - mock.patch.object(service_obj.Service, "get_by_compute_host", + mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock)): conn.get_num_instances() @@ -7540,7 +7535,7 @@ def _test_attach_detach_interface_get_config(self, method_name): else: raise ValueError("Unhandled method %" % method_name) - fake_flavor = flavor_obj.Flavor.get_by_id( + fake_flavor = objects.Flavor.get_by_id( self.context, test_instance['instance_type_id']) expected = conn.vif_driver.get_config(test_instance, network_info[0], fake_image_meta, @@ -7548,7 +7543,7 @@ def _test_attach_detach_interface_get_config(self, method_name): self.mox.StubOutWithMock(conn.vif_driver, 
'get_config') conn.vif_driver.get_config(test_instance, network_info[0], fake_image_meta, - mox.IsA(flavor_obj.Flavor)).\ + mox.IsA(objects.Flavor)).\ AndReturn(expected) self.mox.ReplayAll() @@ -9963,7 +9958,7 @@ def _test_attach_detach_interface(self, method, power_state, self.libvirtconnection.firewall_driver.setup_basic_filtering( instance, [network_info[0]]) - fake_flavor = flavor_obj.Flavor.get_by_id( + fake_flavor = objects.Flavor.get_by_id( self.context, instance['instance_type_id']) if method == 'attach_interface': fake_image_meta = {'id': instance['image_ref']} @@ -9977,7 +9972,7 @@ def _test_attach_detach_interface(self, method, power_state, self.libvirtconnection.vif_driver.get_config( instance, network_info[0], fake_image_meta, - mox.IsA(flavor_obj.Flavor)).AndReturn(expected) + mox.IsA(objects.Flavor)).AndReturn(expected) domain.info().AndReturn([power_state]) if method == 'attach_interface': domain.attachDeviceFlags(expected.to_xml(), expected_flags) diff --git a/nova/tests/virt/test_virt_drivers.py b/nova/tests/virt/test_virt_drivers.py index 947a67a7e0..ff51d4c346 100644 --- a/nova/tests/virt/test_virt_drivers.py +++ b/nova/tests/virt/test_virt_drivers.py @@ -23,6 +23,7 @@ from nova.compute import manager from nova import exception +from nova import objects from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging @@ -784,8 +785,7 @@ def test_internal_set_host_enabled(self): # Previous status of the service: disabled: False service_mock.configure_mock(disabled_reason='None', disabled=False) - from nova.objects import service as service_obj - with mock.patch.object(service_obj.Service, "get_by_compute_host", + with mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock): self.connection._set_host_enabled(False, 'ERROR!') self.assertTrue(service_mock.disabled) @@ -798,8 +798,7 @@ def test_set_host_enabled_when_auto_disabled(self): # Previous 
status of the service: disabled: True, 'AUTO: ERROR' service_mock.configure_mock(disabled_reason='AUTO: ERROR', disabled=True) - from nova.objects import service as service_obj - with mock.patch.object(service_obj.Service, "get_by_compute_host", + with mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock): self.connection._set_host_enabled(True) self.assertFalse(service_mock.disabled) @@ -812,8 +811,7 @@ def test_set_host_enabled_when_manually_disabled(self): # Previous status of the service: disabled: True, 'Manually disabled' service_mock.configure_mock(disabled_reason='Manually disabled', disabled=True) - from nova.objects import service as service_obj - with mock.patch.object(service_obj.Service, "get_by_compute_host", + with mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock): self.connection._set_host_enabled(True) self.assertTrue(service_mock.disabled) @@ -826,8 +824,7 @@ def test_set_host_enabled_dont_override_manually_disabled(self): # Previous status of the service: disabled: True, 'Manually disabled' service_mock.configure_mock(disabled_reason='Manually disabled', disabled=True) - from nova.objects import service as service_obj - with mock.patch.object(service_obj.Service, "get_by_compute_host", + with mock.patch.object(objects.Service, "get_by_compute_host", return_value=service_mock): self.connection._set_host_enabled(False, 'ERROR!') self.assertTrue(service_mock.disabled) diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py index 5b57aa2928..72d5a02169 100644 --- a/nova/virt/baremetal/pxe.py +++ b/nova/virt/baremetal/pxe.py @@ -27,7 +27,7 @@ from nova.compute import flavors from nova import exception from nova.i18n import _ -from nova.objects import flavor as flavor_obj +from nova import objects from nova.openstack.common.db import exception as db_exc from nova.openstack.common import fileutils from nova.openstack.common import log as logging @@ -334,8 +334,8 @@ def 
_inject_into_image(self, context, node, instance, network_info, def cache_images(self, context, node, instance, admin_password, image_meta, injected_files, network_info): """Prepare all the images for this instance.""" - flavor = flavor_obj.Flavor.get_by_id(context, - instance['instance_type_id']) + flavor = objects.Flavor.get_by_id(context, + instance['instance_type_id']) tftp_image_info = get_tftp_image_info(instance, flavor) self._cache_tftp_images(context, instance, tftp_image_info) @@ -379,8 +379,8 @@ def activate_bootloader(self, context, node, instance, network_info): ./pxelinux.cfg/ {mac} -> ../{uuid}/config """ - flavor = flavor_obj.Flavor.get_by_id(context, - instance['instance_type_id']) + flavor = objects.Flavor.get_by_id(context, + instance['instance_type_id']) image_info = get_tftp_image_info(instance, flavor) (root_mb, swap_mb, ephemeral_mb) = get_partition_sizes(instance) pxe_config_file_path = get_pxe_config_file_path(instance) diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py index ed3ff026af..452edbf05a 100644 --- a/nova/virt/firewall.py +++ b/nova/virt/firewall.py @@ -23,8 +23,6 @@ from nova.i18n import _LI from nova.network import linux_net from nova import objects -from nova.objects import security_group as security_group_obj -from nova.objects import security_group_rule as security_group_rule_obj from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova import utils @@ -357,13 +355,13 @@ def instance_rules(self, instance, network_info): # Allow RA responses self._do_ra_rules(ipv6_rules, network_info) - security_groups = security_group_obj.SecurityGroupList.get_by_instance( + security_groups = objects.SecurityGroupList.get_by_instance( ctxt, instance) # then, security group chains and rules for security_group in security_groups: - rules_cls = security_group_rule_obj.SecurityGroupRuleList - rules = rules_cls.get_by_security_group(ctxt, security_group) + rules = 
objects.SecurityGroupRuleList.get_by_security_group( + ctxt, security_group) for rule in rules: if not rule['cidr']: diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index f4f89bb2da..5da030bbaa 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -62,8 +62,6 @@ from nova.i18n import _LW from nova import image from nova import objects -from nova.objects import flavor as flavor_obj -from nova.objects import service as service_obj from nova.openstack.common import excutils from nova.openstack.common import fileutils from nova.openstack.common import importutils @@ -1422,7 +1420,7 @@ def detach_volume(self, connection_info, instance, mountpoint, def attach_interface(self, instance, image_meta, vif): virt_dom = self._lookup_by_name(instance['name']) - flavor = flavor_obj.Flavor.get_by_id( + flavor = objects.Flavor.get_by_id( nova_context.get_admin_context(read_deleted='yes'), instance['instance_type_id']) self.vif_driver.plug(instance, vif) @@ -1443,7 +1441,7 @@ def attach_interface(self, instance, image_meta, vif): def detach_interface(self, instance, vif): virt_dom = self._lookup_by_name(instance['name']) - flavor = flavor_obj.Flavor.get_by_id( + flavor = objects.Flavor.get_by_id( nova_context.get_admin_context(read_deleted='yes'), instance['instance_type_id']) cfg = self.vif_driver.get_config(instance, vif, None, flavor) @@ -2881,7 +2879,7 @@ def _set_host_enabled(self, enabled, ctx = nova_context.get_admin_context() try: - service = service_obj.Service.get_by_compute_host(ctx, CONF.host) + service = objects.Service.get_by_compute_host(ctx, CONF.host) if service.disabled != disable_service: # Note(jang): this is a quick fix to stop operator- @@ -3183,7 +3181,7 @@ def _get_guest_config(self, instance, network_info, image_meta, 'kernel_id' if a kernel is needed for the rescue image. 
""" - flavor = flavor_obj.Flavor.get_by_id( + flavor = objects.Flavor.get_by_id( nova_context.get_admin_context(read_deleted='yes'), instance['instance_type_id']) inst_path = libvirt_utils.get_instance_path(instance) diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py index c842df8a7b..e66c0eda90 100644 --- a/nova/virt/xenapi/host.py +++ b/nova/virt/xenapi/host.py @@ -27,7 +27,6 @@ from nova import exception from nova.i18n import _ from nova import objects -from nova.objects import service as service_obj from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.pci import pci_whitelist @@ -120,8 +119,8 @@ def set_host_enabled(self, enabled): # Since capabilities are gone, use service table to disable a node # in scheduler cntxt = context.get_admin_context() - service = service_obj.Service.get_by_args(cntxt, CONF.host, - 'nova-compute') + service = objects.Service.get_by_args(cntxt, CONF.host, + 'nova-compute') service.disabled = not enabled service.disabled_reason = 'set by xenapi host_state' service.save() From a8cb9827b716d2341208db9ae50e447eb0b1da2c Mon Sep 17 00:00:00 2001 From: Scott Reeve Date: Tue, 22 Jul 2014 12:14:41 +0200 Subject: [PATCH 116/486] Fix typo Change-Id: I674512d817d44897391e62439e1730bdf89ca9b1 --- doc/source/devref/development.environment.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst index 788387925f..255ece68a7 100644 --- a/doc/source/devref/development.environment.rst +++ b/doc/source/devref/development.environment.rst @@ -123,7 +123,7 @@ Using fake computes for tests ----------------------------- The number of instances supported by fake computes is not limited by physical -constraints. It allows to perform stress tests on a deployment with few +constraints. It allows you to perform stress tests on a deployment with few resources (typically a laptop). 
But you must avoid using scheduler filters limiting the number of instances per compute (like RamFilter, DiskFilter, AggregateCoreFilter), otherwise they will limit the number of instances per From 9a0f85953638a64c7589ffbbd798106f8d7e44b2 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Thu, 3 Jul 2014 01:45:58 +0800 Subject: [PATCH 117/486] Add debug log for availability zone filter Sometimes operator need information why the host doesn't pass the check of scheduler, this patch adds information for available zone filter if the host doesn't belong to required available zone and modify the logic a little to make it less nested. Change-Id: Icb45182fa58d69e7c5e3a77ea31eddc492ac46ec Partial-Bug: #1301830 --- .../filters/availability_zone_filter.py | 33 ++++++++++++++----- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/nova/scheduler/filters/availability_zone_filter.py b/nova/scheduler/filters/availability_zone_filter.py index 390aabb34d..1452febd29 100644 --- a/nova/scheduler/filters/availability_zone_filter.py +++ b/nova/scheduler/filters/availability_zone_filter.py @@ -16,8 +16,11 @@ from oslo.config import cfg from nova import db +from nova.openstack.common import log as logging from nova.scheduler import filters +LOG = logging.getLogger(__name__) + CONF = cfg.CONF CONF.import_opt('default_availability_zone', 'nova.availability_zones') @@ -38,13 +41,25 @@ def host_passes(self, host_state, filter_properties): props = spec.get('instance_properties', {}) availability_zone = props.get('availability_zone') - if availability_zone: - context = filter_properties['context'] - metadata = db.aggregate_metadata_get_by_host( - context, host_state.host, key='availability_zone') - if 'availability_zone' in metadata: - return availability_zone in metadata['availability_zone'] - else: - return availability_zone == CONF.default_availability_zone + if not availability_zone: + return True + + context = filter_properties['context'] + metadata = 
db.aggregate_metadata_get_by_host( + context, host_state.host, key='availability_zone') + + if 'availability_zone' in metadata: + hosts_passes = availability_zone in metadata['availability_zone'] + host_az = metadata['availability_zone'] + else: + hosts_passes = availability_zone == CONF.default_availability_zone + host_az = CONF.default_availability_zone + + if not hosts_passes: + LOG.debug("Availability Zone '%(az)s' requested. " + "%(host_state)s has AZs: %(host_az)s", + {'host_state': host_state, + 'az': availability_zone, + 'host_az': host_az}) - return True + return hosts_passes From 3e933e70dcaae36a1ad9f5c0d7494a52ada1648d Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Wed, 2 Apr 2014 05:43:54 -0700 Subject: [PATCH 118/486] VMware: power_off_instance support Commit 1e1915aaaca38b5691794e0e052a42b9d95dd3c2 introduced a new utility method to shutdown a VM. This will now be moved to vm_util similar to refactoring in blueprint vmware-spawn-refactor. This patch does the following: 1. Creates a vm_util method - power_off_instance. 2. Corrects the unit test for the unrescue. That did not correctly validate the correct vm ref for the power off 3. Added specific tests for the new method that has been added 4. Drops the test test_power_off_suspended as this is not a valid test case. 
Change-Id: I6b7c260488bbd0278f6e7ce98be77e958f1576ab --- nova/tests/virt/vmwareapi/test_driver_api.py | 32 ++++----- nova/tests/virt/vmwareapi/test_vm_util.py | 69 ++++++++++++++++++++ nova/tests/virt/vmwareapi/test_vmops.py | 5 +- nova/virt/vmwareapi/vm_util.py | 16 +++++ nova/virt/vmwareapi/vmops.py | 37 +---------- 5 files changed, 105 insertions(+), 54 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index a4a0937c5c..6b7ef04984 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -1412,15 +1412,6 @@ def test_power_off_non_existent(self): self.assertRaises(exception.InstanceNotFound, self.conn.power_off, self.instance) - def test_power_off_suspended(self): - self._create_vm() - self.conn.suspend(self.instance) - info = self.conn.get_info({'uuid': self.uuid, - 'node': self.instance_node}) - self._check_vm_info(info, power_state.SUSPENDED) - self.assertRaises(exception.InstancePowerOffFailure, - self.conn.power_off, self.instance) - def test_resume_state_on_host_boot(self): self._create_vm() self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name') @@ -1623,14 +1614,18 @@ def test_unrescue(self): # with power_on=True, the test_destroy_rescued tests the # vmops.unrescue with power_on=False self._rescue() - self.test_vm_ref = None - self.test_device_name = None vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance) + vm_rescue_ref = vm_util.get_vm_ref_from_name(self.conn._session, + '%s-rescue' % self.uuid) + + self.poweroff_instance = vm_util.power_off_instance - def fake_power_off_vm_ref(vm_ref): - self.test_vm_ref = vm_ref - self.assertIsNotNone(vm_ref) + def fake_power_off_instance(session, instance, vm_ref): + # This is called so that we actually poweroff the simulated vm. + # The reason for this is that there is a validation in destroy + # that the instance is not powered on. 
+ self.poweroff_instance(session, instance, vm_ref) def fake_detach_disk_from_vm(vm_ref, instance, device_name, destroy_disk=False): @@ -1639,15 +1634,16 @@ def fake_detach_disk_from_vm(vm_ref, instance, self._check_vm_info(info, power_state.SHUTDOWN) with contextlib.nested( - mock.patch.object(self.conn._vmops, "_power_off_vm_ref", - side_effect=fake_power_off_vm_ref), + mock.patch.object(vm_util, "power_off_instance", + side_effect=fake_power_off_instance), mock.patch.object(self.conn._volumeops, "detach_disk_from_vm", side_effect=fake_detach_disk_from_vm), mock.patch.object(vm_util, "power_on_instance"), ) as (poweroff, detach, fake_power_on): self.conn.unrescue(self.instance, None) - poweroff.assert_called_once_with(self.test_vm_ref) - detach.assert_called_once_with(self.test_vm_ref, mock.ANY, + poweroff.assert_called_once_with(self.conn._session, mock.ANY, + vm_rescue_ref) + detach.assert_called_once_with(vm_rescue_ref, mock.ANY, self.test_device_name) fake_power_on.assert_called_once_with(self.conn._session, self.instance, diff --git a/nova/tests/virt/vmwareapi/test_vm_util.py b/nova/tests/virt/vmwareapi/test_vm_util.py index a6013a2b14..62b016696a 100644 --- a/nova/tests/virt/vmwareapi/test_vm_util.py +++ b/nova/tests/virt/vmwareapi/test_vm_util.py @@ -837,3 +837,72 @@ def test_get_network_detach_config_spec(self): expected = re.sub(r'\s+', '', expected) result = re.sub(r'\s+', '', repr(result)) self.assertEqual(expected, result) + + @mock.patch.object(vm_util, "get_vm_ref") + def test_power_off_instance(self, fake_get_ref): + session = fake.FakeSession() + fake_instance = mock.MagicMock() + with contextlib.nested( + mock.patch.object(session, '_call_method', + return_value='fake-task'), + mock.patch.object(session, '_wait_for_task') + ) as (fake_call_method, fake_wait_for_task): + vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref') + fake_call_method.assert_called_once_with(session._get_vim(), + "PowerOffVM_Task", + 'fake-vm-ref') + 
fake_wait_for_task.assert_called_once_with('fake-task') + self.assertFalse(fake_get_ref.called) + + @mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref") + def test_power_off_instance_no_vm_ref(self, fake_get_ref): + session = fake.FakeSession() + fake_instance = mock.MagicMock() + with contextlib.nested( + mock.patch.object(session, '_call_method', + return_value='fake-task'), + mock.patch.object(session, '_wait_for_task') + ) as (fake_call_method, fake_wait_for_task): + vm_util.power_off_instance(session, fake_instance) + fake_get_ref.assert_called_once_with(session, fake_instance) + fake_call_method.assert_called_once_with(session._get_vim(), + "PowerOffVM_Task", + 'fake-vm-ref') + fake_wait_for_task.assert_called_once_with('fake-task') + + @mock.patch.object(vm_util, "get_vm_ref") + def test_power_off_instance_with_exception(self, fake_get_ref): + session = fake.FakeSession() + fake_instance = mock.MagicMock() + with contextlib.nested( + mock.patch.object(session, '_call_method', + return_value='fake-task'), + mock.patch.object(session, '_wait_for_task', + side_effect=exception.NovaException('fake')) + ) as (fake_call_method, fake_wait_for_task): + self.assertRaises(exception.NovaException, + vm_util.power_off_instance, + session, fake_instance, 'fake-vm-ref') + fake_call_method.assert_called_once_with(session._get_vim(), + "PowerOffVM_Task", + 'fake-vm-ref') + fake_wait_for_task.assert_called_once_with('fake-task') + self.assertFalse(fake_get_ref.called) + + @mock.patch.object(vm_util, "get_vm_ref") + def test_power_off_instance_power_state_exception(self, fake_get_ref): + session = fake.FakeSession() + fake_instance = mock.MagicMock() + with contextlib.nested( + mock.patch.object(session, '_call_method', + return_value='fake-task'), + mock.patch.object( + session, '_wait_for_task', + side_effect=error_util.InvalidPowerStateException) + ) as (fake_call_method, fake_wait_for_task): + vm_util.power_off_instance(session, fake_instance, 
'fake-vm-ref') + fake_call_method.assert_called_once_with(session._get_vim(), + "PowerOffVM_Task", + 'fake-vm-ref') + fake_wait_for_task.assert_called_once_with('fake-task') + self.assertFalse(fake_get_ref.called) diff --git a/nova/tests/virt/vmwareapi/test_vmops.py b/nova/tests/virt/vmwareapi/test_vmops.py index bbe39f9c9a..35dc094402 100644 --- a/nova/tests/virt/vmwareapi/test_vmops.py +++ b/nova/tests/virt/vmwareapi/test_vmops.py @@ -406,7 +406,7 @@ def fake_call_method(module, method, *args, **kwargs): return_value=vm_rescue_ref), mock.patch.object(self._session, '_call_method', fake_call_method), - mock.patch.object(self._vmops, '_power_off_vm_ref'), + mock.patch.object(vm_util, 'power_off_instance'), mock.patch.object(self._vmops, '_destroy_instance'), mock.patch.object(copy, 'deepcopy', return_value=r_instance) ) as (_get_vmdk_path_and_adapter_type, _get_vmdk_volume_disk, @@ -427,7 +427,8 @@ def fake_call_method(module, method, *args, **kwargs): self._instance) _get_vm_ref_from_name.assert_called_once_with(self._session, 'fake_uuid-rescue') - _power_off.assert_called_once_with(vm_rescue_ref) + _power_off.assert_called_once_with(self._session, r_instance, + vm_rescue_ref) _destroy_instance.assert_called_once_with(r_instance, instance_name='fake_uuid-rescue') diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index a9dacd134e..d73794e19f 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -1479,3 +1479,19 @@ def get_vm_detach_port_index(session, vm_ref, iface_id): if (option.key.startswith('nvp.iface-id.') and option.value == iface_id): return int(option.key.split('.')[2]) + + +def power_off_instance(session, instance, vm_ref=None): + """Power off the specified instance.""" + + if vm_ref is None: + vm_ref = get_vm_ref(session, instance) + + LOG.debug("Powering off the VM", instance=instance) + try: + poweroff_task = session._call_method(session._get_vim(), + "PowerOffVM_Task", vm_ref) + 
session._wait_for_task(poweroff_task) + LOG.debug("Powered off the VM", instance=instance) + except error_util.InvalidPowerStateException: + LOG.debug("VM already powered off", instance=instance) diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index a726e1a3cb..57294e0e6c 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -900,12 +900,7 @@ def _destroy_instance(self, instance, destroy_disks=True, # Power off the VM if it is in PoweredOn state. if pwr_state == "poweredOn": - LOG.debug("Powering off the VM", instance=instance) - poweroff_task = self._session._call_method( - self._session._get_vim(), - "PowerOffVM_Task", vm_ref) - self._session._wait_for_task(poweroff_task) - LOG.debug("Powered off the VM", instance=instance) + vm_util.power_off_instance(self._session, instance, vm_ref) # Un-register the VM try: @@ -1066,44 +1061,18 @@ def unrescue(self, instance, power_on=True): "get_dynamic_property", vm_rescue_ref, "VirtualMachine", "config.hardware.device") device = vm_util.get_vmdk_volume_disk(hardware_devices, path=vmdk_path) - self._power_off_vm_ref(vm_rescue_ref) + vm_util.power_off_instance(self._session, r_instance, vm_rescue_ref) self._volumeops.detach_disk_from_vm(vm_rescue_ref, r_instance, device) self._destroy_instance(r_instance, instance_name=instance_name) if power_on: vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref) - def _power_off_vm_ref(self, vm_ref): - """Power off the specifed vm. - - :param vm_ref: a reference object to the VM. - """ - poweroff_task = self._session._call_method( - self._session._get_vim(), - "PowerOffVM_Task", vm_ref) - self._session._wait_for_task(poweroff_task) - def power_off(self, instance): """Power off the specified instance. 
:param instance: nova.objects.instance.Instance """ - vm_ref = vm_util.get_vm_ref(self._session, instance) - - pwr_state = self._session._call_method(vim_util, - "get_dynamic_property", vm_ref, - "VirtualMachine", "runtime.powerState") - # Only PoweredOn VMs can be powered off. - if pwr_state == "poweredOn": - LOG.debug("Powering off the VM", instance=instance) - self._power_off_vm_ref(vm_ref) - LOG.debug("Powered off the VM", instance=instance) - # Raise Exception if VM is suspended - elif pwr_state == "suspended": - reason = _("instance is suspended and cannot be powered off.") - raise exception.InstancePowerOffFailure(reason=reason) - else: - LOG.debug("VM was already in powered off state. So returning " - "without doing anything", instance=instance) + vm_util.power_off_instance(self._session, instance) def power_on(self, instance): vm_util.power_on_instance(self._session, instance) From 712108a3e118d8497a38e21a0427afbe60f90b49 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Mon, 21 Jul 2014 13:02:57 +0800 Subject: [PATCH 119/486] Add valid method check for quota resources ReservableResource and CountableResource comes down to the method used to count the in-use resources. For ReservableResource, the count is based on the number of database objects matching the project ID, whereas with CountableResource, a counting function has to be specified; otherwise, the classes are identical and designed to be used identically.indeed, CountableResource extends ReservableResource and only overrides the constructor. 
This patch add some sanity-checking to limit_check() and reserve() that ensure that they only work with the correct resource type(s) Change-Id: Ibec048a5d8c2c4a8685f5f3cc2076c64b39654a3 Partial-Bug: #1301532 --- nova/exception.py | 4 ++++ nova/quota.py | 26 +++++++++++++++++++++++-- nova/tests/test_quota.py | 41 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 69 insertions(+), 2 deletions(-) diff --git a/nova/exception.py b/nova/exception.py index 8d027dce6d..0bc8bff259 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -823,6 +823,10 @@ class InvalidQuotaValue(Invalid): "resources: %(unders)s") +class InvalidQuotaMethodUsage(Invalid): + msg_fmt = _("Wrong quota method %(method)s used on resource %(res)s") + + class QuotaNotFound(NotFound): msg_fmt = _("Quota could not be found") diff --git a/nova/quota.py b/nova/quota.py index 98cf7fe9dd..b98282ecb1 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -421,6 +421,7 @@ def limit_check(self, context, resources, values, project_id=None, is admin and admin wants to impact on common user. """ + _valid_method_call_check_resources(values, 'check') # Ensure no value is less than zero unders = [key for key, val in values.items() if val < 0] @@ -502,6 +503,7 @@ def reserve(self, context, resources, deltas, expire=None, is admin and admin wants to impact on common user. """ + _valid_method_call_check_resources(deltas, 'reserve') # Set up the reservation expiration if expire is None: @@ -994,6 +996,7 @@ def default(self): class ReservableResource(BaseResource): """Describe a reservable resource.""" + valid_method = 'reserve' def __init__(self, name, sync, flag=None): """Initializes a ReservableResource. 
@@ -1031,8 +1034,7 @@ def __init__(self, name, sync, flag=None): class AbsoluteResource(BaseResource): """Describe a non-reservable resource.""" - - pass + valid_method = 'check' class CountableResource(AbsoluteResource): @@ -1096,6 +1098,10 @@ def _driver(self): def __contains__(self, resource): return resource in self._resources + def __getitem__(self, key): + if key in self._resources: + return self._resources[key] + def register_resource(self, resource): """Register a resource.""" @@ -1437,3 +1443,19 @@ def _keypair_get_count_by_user(*args, **kwargs): QUOTAS.register_resources(resources) + + +def _valid_method_call_check_resource(name, method): + if name not in QUOTAS: + raise exception.InvalidQuotaMethodUsage(method=method, res=name) + res = QUOTAS[name] + + if res.valid_method != method: + raise exception.InvalidQuotaMethodUsage(method=method, res=name) + + +def _valid_method_call_check_resources(resource, method): + """A method to check whether the resource can use the quota method.""" + + for name in resource.keys(): + _valid_method_call_check_resource(name, method) diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 37fc73db55..b00ed7e9e1 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -428,6 +428,47 @@ def test_quota_with_project_override_class(self): self.assertEqual(quota_value, 20) + def test_valid_method_call_check_invalid_input(self): + resources = {'dummy': 1} + + self.assertRaises(exception.InvalidQuotaMethodUsage, + quota._valid_method_call_check_resources, + resources, 'limit') + + def test_valid_method_call_check_invalid_method(self): + resources = {'key_pairs': 1} + + self.assertRaises(exception.InvalidQuotaMethodUsage, + quota._valid_method_call_check_resources, + resources, 'dummy') + + def test_valid_method_call_check_multiple(self): + resources = {'key_pairs': 1, 'dummy': 2} + + self.assertRaises(exception.InvalidQuotaMethodUsage, + quota._valid_method_call_check_resources, + resources, 'check') + 
+ resources = {'key_pairs': 1, 'instances': 2, 'dummy': 3} + + self.assertRaises(exception.InvalidQuotaMethodUsage, + quota._valid_method_call_check_resources, + resources, 'check') + + def test_valid_method_call_check_wrong_method_reserve(self): + resources = {'key_pairs': 1} + + self.assertRaises(exception.InvalidQuotaMethodUsage, + quota._valid_method_call_check_resources, + resources, 'reserve') + + def test_valid_method_call_check_wrong_method_check(self): + resources = {'fixed_ips': 1} + + self.assertRaises(exception.InvalidQuotaMethodUsage, + quota._valid_method_call_check_resources, + resources, 'check') + class QuotaEngineTestCase(test.TestCase): def test_init(self): From a4e3ea4fdaeb4f850f3deb2b7098da19d015d7b9 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Tue, 22 Jul 2014 20:57:53 +0800 Subject: [PATCH 120/486] Correct InvalidAggregateAction reason for Xen Currently the xenapi will report InvalidAggregateAction when add aggreate under some conditions. But the reason didn't use the pre-defined reason list. Change-Id: I2cd65f26097f154f1a4f794916c74cd1730ae142 --- nova/tests/virt/xenapi/test_xenapi.py | 21 ++++++++++++--------- nova/virt/xenapi/pool.py | 2 +- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py index ede8be7007..bc7bd3b15b 100644 --- a/nova/tests/virt/xenapi/test_xenapi.py +++ b/nova/tests/virt/xenapi/test_xenapi.py @@ -3072,27 +3072,30 @@ def test_add_host_to_aggregate_invalid_changing_status(self): aggregate is not ready. 
""" aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING) - self.assertRaises(exception.InvalidAggregateAction, - self.conn.add_to_aggregate, self.context, - aggregate, 'host') + ex = self.assertRaises(exception.InvalidAggregateAction, + self.conn.add_to_aggregate, self.context, + aggregate, 'host') + self.assertIn('setup in progress', str(ex)) def test_add_host_to_aggregate_invalid_dismissed_status(self): """Ensure InvalidAggregateAction is raised when aggregate is deleted. """ aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED) - self.assertRaises(exception.InvalidAggregateAction, - self.conn.add_to_aggregate, self.context, - aggregate, 'fake_host') + ex = self.assertRaises(exception.InvalidAggregateAction, + self.conn.add_to_aggregate, self.context, + aggregate, 'fake_host') + self.assertIn('aggregate deleted', str(ex)) def test_add_host_to_aggregate_invalid_error_status(self): """Ensure InvalidAggregateAction is raised when aggregate is in error. """ aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR) - self.assertRaises(exception.InvalidAggregateAction, - self.conn.add_to_aggregate, self.context, - aggregate, 'fake_host') + ex = self.assertRaises(exception.InvalidAggregateAction, + self.conn.add_to_aggregate, self.context, + aggregate, 'fake_host') + self.assertIn('aggregate in error', str(ex)) def test_remove_host_from_aggregate_error(self): # Ensure we can remove a host from an aggregate even if in error. 
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py index f8d2e4f927..8cdcb37adb 100644 --- a/nova/virt/xenapi/pool.py +++ b/nova/virt/xenapi/pool.py @@ -78,7 +78,7 @@ def add_to_aggregate(self, context, aggregate, host, slave_info=None): raise exception.InvalidAggregateAction( action='add host', aggregate_id=aggregate['id'], - reason=aggregate['metadata'][pool_states.KEY]) + reason=invalid[aggregate['metadata'][pool_states.KEY]]) if (aggregate['metadata'][pool_states.KEY] == pool_states.CREATED): aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING}) From 1fb727876fff2f796de6442730527556c16a347e Mon Sep 17 00:00:00 2001 From: Christopher Lefelhocz Date: Mon, 21 Jul 2014 20:06:39 -0500 Subject: [PATCH 121/486] Remove unneeded calls in test_shelve to start instances During running of tox the system can timeout on slower systems. This is due to rpc calls to other threads failing in testing. This particular case involved shelving calls to start instances. However, when we started using objects, these calls became unnecessary for these unit tests. Thus we are removing the calls which should remove the traceback for slow systems. 
Change-Id: I52f1e86815cc2cac8dc31865f00eef2d4ffeea59 Partial-Bug: 1311778 --- nova/tests/compute/test_shelve.py | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/nova/tests/compute/test_shelve.py b/nova/tests/compute/test_shelve.py index 88ad6aba93..4bc195dd01 100644 --- a/nova/tests/compute/test_shelve.py +++ b/nova/tests/compute/test_shelve.py @@ -48,8 +48,6 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase): def _shelve_instance(self, shelved_offload_time): CONF.set_override('shelved_offload_time', shelved_offload_time) db_instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, db_instance, {}, {}, [], None, - None, True, None, False) instance = objects.Instance.get_by_uuid( self.context, db_instance['uuid'], expected_attrs=['metadata', 'system_metadata']) @@ -125,8 +123,6 @@ def test_shelve_offload(self): def test_shelve_volume_backed(self): db_instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, db_instance, {}, {}, [], None, - None, True, None, False) instance = objects.Instance.get_by_uuid( self.context, db_instance['uuid'], expected_attrs=['metadata', 'system_metadata']) @@ -168,8 +164,6 @@ def test_shelve_volume_backed(self): def test_unshelve(self): db_instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, db_instance, {}, {}, [], None, - None, True, None, False) instance = objects.Instance.get_by_uuid( self.context, db_instance['uuid'], expected_attrs=['metadata', 'system_metadata']) @@ -263,8 +257,6 @@ def test_unshelve_volume_backed(self): cur_time = timeutils.utcnow() cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc()) timeutils.set_time_override(cur_time) - self.compute.run_instance(self.context, db_instance, {}, {}, [], None, - None, True, None, False) instance = objects.Instance.get_by_uuid( self.context, db_instance['uuid'], 
expected_attrs=['metadata', 'system_metadata']) @@ -324,9 +316,6 @@ def test_unshelve_volume_backed(self): filter_properties=filter_properties, node=node) def test_shelved_poll_none_exist(self): - instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance, {}, {}, [], None, - None, True, None, False) self.mox.StubOutWithMock(self.compute.driver, 'destroy') self.mox.StubOutWithMock(timeutils, 'is_older_than') self.mox.ReplayAll() @@ -334,8 +323,6 @@ def test_shelved_poll_none_exist(self): def test_shelved_poll_not_timedout(self): instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance, {}, {}, [], None, - None, True, None, False) sys_meta = utils.metadata_to_dict(instance['system_metadata']) shelved_time = timeutils.utcnow() timeutils.set_time_override(shelved_time) @@ -349,13 +336,7 @@ def test_shelved_poll_not_timedout(self): self.compute._poll_shelved_instances(self.context) def test_shelved_poll_timedout(self): - active_instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, active_instance, {}, {}, [], - None, None, True, None, False) - instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance, {}, {}, [], None, - None, True, None, False) sys_meta = utils.metadata_to_dict(instance['system_metadata']) shelved_time = timeutils.utcnow() timeutils.set_time_override(shelved_time) @@ -381,8 +362,6 @@ def test_shelve(self): fake_instance = self._create_fake_instance({'display_name': 'vm01'}) instance = jsonutils.to_primitive(fake_instance) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance, {}, {}, [], None, - None, True, None, False) self.assertIsNone(instance['task_state']) @@ -413,8 +392,6 @@ def test_unshelve(self): # Ensure instance can be unshelved. 
instance = jsonutils.to_primitive(self._create_fake_instance()) instance_uuid = instance['uuid'] - self.compute.run_instance(self.context, instance, {}, {}, [], None, - None, True, None, False) self.assertIsNone(instance['task_state']) From b9afab4d44e91e87634d85a0867664876baf880a Mon Sep 17 00:00:00 2001 From: Christopher Lefelhocz Date: Tue, 15 Jul 2014 15:05:58 -0500 Subject: [PATCH 122/486] Fix nova cells exiting on db failure at launch We have seen cases where db errors at launch can cause cell services to exit without retrying. The service shouldn't exit __init__ much like it doesn't later by handling this type of exceptions. We'll wait an hour and then give up. Change-Id: I24c9eb811d50d1fa6a5e4a5f595ebf68ded3b7b5 Closes-Bug: 1342257 --- nova/cells/state.py | 30 ++++++++++++++------ nova/tests/cells/test_cells_state_manager.py | 17 +++++++++++ 2 files changed, 38 insertions(+), 9 deletions(-) diff --git a/nova/cells/state.py b/nova/cells/state.py index 27261a2c98..aa2257d397 100644 --- a/nova/cells/state.py +++ b/nova/cells/state.py @@ -19,6 +19,7 @@ import copy import datetime import functools +import time from oslo.config import cfg @@ -27,6 +28,7 @@ from nova.db import base from nova import exception from nova.i18n import _ +from nova.openstack.common.db import exception as db_exc from nova.openstack.common import fileutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging @@ -36,14 +38,14 @@ from nova import utils cell_state_manager_opts = [ - cfg.IntOpt('db_check_interval', - default=60, - help='Interval, in seconds, for getting fresh cell ' - 'information from the database.'), - cfg.StrOpt('cells_config', - help='Configuration file from which to read cells ' - 'configuration. 
If given, overrides reading cells ' - 'from the database.'), + cfg.IntOpt('db_check_interval', + default=60, + help='Interval, in seconds, for getting fresh cell ' + 'information from the database.'), + cfg.StrOpt('cells_config', + help='Configuration file from which to read cells ' + 'configuration. If given, overrides reading cells ' + 'from the database.'), ] @@ -169,7 +171,17 @@ def __init__(self, cell_state_cls=None): self.child_cells = {} self.last_cell_db_check = datetime.datetime.min - self._cell_data_sync(force=True) + attempts = 0 + while True: + try: + self._cell_data_sync(force=True) + break + except db_exc.DBError as e: + attempts += 1 + if attempts > 120: + raise + LOG.exception(_('DB error: %s') % e) + time.sleep(30) my_cell_capabs = {} for cap in CONF.cells.capabilities: diff --git a/nova/tests/cells/test_cells_state_manager.py b/nova/tests/cells/test_cells_state_manager.py index 1c299277b6..1a9347d1c3 100644 --- a/nova/tests/cells/test_cells_state_manager.py +++ b/nova/tests/cells/test_cells_state_manager.py @@ -16,12 +16,16 @@ Tests For CellStateManager """ +import time + +import mock from oslo.config import cfg from nova.cells import state from nova import db from nova.db.sqlalchemy import models from nova import exception +from nova.openstack.common.db import exception as db_exc from nova import test @@ -145,6 +149,19 @@ def _capacity(self, reserve_percent): return my_state.capacities +class TestCellStateManagerException(test.TestCase): + @mock.patch.object(time, 'sleep') + def test_init_db_error(self, mock_sleep): + class TestCellStateManagerDB(state.CellStateManagerDB): + def __init__(self): + self._cell_data_sync = mock.Mock() + self._cell_data_sync.side_effect = [db_exc.DBError(), []] + super(TestCellStateManagerDB, self).__init__() + test = TestCellStateManagerDB() + mock_sleep.assert_called_once_with(30) + self.assertEqual(test._cell_data_sync.call_count, 2) + + class TestCellsGetCapacity(TestCellsStateManager): def setUp(self): 
super(TestCellsGetCapacity, self).setUp() From 8faf206942d743f3da486c717b8320bc6ced8f4a Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 22 Jul 2014 13:46:44 +0000 Subject: [PATCH 123/486] Updated from global requirements Change-Id: Ie52204a9bbfd3e608a947f71dd3f406129aa9533 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e76e6260bb..7a6f2f9a86 100644 --- a/requirements.txt +++ b/requirements.txt @@ -35,4 +35,4 @@ oslo.config>=1.2.1 oslo.rootwrap pycadf>=0.5.1 oslo.messaging>=1.3.0 -oslo.i18n>=0.1.0 +oslo.i18n>=0.1.0 # Apache-2.0 From 2ee0a158a9d348f8bbf8e5143284ed712ce008fb Mon Sep 17 00:00:00 2001 From: Sylvain Bauza Date: Tue, 22 Jul 2014 14:55:23 +0200 Subject: [PATCH 124/486] Deprecate scheduler prep_resize Scheduler prep_resize is no longer necessary as now the retries from compute are going to the conductor. Deprecating this method is also a prerequisite for the split of the scheduler as it was issuing calls to computes. Change-Id: I1092bbf7f541bb8cd7e33317f33b8c02761f284e --- nova/scheduler/manager.py | 2 ++ nova/scheduler/rpcapi.py | 14 -------------- nova/tests/scheduler/test_rpcapi.py | 7 ------- 3 files changed, 2 insertions(+), 21 deletions(-) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 2d94a4bb27..c370d0cef2 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -99,6 +99,8 @@ def run_instance(self, context, request_spec, admin_password, 'task_state': None}, context, ex, request_spec) + # NOTE(sbauza): Remove this method when the scheduler rpc interface is + # bumped to 4.x as it is no longer used. def prep_resize(self, context, image, request_spec, filter_properties, instance, instance_type, reservations): """Tries to call schedule_prep_resize on the driver. 
diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py index 3ff86f0ebf..86aa9797f6 100644 --- a/nova/scheduler/rpcapi.py +++ b/nova/scheduler/rpcapi.py @@ -20,7 +20,6 @@ from oslo import messaging from nova.objects import base as objects_base -from nova.openstack.common import jsonutils from nova import rpc rpcapi_opts = [ @@ -105,16 +104,3 @@ def select_destinations(self, ctxt, request_spec, filter_properties): cctxt = self.client.prepare() return cctxt.call(ctxt, 'select_destinations', request_spec=request_spec, filter_properties=filter_properties) - - def prep_resize(self, ctxt, instance, instance_type, image, - request_spec, filter_properties, reservations): - instance_p = jsonutils.to_primitive(instance) - instance_type_p = jsonutils.to_primitive(instance_type) - reservations_p = jsonutils.to_primitive(reservations) - image_p = jsonutils.to_primitive(image) - cctxt = self.client.prepare() - cctxt.cast(ctxt, 'prep_resize', - instance=instance_p, instance_type=instance_type_p, - image=image_p, request_spec=request_spec, - filter_properties=filter_properties, - reservations=reservations_p) diff --git a/nova/tests/scheduler/test_rpcapi.py b/nova/tests/scheduler/test_rpcapi.py index de088b2212..0ba0feb540 100644 --- a/nova/tests/scheduler/test_rpcapi.py +++ b/nova/tests/scheduler/test_rpcapi.py @@ -63,13 +63,6 @@ def _test_scheduler_api(self, method, rpc_method, **kwargs): retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(retval, expected_retval) - def test_prep_resize(self): - self._test_scheduler_api('prep_resize', rpc_method='cast', - instance='fake_instance', - instance_type='fake_type', image='fake_image', - request_spec='fake_request_spec', - filter_properties='fake_props', reservations=list('fake_res')) - def test_select_destinations(self): self._test_scheduler_api('select_destinations', rpc_method='call', request_spec='fake_request_spec', From ab32995fed7084c68147eb837201767f673e89f5 Mon Sep 17 00:00:00 2001 From: "boh.ricky" 
Date: Tue, 3 Jun 2014 22:58:15 +0800
Subject: [PATCH 125/486] servers list API support specify multi-status

Currently the servers list API allows the user to specify an optional
status value to use as a filter - for example to limit the list to only
servers with a status of Active. However often the user wants to filter
the list by a set of status values, for example list servers with a
status of Active or Error, which requires two separate API calls.
Allowing the API to accept a list of status values would reduce this to
a single API call.

Allow the status value to be specified multiple times in a request.

For example::

    GET /v2/{tenant_id}/servers?status=ACTIVE&status=ERROR
    GET /v3/servers?status=ACTIVE&status=ERROR

V2 API extension::

    {
        "alias": "os-server-list-multi-status",
        "description": "Allow to filter the servers by a set of status values.",
        "links": [],
        "name": "ServerListMultiStatus",
        "namespace": "http://docs.openstack.org/compute/ext/
                      os-server-list-multi-status/api/v2",
        "updated": "2014-05-11T00:00:00Z"
    }

DocImpact: Adds os-server-list-multi-status extension.
blueprint servers-list-support-multi-status Change-Id: Id0109c56070e2f920be0f95738749aa969258bc1 --- .../all_extensions/extensions-get-resp.json | 10 ++++- .../all_extensions/extensions-get-resp.xml | 5 ++- .../server-post-req.json | 16 +++++++ .../server-post-req.xml | 19 ++++++++ .../server-post-resp.json | 16 +++++++ .../server-post-resp.xml | 6 +++ .../servers-list-resp.json | 18 ++++++++ .../servers-list-resp.xml | 7 +++ nova/api/openstack/common.py | 7 +-- .../contrib/server_list_multi_status.py | 25 +++++++++++ .../openstack/compute/plugins/v3/servers.py | 8 ++-- nova/api/openstack/compute/servers.py | 8 ++-- .../api/openstack/compute/test_extensions.py | 1 + .../api/openstack/compute/test_servers.py | 45 +++++++++++++++++++ nova/tests/api/openstack/test_common.py | 16 ++++++- .../extensions-get-resp.json.tpl | 8 ++++ .../extensions-get-resp.xml.tpl | 3 ++ .../server-post-req.json.tpl | 16 +++++++ .../server-post-req.xml.tpl | 19 ++++++++ .../server-post-resp.json.tpl | 16 +++++++ .../server-post-resp.xml.tpl | 6 +++ .../servers-list-resp.json.tpl | 18 ++++++++ .../servers-list-resp.xml.tpl | 7 +++ nova/tests/integrated/test_api_samples.py | 17 +++++++ 24 files changed, 304 insertions(+), 13 deletions(-) create mode 100644 doc/api_samples/os-server-list-multi-status/server-post-req.json create mode 100644 doc/api_samples/os-server-list-multi-status/server-post-req.xml create mode 100644 doc/api_samples/os-server-list-multi-status/server-post-resp.json create mode 100644 doc/api_samples/os-server-list-multi-status/server-post-resp.xml create mode 100644 doc/api_samples/os-server-list-multi-status/servers-list-resp.json create mode 100644 doc/api_samples/os-server-list-multi-status/servers-list-resp.xml create mode 100644 nova/api/openstack/compute/contrib/server_list_multi_status.py create mode 100644 nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl create mode 100644 
nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl create mode 100644 nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl create mode 100644 nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl create mode 100644 nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl create mode 100644 nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json index cc1afaac76..20453b912c 100644 --- a/doc/api_samples/all_extensions/extensions-get-resp.json +++ b/doc/api_samples/all_extensions/extensions-get-resp.json @@ -560,6 +560,14 @@ "namespace": "http://docs.openstack.org/compute/ext/servergroups/api/v2", "updated": "2013-06-20T00:00:00Z" }, + { + "alias": "os-server-list-multi-status", + "description": "Allow to filter the servers by a set of status values.", + "links": [], + "name": "ServerListMultiStatus", + "namespace": "http://docs.openstack.org/compute/ext/os-server-list-multi-status/api/v2", + "updated": "2014-05-11T00:00:00Z" + }, { "alias": "os-server-password", "description": "Server password support.", @@ -665,4 +673,4 @@ "updated": "2011-03-25T00:00:00Z" } ] -} \ No newline at end of file +} diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml index ffa9ff6bf5..4395f94776 100644 --- a/doc/api_samples/all_extensions/extensions-get-resp.xml +++ b/doc/api_samples/all_extensions/extensions-get-resp.xml @@ -228,6 +228,9 @@ Server group support. + + Allow to filter the servers by a set of status values. + Server password support. @@ -267,4 +270,4 @@ Volumes support. 
- \ No newline at end of file + diff --git a/doc/api_samples/os-server-list-multi-status/server-post-req.json b/doc/api_samples/os-server-list-multi-status/server-post-req.json new file mode 100644 index 0000000000..2269848f46 --- /dev/null +++ b/doc/api_samples/os-server-list-multi-status/server-post-req.json @@ -0,0 +1,16 @@ +{ + "server": { + "flavorRef": "http://openstack.example.com/openstack/flavors/1", + "imageRef": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", + "metadata": { + "My Server Name": "Apache1" + }, + "name": "new-server-test", + "personality": [ + { + "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA==", + "path": "/etc/banner.txt" + } + ] + } +} \ No newline at end of file diff --git a/doc/api_samples/os-server-list-multi-status/server-post-req.xml b/doc/api_samples/os-server-list-multi-status/server-post-req.xml new file mode 100644 index 0000000000..2dbbb4438d --- /dev/null +++ b/doc/api_samples/os-server-list-multi-status/server-post-req.xml @@ -0,0 +1,19 @@ + + + + Apache1 + + + + ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp + dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k + IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs + c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g + QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo + ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv + dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy + c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 + b25zLiINCg0KLVJpY2hhcmQgQmFjaA== + + + diff --git a/doc/api_samples/os-server-list-multi-status/server-post-resp.json 
b/doc/api_samples/os-server-list-multi-status/server-post-resp.json new file mode 100644 index 0000000000..29ce137179 --- /dev/null +++ b/doc/api_samples/os-server-list-multi-status/server-post-resp.json @@ -0,0 +1,16 @@ +{ + "server": { + "adminPass": "MVk5HPrazHcG", + "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", + "links": [ + { + "href": "http://openstack.example.com/v2/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", + "rel": "self" + }, + { + "href": "http://openstack.example.com/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", + "rel": "bookmark" + } + ] + } +} \ No newline at end of file diff --git a/doc/api_samples/os-server-list-multi-status/server-post-resp.xml b/doc/api_samples/os-server-list-multi-status/server-post-resp.xml new file mode 100644 index 0000000000..9725f33bf4 --- /dev/null +++ b/doc/api_samples/os-server-list-multi-status/server-post-resp.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/doc/api_samples/os-server-list-multi-status/servers-list-resp.json b/doc/api_samples/os-server-list-multi-status/servers-list-resp.json new file mode 100644 index 0000000000..2cc75eef19 --- /dev/null +++ b/doc/api_samples/os-server-list-multi-status/servers-list-resp.json @@ -0,0 +1,18 @@ +{ + "servers": [ + { + "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", + "links": [ + { + "href": "http://openstack.example.com/v2/openstack/servers/616fb98f-46ca-475e-917e-2563e5a8cd19", + "rel": "self" + }, + { + "href": "http://openstack.example.com/openstack/servers/616fb98f-46ca-475e-917e-2563e5a8cd19", + "rel": "bookmark" + } + ], + "name": "new-server-test" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-server-list-multi-status/servers-list-resp.xml b/doc/api_samples/os-server-list-multi-status/servers-list-resp.xml new file mode 100644 index 0000000000..cbd7892e73 --- /dev/null +++ b/doc/api_samples/os-server-list-multi-status/servers-list-resp.xml @@ -0,0 +1,7 @@ + + + + + + + \ No newline at end of 
file diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index e583d364e2..276eb5c38b 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -136,16 +136,17 @@ def status_from_state(vm_state, task_state='default'): return status -def task_and_vm_state_from_status(status): - """Map the server status string to list of vm states and +def task_and_vm_state_from_status(statuses): + """Map the server's multiple status strings to list of vm states and list of task states. """ vm_states = set() task_states = set() + lower_statuses = [status.lower() for status in statuses] for state, task_map in _STATE_MAP.iteritems(): for task_state, mapped_state in task_map.iteritems(): status_string = mapped_state - if status.lower() == status_string.lower(): + if status_string.lower() in lower_statuses: vm_states.add(state) task_states.add(task_state) # Add sort to avoid different order on set in Python 3 diff --git a/nova/api/openstack/compute/contrib/server_list_multi_status.py b/nova/api/openstack/compute/contrib/server_list_multi_status.py new file mode 100644 index 0000000000..bdcb2f883f --- /dev/null +++ b/nova/api/openstack/compute/contrib/server_list_multi_status.py @@ -0,0 +1,25 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.api.openstack import extensions + + +class Server_list_multi_status(extensions.ExtensionDescriptor): + """Allow to specify multiple status values concurrently in the servers + list API.. 
+ """ + + name = "ServerListMultiStatus" + alias = "os-server-list-multi-status" + namespace = ("http://docs.openstack.org/compute/ext/" + "os-server-list-multi-status/api/v2") + updated = "2014-05-11T00:00:00Z" diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index b324a4c1f7..5f54a540f8 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -195,9 +195,11 @@ def _get_servers(self, req, is_detail): # Verify search by 'status' contains a valid status. # Convert it to filter by vm_state or task_state for compute_api. - status = search_opts.pop('status', None) - if status is not None: - vm_state, task_state = common.task_and_vm_state_from_status(status) + search_opts.pop('status', None) + if 'status' in req.GET.keys(): + statuses = req.GET.getall('status') + states = common.task_and_vm_state_from_status(statuses) + vm_state, task_state = states if not vm_state and not task_state: return {'servers': []} search_opts['vm_state'] = vm_state diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index 831eb9ff9b..9d73f05c7d 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -534,9 +534,11 @@ def _get_servers(self, req, is_detail): # Verify search by 'status' contains a valid status. # Convert it to filter by vm_state or task_state for compute_api. 
- status = search_opts.pop('status', None) - if status is not None: - vm_state, task_state = common.task_and_vm_state_from_status(status) + search_opts.pop('status', None) + if 'status' in req.GET.keys(): + statuses = req.GET.getall('status') + states = common.task_and_vm_state_from_status(statuses) + vm_state, task_state = states if not vm_state and not task_state: return {'servers': []} search_opts['vm_state'] = vm_state diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py index 975935257e..856eafb2c7 100644 --- a/nova/tests/api/openstack/compute/test_extensions.py +++ b/nova/tests/api/openstack/compute/test_extensions.py @@ -235,6 +235,7 @@ def setUp(self): "SecurityGroupDefaultRules", "SecurityGroups", "ServerDiagnostics", + "ServerListMultiStatus", "ServerPassword", "ServerStartStop", "Services", diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index bfcb189e75..d67a5ed6e5 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -872,6 +872,51 @@ def fake_get_all(compute_self, context, search_opts=None, self.assertEqual(len(servers), 1) self.assertEqual(servers[0]['id'], server_uuid) + @mock.patch.object(compute_api.API, 'get_all') + def test_get_servers_allows_multi_status(self, get_all_mock): + server_uuid0 = str(uuid.uuid4()) + server_uuid1 = str(uuid.uuid4()) + db_list = [fakes.stub_instance(100, uuid=server_uuid0), + fakes.stub_instance(101, uuid=server_uuid1)] + get_all_mock.return_value = instance_obj._make_instance_list( + context, instance_obj.InstanceList(), db_list, FIELDS) + + req = fakes.HTTPRequest.blank( + '/fake/servers?status=active&status=error') + servers = self.controller.index(req)['servers'] + self.assertEqual(2, len(servers)) + self.assertEqual(server_uuid0, servers[0]['id']) + self.assertEqual(server_uuid1, servers[1]['id']) + expected_search_opts 
= dict(deleted=False, + vm_state=[vm_states.ACTIVE, + vm_states.ERROR], + project_id='fake') + get_all_mock.assert_called_once_with(mock.ANY, + search_opts=expected_search_opts, limit=mock.ANY, + marker=mock.ANY, want_objects=mock.ANY) + + @mock.patch.object(compute_api.API, 'get_all') + def test_get_servers_allows_invalid_status(self, get_all_mock): + server_uuid0 = str(uuid.uuid4()) + server_uuid1 = str(uuid.uuid4()) + db_list = [fakes.stub_instance(100, uuid=server_uuid0), + fakes.stub_instance(101, uuid=server_uuid1)] + get_all_mock.return_value = instance_obj._make_instance_list( + context, instance_obj.InstanceList(), db_list, FIELDS) + + req = fakes.HTTPRequest.blank( + '/fake/servers?status=active&status=invalid') + servers = self.controller.index(req)['servers'] + self.assertEqual(2, len(servers)) + self.assertEqual(server_uuid0, servers[0]['id']) + self.assertEqual(server_uuid1, servers[1]['id']) + expected_search_opts = dict(deleted=False, + vm_state=[vm_states.ACTIVE], + project_id='fake') + get_all_mock.assert_called_once_with(mock.ANY, + search_opts=expected_search_opts, limit=mock.ANY, + marker=mock.ANY, want_objects=mock.ANY) + def test_get_servers_allows_task_status(self): server_uuid = str(uuid.uuid4()) task_state = task_states.REBOOTING diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index c18e8cac1c..c9154b8d11 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -360,14 +360,14 @@ def test_status_from_state(self): self.assertEqual(expected, actual) def test_task_and_vm_state_from_status(self): - fixture1 = 'reboot' + fixture1 = ['reboot'] actual = common.task_and_vm_state_from_status(fixture1) expected = [vm_states.ACTIVE], [task_states.REBOOT_PENDING, task_states.REBOOT_STARTED, task_states.REBOOTING] self.assertEqual(expected, actual) - fixture2 = 'resize' + fixture2 = ['resize'] actual = common.task_and_vm_state_from_status(fixture2) expected = 
([vm_states.ACTIVE, vm_states.STOPPED], [task_states.RESIZE_FINISH, @@ -376,6 +376,18 @@ def test_task_and_vm_state_from_status(self): task_states.RESIZE_PREP]) self.assertEqual(expected, actual) + fixture3 = ['resize', 'reboot'] + actual = common.task_and_vm_state_from_status(fixture3) + expected = ([vm_states.ACTIVE, vm_states.STOPPED], + [task_states.REBOOT_PENDING, + task_states.REBOOT_STARTED, + task_states.REBOOTING, + task_states.RESIZE_FINISH, + task_states.RESIZE_MIGRATED, + task_states.RESIZE_MIGRATING, + task_states.RESIZE_PREP]) + self.assertEqual(expected, actual) + class TestCollectionLinks(test.NoDBTestCase): """Tests the _get_collection_links method.""" diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl index 75aa72746a..11934e14ce 100644 --- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl +++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl @@ -536,6 +536,14 @@ "namespace": "http://docs.openstack.org/compute/ext/server-diagnostics/api/v1.1", "updated": "%(isotime)s" }, + { + "alias": "os-server-list-multi-status", + "description": "%(text)s", + "links": [], + "name": "ServerListMultiStatus", + "namespace": "http://docs.openstack.org/compute/ext/os-server-list-multi-status/api/v2", + "updated": "%(isotime)s" + }, { "alias": "os-server-password", "description": "%(text)s", diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl index 4d914ed9bf..1a44e23a1d 100644 --- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl @@ -192,6 +192,9 @@ %(text)s + + %(text)s + %(text)s diff --git 
a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl new file mode 100644 index 0000000000..d3916d1aa6 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl @@ -0,0 +1,16 @@ +{ + "server" : { + "name" : "new-server-test", + "imageRef" : "%(host)s/openstack/images/%(image_id)s", + "flavorRef" : "%(host)s/openstack/flavors/1", + "metadata" : { + "My Server Name" : "Apache1" + }, + "personality" : [ + { + "path" : "/etc/banner.txt", + "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" + } + ] + } +} diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl new file mode 100644 index 0000000000..f926149842 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl @@ -0,0 +1,19 @@ + + + + Apache1 + + + + ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp + dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k + IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs + c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g + QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo + ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv + dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy + c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 + b25zLiINCg0KLVJpY2hhcmQgQmFjaA== + + + diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl 
b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl new file mode 100644 index 0000000000..d5f030c873 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl @@ -0,0 +1,16 @@ +{ + "server": { + "adminPass": "%(password)s", + "id": "%(id)s", + "links": [ + { + "href": "%(host)s/v2/openstack/servers/%(uuid)s", + "rel": "self" + }, + { + "href": "%(host)s/openstack/servers/%(uuid)s", + "rel": "bookmark" + } + ] + } +} diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl new file mode 100644 index 0000000000..3bb13e69bd --- /dev/null +++ b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl @@ -0,0 +1,6 @@ + + + + + + diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl new file mode 100644 index 0000000000..8b97dc28d7 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl @@ -0,0 +1,18 @@ +{ + "servers": [ + { + "id": "%(id)s", + "links": [ + { + "href": "%(host)s/v2/openstack/servers/%(id)s", + "rel": "self" + }, + { + "href": "%(host)s/openstack/servers/%(id)s", + "rel": "bookmark" + } + ], + "name": "new-server-test" + } + ] +} diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl new file mode 100644 index 0000000000..03bee03a6e --- /dev/null +++ b/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl @@ -0,0 +1,7 @@ + + + + + + + diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index 
73fab36c98..ee4eef3a87 100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -234,6 +234,23 @@ class ServersSampleHideAddressesXMLTest(ServersSampleHideAddressesJsonTest): ctype = 'xml' +class ServersSampleMultiStatusJsonTest(ServersSampleBase): + extension_name = '.'.join(('nova.api.openstack.compute.contrib', + 'server_list_multi_status', + 'Server_list_multi_status')) + + def test_servers_list(self): + uuid = self._post_server() + response = self._do_get('servers?status=active&status=error') + subs = self._get_regexes() + subs['id'] = uuid + self._verify_response('servers-list-resp', subs, response, 200) + + +class ServersSampleMultiStatusXMLTest(ServersSampleMultiStatusJsonTest): + ctype = 'xml' + + class ServersMetadataJsonTest(ServersSampleBase): def _create_and_set(self, subs): uuid = self._post_server() From 7d8a78a29128debe7ed49bea394f952f37cee498 Mon Sep 17 00:00:00 2001 From: Yunhong Jiang Date: Thu, 13 Mar 2014 15:09:34 -0700 Subject: [PATCH 126/486] Return status for compute node Currently when return compute node information, there is no status returned. When the corresponding service is disabled or down and users try to do 'hypervisor-list' or 'hypervisor-show', they will have no idea of it. 
Implements: blueprint return-status-for-hypervisor-node Closes-Bug: #1285259 DocImpact Change-Id: I17c53b454ccef023f298f1b8875daef965d2325d --- .../all_extensions/extensions-get-resp.json | 8 ++ .../all_extensions/extensions-get-resp.xml | 3 + .../hypervisors-show-with-status-resp.json | 27 +++++++ .../hypervisors-show-with-status-resp.xml | 4 + .../hypervisors-detail-resp.json | 5 +- .../os-hypervisors/hypervisors-list-resp.json | 6 +- .../hypervisors-search-resp.json | 6 +- .../hypervisors-servers-resp.json | 4 +- .../os-hypervisors/hypervisors-show-resp.json | 5 +- .../hypervisors-uptime-resp.json | 4 +- .../os-pci/hypervisors-pci-detail-resp.json | 5 +- .../os-pci/hypervisors-pci-show-resp.json | 5 +- .../compute/contrib/hypervisor_status.py | 25 ++++++ .../openstack/compute/contrib/hypervisors.py | 15 ++++ .../compute/plugins/v3/hypervisors.py | 7 ++ .../compute/contrib/test_hypervisor_status.py | 80 +++++++++++++++++++ .../compute/plugins/v3/test_hypervisors.py | 69 +++++++++++++--- .../extensions-get-resp.json.tpl | 8 ++ .../extensions-get-resp.xml.tpl | 3 + ...hypervisors-show-with-status-resp.json.tpl | 27 +++++++ .../hypervisors-show-with-status-resp.xml.tpl | 4 + nova/tests/integrated/test_api_samples.py | 38 ++++++++- .../hypervisors-detail-resp.json.tpl | 5 +- .../hypervisors-list-resp.json.tpl | 4 +- .../hypervisors-search-resp.json.tpl | 6 +- .../hypervisors-servers-resp.json.tpl | 4 +- .../hypervisors-show-resp.json.tpl | 5 +- .../hypervisors-uptime-resp.json.tpl | 2 + .../hypervisors-pci-detail-resp.json.tpl | 5 +- .../os-pci/hypervisors-pci-show-resp.json.tpl | 5 +- nova/tests/integrated/v3/test_pci.py | 35 ++++---- 31 files changed, 383 insertions(+), 46 deletions(-) create mode 100644 doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json create mode 100644 doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml create mode 100644 nova/api/openstack/compute/contrib/hypervisor_status.py create mode 100644 
nova/tests/api/openstack/compute/contrib/test_hypervisor_status.py create mode 100644 nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl create mode 100644 nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json index 4d4967accf..0359faf0c4 100644 --- a/doc/api_samples/all_extensions/extensions-get-resp.json +++ b/doc/api_samples/all_extensions/extensions-get-resp.json @@ -312,6 +312,14 @@ "namespace": "http://docs.openstack.org/compute/ext/extended_rescue_with_image/api/v2", "updated": "2014-01-04T00:00:00Z" }, + { + "alias": "os-hypervisor-status", + "description": "Show hypervisor status.", + "links": [], + "name": "HypervisorStatus", + "namespace": "http://docs.openstack.org/compute/ext/hypervisor_status/api/v1.1", + "updated": "2014-04-17T00:00:00Z" + }, { "alias": "os-extended-services", "description": "Extended services support.", diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml index d69e8ab40a..ff47a36b95 100644 --- a/doc/api_samples/all_extensions/extensions-get-resp.xml +++ b/doc/api_samples/all_extensions/extensions-get-resp.xml @@ -143,6 +143,9 @@ Extended services deletion support. + + Show hypervisor status. + Extended Volumes support. 
diff --git a/doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json b/doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json new file mode 100644 index 0000000000..44af92c433 --- /dev/null +++ b/doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json @@ -0,0 +1,27 @@ +{ + "hypervisor": { + "cpu_info": "?", + "current_workload": 0, + "disk_available_least": 0, + "free_disk_gb": 1028, + "free_ram_mb": 7680, + "hypervisor_hostname": "fake-mini", + "hypervisor_type": "fake", + "hypervisor_version": 1000, + "id": 1, + "status": "enabled", + "state": "up", + "local_gb": 1028, + "local_gb_used": 0, + "memory_mb": 8192, + "memory_mb_used": 512, + "running_vms": 0, + "service": { + "host": "5641188ab2964f88a21042b493585ff8", + "id": 2, + "disabled_reason": null + }, + "vcpus": 1, + "vcpus_used": 0 + } +} diff --git a/doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml b/doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml new file mode 100644 index 0000000000..dbfec700ce --- /dev/null +++ b/doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml @@ -0,0 +1,4 @@ + + + + diff --git a/doc/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json index e800c777ae..8694c135a0 100644 --- a/doc/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json +++ b/doc/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json @@ -3,6 +3,8 @@ { "cpu_info": "?", "current_workload": 0, + "status": "enabled", + "state": "up", "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, @@ -18,7 +20,8 @@ "running_vms": 0, "service": { "host": "e6a37ee802d74863ab8b91ade8f12a67", - "id": 2 + "id": 2, + "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 diff --git a/doc/v3/api_samples/os-hypervisors/hypervisors-list-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-list-resp.json 
index 8d94021274..375627499d 100644 --- a/doc/v3/api_samples/os-hypervisors/hypervisors-list-resp.json +++ b/doc/v3/api_samples/os-hypervisors/hypervisors-list-resp.json @@ -2,7 +2,9 @@ "hypervisors": [ { "hypervisor_hostname": "fake-mini", - "id": 1 + "id": 1, + "state": "up", + "status": "enabled" } ] -} \ No newline at end of file +} diff --git a/doc/v3/api_samples/os-hypervisors/hypervisors-search-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-search-resp.json index 8d94021274..375627499d 100644 --- a/doc/v3/api_samples/os-hypervisors/hypervisors-search-resp.json +++ b/doc/v3/api_samples/os-hypervisors/hypervisors-search-resp.json @@ -2,7 +2,9 @@ "hypervisors": [ { "hypervisor_hostname": "fake-mini", - "id": 1 + "id": 1, + "state": "up", + "status": "enabled" } ] -} \ No newline at end of file +} diff --git a/doc/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json index 934ef0c02d..710b05b930 100644 --- a/doc/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json +++ b/doc/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json @@ -2,6 +2,8 @@ "hypervisor": { "hypervisor_hostname": "fake-mini", "id": 1, + "state": "up", + "status": "enabled", "servers": [] } -} \ No newline at end of file +} diff --git a/doc/v3/api_samples/os-hypervisors/hypervisors-show-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-show-resp.json index 0c4957bdae..3d2c972ce3 100644 --- a/doc/v3/api_samples/os-hypervisors/hypervisors-show-resp.json +++ b/doc/v3/api_samples/os-hypervisors/hypervisors-show-resp.json @@ -1,6 +1,8 @@ { "hypervisor": { "cpu_info": "?", + "state": "up", + "status": "enabled", "current_workload": 0, "disk_available_least": 0, "host_ip": "1.1.1.1", @@ -17,7 +19,8 @@ "running_vms": 0, "service": { "host": "043b3cacf6f34c90a7245151fc8ebcda", - "id": 2 + "id": 2, + "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 diff --git 
a/doc/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json index f5f9d19e7c..78521b3731 100644 --- a/doc/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json +++ b/doc/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json @@ -2,6 +2,8 @@ "hypervisor": { "hypervisor_hostname": "fake-mini", "id": 1, + "state": "up", + "status": "enabled", "uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14" } -} \ No newline at end of file +} diff --git a/doc/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json b/doc/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json index 1ca293225e..f6f7363ef4 100644 --- a/doc/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json +++ b/doc/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json @@ -2,6 +2,8 @@ "hypervisors": [ { "cpu_info": "?", + "state": "up", + "status": "enabled", "current_workload": 0, "disk_available_least": 0, "host_ip": "1.1.1.1", @@ -30,7 +32,8 @@ "running_vms": 0, "service": { "host": "043b3cacf6f34c90a7245151fc8ebcda", - "id": 2 + "id": 2, + "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 diff --git a/doc/v3/api_samples/os-pci/hypervisors-pci-show-resp.json b/doc/v3/api_samples/os-pci/hypervisors-pci-show-resp.json index 2a6e41bf4d..f2fa988f83 100644 --- a/doc/v3/api_samples/os-pci/hypervisors-pci-show-resp.json +++ b/doc/v3/api_samples/os-pci/hypervisors-pci-show-resp.json @@ -4,6 +4,8 @@ "current_workload": 0, "disk_available_least": 0, "host_ip": "1.1.1.1", + "state": "up", + "status": "enabled", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", @@ -29,7 +31,8 @@ "running_vms": 0, "service": { "host": "043b3cacf6f34c90a7245151fc8ebcda", - "id": 2 + "id": 2, + "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 diff --git a/nova/api/openstack/compute/contrib/hypervisor_status.py b/nova/api/openstack/compute/contrib/hypervisor_status.py new file mode 100644 index 
0000000000..94bcabca48 --- /dev/null +++ b/nova/api/openstack/compute/contrib/hypervisor_status.py @@ -0,0 +1,25 @@ +# Copyright 2014 Intel Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.api.openstack import extensions + + +class Hypervisor_status(extensions.ExtensionDescriptor): + """Show hypervisor status.""" + + name = "HypervisorStatus" + alias = "os-hypervisor-status" + namespace = ("http://docs.openstack.org/compute/ext/" + "hypervisor_status/api/v1.1") + updated = "2014-04-17T00:00:00Z" diff --git a/nova/api/openstack/compute/contrib/hypervisors.py b/nova/api/openstack/compute/contrib/hypervisors.py index 22b51fce74..d2df93fa82 100644 --- a/nova/api/openstack/compute/contrib/hypervisors.py +++ b/nova/api/openstack/compute/contrib/hypervisors.py @@ -23,6 +23,7 @@ from nova import compute from nova import exception from nova.i18n import _ +from nova import servicegroup authorize = extensions.extension_authorizer('compute', 'hypervisors') @@ -31,6 +32,8 @@ def make_hypervisor(elem, detail): elem.set('hypervisor_hostname') elem.set('id') + elem.set('state') + elem.set('status') if detail: elem.set('vcpus') elem.set('memory_mb') @@ -52,6 +55,7 @@ def make_hypervisor(elem, detail): selector='service') service.set('id') service.set('host') + service.set('disabled_reason') class HypervisorIndexTemplate(xmlutil.TemplateBuilder): @@ -128,6 +132,7 @@ class HypervisorsController(object): def __init__(self, ext_mgr): self.host_api = compute.HostAPI() + 
self.servicegroup_api = servicegroup.API() super(HypervisorsController, self).__init__() self.ext_mgr = ext_mgr @@ -137,6 +142,13 @@ def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs): 'hypervisor_hostname': hypervisor['hypervisor_hostname'], } + ext_status_loaded = self.ext_mgr.is_loaded('os-hypervisor-status') + if ext_status_loaded: + alive = self.servicegroup_api.service_is_up(hypervisor['service']) + hyp_dict['state'] = 'up' if alive else "down" + hyp_dict['status'] = ( + 'disabled' if hypervisor['service']['disabled'] else 'enabled') + if detail and not servers: fields = ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used', 'memory_mb_used', 'local_gb_used', @@ -153,6 +165,9 @@ def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs): 'id': hypervisor['service_id'], 'host': hypervisor['service']['host'], } + if ext_status_loaded: + hyp_dict['service'].update( + disabled_reason=hypervisor['service']['disabled_reason']) if servers: hyp_dict['servers'] = [dict(name=serv['name'], uuid=serv['uuid']) diff --git a/nova/api/openstack/compute/plugins/v3/hypervisors.py b/nova/api/openstack/compute/plugins/v3/hypervisors.py index cf145e6283..c38ed4af18 100644 --- a/nova/api/openstack/compute/plugins/v3/hypervisors.py +++ b/nova/api/openstack/compute/plugins/v3/hypervisors.py @@ -21,6 +21,7 @@ from nova import compute from nova import exception from nova.i18n import _ +from nova import servicegroup ALIAS = "os-hypervisors" @@ -32,12 +33,17 @@ class HypervisorsController(object): def __init__(self): self.host_api = compute.HostAPI() + self.servicegroup_api = servicegroup.API() super(HypervisorsController, self).__init__() def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs): + alive = self.servicegroup_api.service_is_up(hypervisor['service']) hyp_dict = { 'id': hypervisor['id'], 'hypervisor_hostname': hypervisor['hypervisor_hostname'], + 'state': 'up' if alive else 'down', + 'status': ('disabled' if 
hypervisor['service']['disabled'] + else 'enabled'), } if detail and not servers: @@ -52,6 +58,7 @@ def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs): hyp_dict['service'] = { 'id': hypervisor['service_id'], 'host': hypervisor['service']['host'], + 'disabled_reason': hypervisor['service']['disabled_reason'], } if servers is not None: diff --git a/nova/tests/api/openstack/compute/contrib/test_hypervisor_status.py b/nova/tests/api/openstack/compute/contrib/test_hypervisor_status.py new file mode 100644 index 0000000000..1d8cd95358 --- /dev/null +++ b/nova/tests/api/openstack/compute/contrib/test_hypervisor_status.py @@ -0,0 +1,80 @@ +# Copyright 2014 Intel Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy + +import mock + +from nova.api.openstack.compute.contrib import hypervisors +from nova.tests.api.openstack.compute.contrib import test_hypervisors + + +TEST_HYPER = dict(test_hypervisors.TEST_HYPERS[0], + service=dict(id=1, + host="compute1", + binary="nova-compute", + topic="compute_topic", + report_count=5, + disabled=False, + disabled_reason=None, + availability_zone="nova"), + ) + + +class HypervisorStatusTest(test_hypervisors.HypervisorsTest): + def _prepare_extension(self): + self.ext_mgr.extensions['os-hypervisor-status'] = True + self.controller = hypervisors.HypervisorsController(self.ext_mgr) + self.controller.servicegroup_api.service_is_up = mock.MagicMock( + return_value=True) + + def test_view_hypervisor_service_status(self): + self._prepare_extension() + result = self.controller._view_hypervisor( + TEST_HYPER, False) + self.assertEqual('enabled', result['status']) + self.assertEqual('up', result['state']) + self.assertEqual('enabled', result['status']) + + self.controller.servicegroup_api.service_is_up.return_value = False + result = self.controller._view_hypervisor( + TEST_HYPER, False) + self.assertEqual('down', result['state']) + + hyper = copy.deepcopy(TEST_HYPER) + hyper['service']['disabled'] = True + result = self.controller._view_hypervisor(hyper, False) + self.assertEqual('disabled', result['status']) + + def test_view_hypervisor_detail_status(self): + self._prepare_extension() + + result = self.controller._view_hypervisor( + TEST_HYPER, True) + + self.assertEqual('enabled', result['status']) + self.assertEqual('up', result['state']) + self.assertIsNone(result['service']['disabled_reason']) + + self.controller.servicegroup_api.service_is_up.return_value = False + result = self.controller._view_hypervisor( + TEST_HYPER, True) + self.assertEqual('down', result['state']) + + hyper = copy.deepcopy(TEST_HYPER) + hyper['service']['disabled'] = True + hyper['service']['disabled_reason'] = "fake" + result = 
self.controller._view_hypervisor(hyper, True) + self.assertEqual('disabled', result['status'],) + self.assertEqual('fake', result['service']['disabled_reason']) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_hypervisors.py b/nova/tests/api/openstack/compute/plugins/v3/test_hypervisors.py index 7d746c8148..039dae759c 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_hypervisors.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_hypervisors.py @@ -13,6 +13,9 @@ # License for the specific language governing permissions and limitations # under the License. +import copy + +import mock from webob import exc from nova.api.openstack.compute.plugins.v3 import hypervisors @@ -32,6 +35,7 @@ topic="compute_topic", report_count=5, disabled=False, + disabled_reason=None, availability_zone="nova"), vcpus=4, memory_mb=10 * 1024, @@ -57,6 +61,7 @@ topic="compute_topic", report_count=5, disabled=False, + disabled_reason=None, availability_zone="nova"), vcpus=4, memory_mb=10 * 1024, @@ -134,6 +139,8 @@ class HypervisorsTest(test.NoDBTestCase): def setUp(self): super(HypervisorsTest, self).setUp() self.controller = hypervisors.HypervisorsController() + self.controller.servicegroup_api.service_is_up = mock.MagicMock( + return_value=True) self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all) self.stubs.Set(db, 'compute_node_search_by_hypervisor', @@ -148,7 +155,9 @@ def setUp(self): def test_view_hypervisor_nodetail_noservers(self): result = self.controller._view_hypervisor(TEST_HYPERS[0], False) - self.assertEqual(result, dict(id=1, hypervisor_hostname="hyper1")) + self.assertEqual(dict(id=1, hypervisor_hostname="hyper1", + state='up', status='enabled'), + result) def test_view_hypervisor_detail_noservers(self): result = self.controller._view_hypervisor(TEST_HYPERS[0], True) @@ -156,6 +165,8 @@ def test_view_hypervisor_detail_noservers(self): self.assertEqual(result, dict( id=1, hypervisor_hostname="hyper1", + state='up', + 
status='enabled', vcpus=4, memory_mb=10 * 1024, local_gb=250, @@ -171,7 +182,7 @@ def test_view_hypervisor_detail_noservers(self): cpu_info='cpu_info', disk_available_least=100, host_ip='1.1.1.1', - service=dict(id=1, host='compute1'))) + service=dict(id=1, host='compute1', disabled_reason=None))) def test_view_hypervisor_servers(self): result = self.controller._view_hypervisor(TEST_HYPERS[0], False, @@ -180,20 +191,44 @@ def test_view_hypervisor_servers(self): self.assertEqual(result, dict( id=1, hypervisor_hostname="hyper1", + state='up', + status='enabled', servers=[ dict(name="inst1", id="uuid1"), dict(name="inst2", id="uuid2"), dict(name="inst3", id="uuid3"), dict(name="inst4", id="uuid4")])) + def test_view_hypervisor_service_status(self): + result = self.controller._view_hypervisor(TEST_HYPERS[0], False) + self.assertEqual('up', result['state']) + self.assertEqual('enabled', result['status']) + + self.controller.servicegroup_api.service_is_up.return_value = False + result = self.controller._view_hypervisor(TEST_HYPERS[0], False) + self.assertEqual('down', result['state']) + self.assertEqual('enabled', result['status']) + + hyper = copy.deepcopy(TEST_HYPERS[0]) + hyper['service']['disabled'] = True + result = self.controller._view_hypervisor(hyper, False) + self.assertEqual('down', result['state']) + self.assertEqual('disabled', result['status']) + def test_index(self): req = fakes.HTTPRequestV3.blank('/os-hypervisors', use_admin_context=True) result = self.controller.index(req) self.assertEqual(result, dict(hypervisors=[ - dict(id=1, hypervisor_hostname="hyper1"), - dict(id=2, hypervisor_hostname="hyper2")])) + dict(id=1, + hypervisor_hostname="hyper1", + state='up', + status='enabled'), + dict(id=2, + hypervisor_hostname="hyper2", + state='up', + status='enabled')])) def test_index_non_admin(self): req = fakes.HTTPRequestV3.blank('/os-hypervisors') @@ -207,8 +242,11 @@ def test_detail(self): self.assertEqual(result, dict(hypervisors=[ dict(id=1, - 
service=dict(id=1, host="compute1"), + service=dict( + id=1, host="compute1", disabled_reason=None), vcpus=4, + state='up', + status='enabled', memory_mb=10 * 1024, local_gb=250, vcpus_used=2, @@ -225,8 +263,11 @@ def test_detail(self): disk_available_least=100, host_ip='1.1.1.1'), dict(id=2, - service=dict(id=2, host="compute2"), + service=dict(id=2, host="compute2", + disabled_reason=None), vcpus=4, + state='up', + status='enabled', memory_mb=10 * 1024, local_gb=250, vcpus_used=2, @@ -265,8 +306,10 @@ def test_show_withid(self): self.assertEqual(result, dict(hypervisor=dict( id=1, - service=dict(id=1, host="compute1"), + service=dict(id=1, host="compute1", disabled_reason=None), vcpus=4, + state='up', + status='enabled', memory_mb=10 * 1024, local_gb=250, vcpus_used=2, @@ -319,6 +362,8 @@ def fake_get_host_uptime(context, hyp): self.assertEqual(result, dict(hypervisor=dict( id=1, hypervisor_hostname="hyper1", + state='up', + status='enabled', uptime="fake uptime"))) def test_uptime_non_integer_id(self): @@ -336,8 +381,10 @@ def test_search(self): use_admin_context=True) result = self.controller.search(req) self.assertEqual(result, dict(hypervisors=[ - dict(id=1, hypervisor_hostname="hyper1"), - dict(id=2, hypervisor_hostname="hyper2")])) + dict(id=1, hypervisor_hostname="hyper1", + state='up', status='enabled'), + dict(id=2, hypervisor_hostname="hyper2", + state='up', status='enabled')])) def test_search_non_exist(self): def fake_compute_node_search_by_hypervisor_return_empty(context, @@ -362,6 +409,8 @@ def test_servers(self): self.assertEqual(result, dict(hypervisor= dict(id=1, hypervisor_hostname="hyper1", + state='up', + status='enabled', servers=[ dict(name="inst1", id="uuid1"), dict(name="inst3", id="uuid3")]))) @@ -387,6 +436,8 @@ def fake_instance_get_all_by_host_return_empty(context, hypervisor_re): self.assertEqual(result, dict(hypervisor= dict(id=1, hypervisor_hostname="hyper1", + state='up', + status='enabled', servers=[]))) def 
test_servers_with_non_integer_hypervisor_id(self): diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl index ac203b83d4..6a243b5af4 100644 --- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl +++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl @@ -416,6 +416,14 @@ "namespace": "http://docs.openstack.org/compute/ext/extended_hypervisors/api/v1.1", "updated": "%(isotime)s" }, + { + "alias": "os-hypervisor-status", + "description": "%(text)s", + "links": [], + "name": "HypervisorStatus", + "namespace": "http://docs.openstack.org/compute/ext/hypervisor_status/api/v1.1", + "updated": "%(isotime)s" + }, { "alias": "os-server-external-events", "description": "%(text)s", diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl index 308d32136b..3e5ac8ea27 100644 --- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl @@ -150,6 +150,9 @@ %(text)s + + %(text)s + %(text)s diff --git a/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl b/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl new file mode 100644 index 0000000000..14464ccf4d --- /dev/null +++ b/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl @@ -0,0 +1,27 @@ +{ + "hypervisor": { + "cpu_info": "?", + "current_workload": 0, + "disk_available_least": 0, + "free_disk_gb": 1028, + "free_ram_mb": 7680, + "hypervisor_hostname": "fake-mini", + "hypervisor_type": "fake", + "hypervisor_version": 1000, + "id": %(hypervisor_id)s, + "local_gb": 1028, + "local_gb_used": 0, + "memory_mb": 
8192, + "memory_mb_used": 512, + "running_vms": 0, + "state": "up", + "status": "enabled", + "service": { + "host": "%(host_name)s", + "id": 2, + "disabled_reason": null + }, + "vcpus": 1, + "vcpus_used": 0 + } +} diff --git a/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl new file mode 100644 index 0000000000..6cfd860af5 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl @@ -0,0 +1,4 @@ + + + + diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index 0d7dc63111..fedcee8fe0 100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -1771,7 +1771,7 @@ def test_service_detail(self): 'status': 'disabled', 'state': 'up'} subs.update(self._get_regexes()) - return self._verify_response('services-get-resp', + self._verify_response('services-get-resp', subs, response, 200) def test_service_disable_log_reason(self): @@ -3584,6 +3584,12 @@ class HypervisorsSampleJsonTests(ApiSampleTestBaseV2): extension_name = ("nova.api.openstack.compute.contrib.hypervisors." "Hypervisors") + def setUp(self): + super(HypervisorsSampleJsonTests, self).setUp() + mock.patch("nova.servicegroup.API.service_is_up", + return_value=True).start() + self.addCleanup(mock.patch.stopall) + def test_hypervisors_list(self): response = self._do_get('os-hypervisors') self._verify_response('hypervisors-list-resp', {}, response, 200) @@ -3646,9 +3652,31 @@ def test_hypervisors_show_with_ip(self): class ExtendedHypervisorsXmlTest(ExtendedHypervisorsJsonTest): + ctype = "xml" + + +class HypervisorStatusJsonTest(ApiSampleTestBaseV2): + extends_name = ("nova.api.openstack.compute.contrib." + "hypervisors.Hypervisors") + extension_name = ("nova.api.openstack.compute.contrib." 
+ "hypervisor_status.Hypervisor_status") + + def test_hypervisors_show_with_status(self): + hypervisor_id = 1 + subs = { + 'hypervisor_id': hypervisor_id + } + response = self._do_get('os-hypervisors/%s' % hypervisor_id) + subs.update(self._get_regexes()) + self._verify_response('hypervisors-show-with-status-resp', + subs, response, 200) + + +class HypervisorStatusXmlTest(HypervisorStatusJsonTest): ctype = 'xml' +@mock.patch("nova.servicegroup.API.service_is_up", return_value=True) class HypervisorsCellsSampleJsonTests(ApiSampleTestBaseV2): extension_name = ("nova.api.openstack.compute.contrib.hypervisors." "Hypervisors") @@ -3657,9 +3685,11 @@ def setUp(self): self.flags(enable=True, cell_type='api', group='cells') super(HypervisorsCellsSampleJsonTests, self).setUp() - def test_hypervisor_uptime(self): - fake_hypervisor = {'service': {'host': 'fake-mini'}, 'id': 1, - 'hypervisor_hostname': 'fake-mini'} + def test_hypervisor_uptime(self, mocks): + fake_hypervisor = {'service': {'host': 'fake-mini', + 'disabled': False, + 'disabled_reason': None}, + 'id': 1, 'hypervisor_hostname': 'fake-mini'} def fake_get_host_uptime(self, context, hyp): return (" 08:32:11 up 93 days, 18:25, 12 users, load average:" diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl index fb473a03bb..2777eb4887 100644 --- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl @@ -3,6 +3,8 @@ { "cpu_info": "?", "current_workload": 0, + "state": "up", + "status": "enabled", "disk_available_least": 0, "host_ip": "%(ip)s", "free_disk_gb": 1028, @@ -18,7 +20,8 @@ "running_vms": 0, "service": { "host": "%(host_name)s", - "id": 2 + "id": 2, + "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 diff --git 
a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl index 8d94021274..710cdfcf9c 100644 --- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl @@ -2,7 +2,9 @@ "hypervisors": [ { "hypervisor_hostname": "fake-mini", + "state": "up", + "status": "enabled", "id": 1 } ] -} \ No newline at end of file +} diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl index 8d94021274..375627499d 100644 --- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl @@ -2,7 +2,9 @@ "hypervisors": [ { "hypervisor_hostname": "fake-mini", - "id": 1 + "id": 1, + "state": "up", + "status": "enabled" } ] -} \ No newline at end of file +} diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl index 934ef0c02d..710b05b930 100644 --- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl @@ -2,6 +2,8 @@ "hypervisor": { "hypervisor_hostname": "fake-mini", "id": 1, + "state": "up", + "status": "enabled", "servers": [] } -} \ No newline at end of file +} diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl index a1e5f2080b..f125da01af 100644 --- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl +++ 
b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl @@ -3,6 +3,8 @@ "cpu_info": "?", "current_workload": 0, "disk_available_least": 0, + "state": "up", + "status": "enabled", "host_ip": "%(ip)s", "free_disk_gb": 1028, "free_ram_mb": 7680, @@ -17,7 +19,8 @@ "running_vms": 0, "service": { "host": "%(host_name)s", - "id": 2 + "id": 2, + "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl index 8a36c65f23..e2f6d2e47e 100644 --- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl @@ -2,6 +2,8 @@ "hypervisor": { "hypervisor_hostname": "fake-mini", "id": %(hypervisor_id)s, + "state": "up", + "status": "enabled", "uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14" } } diff --git a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl index 69c5df943f..f2bf2bc02c 100644 --- a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl @@ -2,6 +2,8 @@ "hypervisors": [ { "cpu_info": "?", + "state": "up", + "status": "enabled", "current_workload": 0, "disk_available_least": 0, "host_ip": "%(ip)s", @@ -30,7 +32,8 @@ "running_vms": 0, "service": { "host": "043b3cacf6f34c90a7245151fc8ebcda", - "id": 2 + "id": 2, + "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 diff --git a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl index 6a6fbe3d3b..3c0fc0abcd 100644 --- 
a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl @@ -2,6 +2,8 @@ "hypervisor": { "cpu_info": "?", "current_workload": 0, + "state": "up", + "status": "enabled", "disk_available_least": 0, "host_ip": "%(ip)s", "free_disk_gb": 1028, @@ -29,7 +31,8 @@ "running_vms": 0, "service": { "host": "043b3cacf6f34c90a7245151fc8ebcda", - "id": 2 + "id": 2, + "disabled_reason": null }, "vcpus": 1, "vcpus_used": 0 diff --git a/nova/tests/integrated/v3/test_pci.py b/nova/tests/integrated/v3/test_pci.py index 30c664dbf2..91aba3f9c7 100644 --- a/nova/tests/integrated/v3/test_pci.py +++ b/nova/tests/integrated/v3/test_pci.py @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. +import mock + from nova import db from nova.openstack.common import jsonutils from nova.tests.integrated.v3 import api_sample_base @@ -83,6 +85,8 @@ def setUp(self): "current_workload": 0, "disk_available_least": 0, "host_ip": "1.1.1.1", + "state": "up", + "status": "enabled", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", @@ -94,8 +98,10 @@ def setUp(self): "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, - "service": {"host": '043b3cacf6f34c90a724' - '5151fc8ebcda'}, + "service": {"host": '043b3cacf6f34c90a' + '7245151fc8ebcda', + "disabled": False, + "disabled_reason": None}, "vcpus": 1, "vcpus_used": 0, "service_id": 2, @@ -110,13 +116,12 @@ def setUp(self): ' "0x1"]]', "key1": "value1"}}]} - def test_pci_show(self): - def fake_compute_node_get(context, id): - self.fake_compute_node['pci_stats'] = jsonutils.dumps( - self.fake_compute_node['pci_stats']) - return self.fake_compute_node - - self.stubs.Set(db, 'compute_node_get', fake_compute_node_get) + @mock.patch("nova.servicegroup.API.service_is_up", return_value=True) + @mock.patch("nova.db.compute_node_get") + def test_pci_show(self, 
mock_db, mock_service): + self.fake_compute_node['pci_stats'] = jsonutils.dumps( + self.fake_compute_node['pci_stats']) + mock_db.return_value = self.fake_compute_node hypervisor_id = 1 response = self._do_get('os-hypervisors/%s' % hypervisor_id) subs = { @@ -126,13 +131,13 @@ def fake_compute_node_get(context, id): self._verify_response('hypervisors-pci-show-resp', subs, response, 200) - def test_pci_detail(self): - def fake_compute_node_get_all(context): - self.fake_compute_node['pci_stats'] = jsonutils.dumps( - self.fake_compute_node['pci_stats']) - return [self.fake_compute_node] + @mock.patch("nova.servicegroup.API.service_is_up", return_value=True) + @mock.patch("nova.db.compute_node_get_all") + def test_pci_detail(self, mock_db, mock_service): + self.fake_compute_node['pci_stats'] = jsonutils.dumps( + self.fake_compute_node['pci_stats']) - self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all) + mock_db.return_value = [self.fake_compute_node] hypervisor_id = 1 subs = { 'hypervisor_id': hypervisor_id From 98e6891dfd4408c56644f55fe3cff88703beb4bf Mon Sep 17 00:00:00 2001 From: John Warren Date: Wed, 11 Jun 2014 20:29:28 +0000 Subject: [PATCH 127/486] Check for resize path on libvirt instance delete If an instance is deleted after the instance's disk image path has been renamed by adding the "_resize" suffix to it but before the resize operation completes, the libvirt driver will not delete the orphaned files and manual intervention is needed to get them deleted. This fix addresses the issue by attempting to rename the instance path by adding a "_del" suffix and if that fails, renaming the instance path with the "_resize" suffix by replacing the "_resize" suffix with the "_del" suffix. If both renaming operations fail, the sequence is repeated, in case the the disk image path initially had the "_resize" suffix and another thread removed it before the second rename operation was attempted. 
These rename operations are used in favor of checking for the existence of paths and deleting if found, because rename operations are atomic whereas another thread could rename the path between the exist check and the deleting. Regardless of the outcome of the renaming operations, the existence of the instance path with the "_del" suffix is verified and if it exists, it is deleted. This is done in case a prior delete operation that managed to create the "_del" path was subsequently interrupted before all instance files could be deleted. Note that the LibvirtConnTestCase.test_delete_instance_files test case was removed in order to eliminate redundancy. Closes-Bug: #1308565 Change-Id: Ifcb2e18211347ccf3e5472779c5917a729a6eced --- nova/tests/virt/libvirt/test_driver.py | 185 +++++++++++++++++++------ nova/virt/libvirt/driver.py | 51 ++++++- 2 files changed, 188 insertions(+), 48 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index bd126cede0..d5038460e3 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -5717,9 +5717,10 @@ def _test_destroy_removes_disk(self, volume_fail=False): else: libvirt_driver.LibvirtDriver._disconnect_volume( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.mox.StubOutWithMock(shutil, "rmtree") - shutil.rmtree(os.path.join(CONF.instances_path, - 'instance-%08x' % int(instance['id']))) + self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, + 'delete_instance_files') + (libvirt_driver.LibvirtDriver.delete_instance_files(mox.IgnoreArg()). 
+ AndReturn(True)) self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_cleanup_lvm') libvirt_driver.LibvirtDriver._cleanup_lvm(instance) @@ -5798,44 +5799,6 @@ def fake_unfilter_instance(instance, network_info): self.stubs.Set(os.path, 'exists', fake_os_path_exists) conn.destroy(self.context, instance, [], None, False) - def test_delete_instance_files(self): - instance = {"name": "instancename", "id": "42", - "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64", - "cleaned": 0, 'info_cache': None, 'security_groups': []} - - self.mox.StubOutWithMock(db, 'instance_get_by_uuid') - self.mox.StubOutWithMock(os.path, 'exists') - self.mox.StubOutWithMock(shutil, "rmtree") - - db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(), - columns_to_join=['info_cache', - 'security_groups'], - use_slave=False - ).AndReturn(instance) - os.path.exists(mox.IgnoreArg()).AndReturn(False) - os.path.exists(mox.IgnoreArg()).AndReturn(True) - shutil.rmtree(os.path.join(CONF.instances_path, instance['uuid'])) - os.path.exists(mox.IgnoreArg()).AndReturn(True) - os.path.exists(mox.IgnoreArg()).AndReturn(False) - os.path.exists(mox.IgnoreArg()).AndReturn(True) - shutil.rmtree(os.path.join(CONF.instances_path, instance['uuid'])) - os.path.exists(mox.IgnoreArg()).AndReturn(False) - self.mox.ReplayAll() - - def fake_obj_load_attr(self, attrname): - if not hasattr(self, attrname): - self[attrname] = {} - - conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) - self.stubs.Set(objects.Instance, 'fields', - {'id': int, 'uuid': str, 'cleaned': int}) - self.stubs.Set(objects.Instance, 'obj_load_attr', - fake_obj_load_attr) - - inst_obj = objects.Instance.get_by_uuid(None, instance['uuid']) - self.assertFalse(conn.delete_instance_files(inst_obj)) - self.assertTrue(conn.delete_instance_files(inst_obj)) - def test_reboot_different_ids(self): class FakeLoopingCall: def start(self, *a, **k): @@ -10205,6 +10168,146 @@ def test_rescue_config_drive(self): image_meta, rescue_password) 
self.mox.VerifyAll() + @mock.patch('shutil.rmtree') + @mock.patch('nova.utils.execute') + @mock.patch('os.path.exists') + @mock.patch('nova.virt.libvirt.utils.get_instance_path') + def test_delete_instance_files(self, get_instance_path, exists, exe, + shutil): + lv = self.libvirtconnection + get_instance_path.return_value = '/path' + instance = objects.Instance(uuid='fake-uuid', id=1) + + exists.side_effect = [False, False, True, False] + + result = lv.delete_instance_files(instance) + get_instance_path.assert_called_with(instance) + exe.assert_called_with('mv', '/path', '/path_del') + shutil.assert_called_with('/path_del') + self.assertTrue(result) + + @mock.patch('shutil.rmtree') + @mock.patch('nova.utils.execute') + @mock.patch('os.path.exists') + @mock.patch('nova.virt.libvirt.utils.get_instance_path') + def test_delete_instance_files_resize(self, get_instance_path, exists, + exe, shutil): + lv = self.libvirtconnection + get_instance_path.return_value = '/path' + instance = objects.Instance(uuid='fake-uuid', id=1) + + nova.utils.execute.side_effect = [Exception(), None] + exists.side_effect = [False, False, True, False] + + result = lv.delete_instance_files(instance) + get_instance_path.assert_called_with(instance) + expected = [mock.call('mv', '/path', '/path_del'), + mock.call('mv', '/path_resize', '/path_del')] + self.assertEqual(expected, exe.mock_calls) + shutil.assert_called_with('/path_del') + self.assertTrue(result) + + @mock.patch('shutil.rmtree') + @mock.patch('nova.utils.execute') + @mock.patch('os.path.exists') + @mock.patch('nova.virt.libvirt.utils.get_instance_path') + def test_delete_instance_files_failed(self, get_instance_path, exists, exe, + shutil): + lv = self.libvirtconnection + get_instance_path.return_value = '/path' + instance = objects.Instance(uuid='fake-uuid', id=1) + + exists.side_effect = [False, False, True, True] + + result = lv.delete_instance_files(instance) + get_instance_path.assert_called_with(instance) + 
exe.assert_called_with('mv', '/path', '/path_del') + shutil.assert_called_with('/path_del') + self.assertFalse(result) + + @mock.patch('shutil.rmtree') + @mock.patch('nova.utils.execute') + @mock.patch('os.path.exists') + @mock.patch('nova.virt.libvirt.utils.get_instance_path') + def test_delete_instance_files_mv_failed(self, get_instance_path, exists, + exe, shutil): + lv = self.libvirtconnection + get_instance_path.return_value = '/path' + instance = objects.Instance(uuid='fake-uuid', id=1) + + nova.utils.execute.side_effect = Exception() + exists.side_effect = [True, True] + + result = lv.delete_instance_files(instance) + get_instance_path.assert_called_with(instance) + expected = [mock.call('mv', '/path', '/path_del'), + mock.call('mv', '/path_resize', '/path_del')] * 2 + self.assertEqual(expected, exe.mock_calls) + self.assertFalse(result) + + @mock.patch('shutil.rmtree') + @mock.patch('nova.utils.execute') + @mock.patch('os.path.exists') + @mock.patch('nova.virt.libvirt.utils.get_instance_path') + def test_delete_instance_files_resume(self, get_instance_path, exists, + exe, shutil): + lv = self.libvirtconnection + get_instance_path.return_value = '/path' + instance = objects.Instance(uuid='fake-uuid', id=1) + + nova.utils.execute.side_effect = Exception() + exists.side_effect = [False, False, True, False] + + result = lv.delete_instance_files(instance) + get_instance_path.assert_called_with(instance) + expected = [mock.call('mv', '/path', '/path_del'), + mock.call('mv', '/path_resize', '/path_del')] * 2 + self.assertEqual(expected, exe.mock_calls) + self.assertTrue(result) + + @mock.patch('shutil.rmtree') + @mock.patch('nova.utils.execute') + @mock.patch('os.path.exists') + @mock.patch('nova.virt.libvirt.utils.get_instance_path') + def test_delete_instance_files_none(self, get_instance_path, exists, + exe, shutil): + lv = self.libvirtconnection + get_instance_path.return_value = '/path' + instance = objects.Instance(uuid='fake-uuid', id=1) + + 
nova.utils.execute.side_effect = Exception() + exists.side_effect = [False, False, False, False] + + result = lv.delete_instance_files(instance) + get_instance_path.assert_called_with(instance) + expected = [mock.call('mv', '/path', '/path_del'), + mock.call('mv', '/path_resize', '/path_del')] * 2 + self.assertEqual(expected, exe.mock_calls) + self.assertEqual(0, len(shutil.mock_calls)) + self.assertTrue(result) + + @mock.patch('shutil.rmtree') + @mock.patch('nova.utils.execute') + @mock.patch('os.path.exists') + @mock.patch('nova.virt.libvirt.utils.get_instance_path') + def test_delete_instance_files_concurrent(self, get_instance_path, exists, + exe, shutil): + lv = self.libvirtconnection + get_instance_path.return_value = '/path' + instance = objects.Instance(uuid='fake-uuid', id=1) + + nova.utils.execute.side_effect = [Exception(), Exception(), None] + exists.side_effect = [False, False, True, False] + + result = lv.delete_instance_files(instance) + get_instance_path.assert_called_with(instance) + expected = [mock.call('mv', '/path', '/path_del'), + mock.call('mv', '/path_resize', '/path_del')] + expected.append(expected[0]) + self.assertEqual(expected, exe.mock_calls) + shutil.assert_called_with('/path_del') + self.assertTrue(result) + class LibvirtVolumeUsageTestCase(test.TestCase): """Test for LibvirtDriver.get_all_volume_usage.""" diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 5cd37d911a..ae6f39ab3f 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -46,6 +46,7 @@ from eventlet import util as eventlet_util from lxml import etree from oslo.config import cfg +import six from nova.api.metadata import base as instance_metadata from nova import block_device @@ -5413,23 +5414,59 @@ def _delete_instance_files(self, instance): def delete_instance_files(self, instance): target = libvirt_utils.get_instance_path(instance) - if os.path.exists(target): - LOG.info(_LI('Deleting instance files %s'), target, + # A 
resize may be in progress + target_resize = target + '_resize' + # Other threads may attempt to rename the path, so renaming the path + # to target + '_del' (because it is atomic) and iterating through + # twice in the unlikely event that a concurrent rename occurs between + # the two rename attempts in this method. In general this method + # should be fairly thread-safe without these additional checks, since + # other operations involving renames are not permitted when the task + # state is not None and the task state should be set to something + # other than None by the time this method is invoked. + target_del = target + '_del' + for i in six.moves.range(2): + try: + utils.execute('mv', target, target_del) + break + except Exception: + pass + try: + utils.execute('mv', target_resize, target_del) + break + except Exception: + pass + # Either the target or target_resize path may still exist if all + # rename attempts failed. + remaining_path = None + for p in (target, target_resize): + if os.path.exists(p): + remaining_path = p + break + + # A previous delete attempt may have been interrupted, so target_del + # may exist even if all rename attempts during the present method + # invocation failed due to the absence of both target and + # target_resize. + if not remaining_path and os.path.exists(target_del): + LOG.info(_LI('Deleting instance files %s'), target_del, instance=instance) + remaining_path = target_del try: - shutil.rmtree(target) + shutil.rmtree(target_del) except OSError as e: LOG.error(_LE('Failed to cleanup directory %(target)s: ' - '%(e)s'), {'target': target, 'e': e}, + '%(e)s'), {'target': target_del, 'e': e}, instance=instance) # It is possible that the delete failed, if so don't mark the instance # as cleaned. 
- if os.path.exists(target): - LOG.info(_LI('Deletion of %s failed'), target, instance=instance) + if remaining_path and os.path.exists(remaining_path): + LOG.info(_LI('Deletion of %s failed'), remaining_path, + instance=instance) return False - LOG.info(_LI('Deletion of %s complete'), target, instance=instance) + LOG.info(_LI('Deletion of %s complete'), target_del, instance=instance) return True @property From bf076b141e15438159094e2430f291574bd90e9e Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Tue, 22 Jul 2014 16:49:16 -0500 Subject: [PATCH 128/486] Rename virtutils to the more common libvirt_utils `libvirt_utils` is the more prevalent alias in the code for the module, so rename `virtutils` to match. Change-Id: Ib8d4291866bc53fa1980b70dd0f12e6b1fe10436 --- nova/tests/virt/libvirt/test_imagecache.py | 20 ++++++++++---------- nova/virt/libvirt/imagecache.py | 6 +++--- nova/virt/libvirt/volume.py | 10 +++++----- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/nova/tests/virt/libvirt/test_imagecache.py b/nova/tests/virt/libvirt/test_imagecache.py index 7536ebc696..5c05f4a14e 100644 --- a/nova/tests/virt/libvirt/test_imagecache.py +++ b/nova/tests/virt/libvirt/test_imagecache.py @@ -32,7 +32,7 @@ from nova.tests import fake_instance from nova import utils from nova.virt.libvirt import imagecache -from nova.virt.libvirt import utils as virtutils +from nova.virt.libvirt import utils as libvirt_utils CONF = cfg.CONF CONF.import_opt('compute_manager', 'nova.service') @@ -165,7 +165,7 @@ def test_list_backing_images_small(self): 'instance-00000002', 'instance-00000003']) self.stubs.Set(os.path, 'exists', lambda x: x.find('instance-') != -1) - self.stubs.Set(virtutils, 'get_disk_backing_file', + self.stubs.Set(libvirt_utils, 'get_disk_backing_file', lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm') found = os.path.join(CONF.instances_path, @@ -187,7 +187,7 @@ def test_list_backing_images_resized(self): 'instance-00000002', 'instance-00000003']) 
self.stubs.Set(os.path, 'exists', lambda x: x.find('instance-') != -1) - self.stubs.Set(virtutils, 'get_disk_backing_file', + self.stubs.Set(libvirt_utils, 'get_disk_backing_file', lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_' '10737418240')) @@ -210,7 +210,7 @@ def test_list_backing_images_instancename(self): lambda x: ['_base', 'banana-42-hamster']) self.stubs.Set(os.path, 'exists', lambda x: x.find('banana-42-hamster') != -1) - self.stubs.Set(virtutils, 'get_disk_backing_file', + self.stubs.Set(libvirt_utils, 'get_disk_backing_file', lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm') found = os.path.join(CONF.instances_path, @@ -235,7 +235,7 @@ def test_list_backing_images_disk_notexist(self): def fake_get_disk(disk_path): raise processutils.ProcessExecutionError() - self.stubs.Set(virtutils, 'get_disk_backing_file', fake_get_disk) + self.stubs.Set(libvirt_utils, 'get_disk_backing_file', fake_get_disk) image_cache_manager = imagecache.ImageCacheManager() image_cache_manager.unexplained_images = [] @@ -424,7 +424,7 @@ def test_handle_base_image_unused(self): self.assertEqual(image_cache_manager.corrupt_base_files, []) def test_handle_base_image_used(self): - self.stubs.Set(virtutils, 'chown', lambda x, y: None) + self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None) img = '123' with self._make_base_file() as fname: @@ -440,7 +440,7 @@ def test_handle_base_image_used(self): self.assertEqual(image_cache_manager.corrupt_base_files, []) def test_handle_base_image_used_remotely(self): - self.stubs.Set(virtutils, 'chown', lambda x, y: None) + self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None) img = '123' with self._make_base_file() as fname: @@ -491,7 +491,7 @@ def test_handle_base_image_used_missing(self): def test_handle_base_image_checksum_fails(self): self.flags(checksum_base_images=True, group='libvirt') - self.stubs.Set(virtutils, 'chown', lambda x, y: None) + self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None) img = '123' @@ 
-569,7 +569,7 @@ def exists(path): self.stubs.Set(os.path, 'exists', lambda x: exists(x)) - self.stubs.Set(virtutils, 'chown', lambda x, y: None) + self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None) # We need to stub utime as well self.stubs.Set(os, 'utime', lambda x, y: None) @@ -633,7 +633,7 @@ def get_disk_backing_file(path): return fq_path('%s_5368709120' % hashed_1) self.fail('Unexpected backing file lookup: %s' % path) - self.stubs.Set(virtutils, 'get_disk_backing_file', + self.stubs.Set(libvirt_utils, 'get_disk_backing_file', lambda x: get_disk_backing_file(x)) # Fake out verifying checksums, as that is tested elsewhere diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py index 7a3282f1ef..cd17d11e2c 100644 --- a/nova/virt/libvirt/imagecache.py +++ b/nova/virt/libvirt/imagecache.py @@ -37,7 +37,7 @@ from nova.openstack.common import processutils from nova import utils from nova.virt import imagecache -from nova.virt.libvirt import utils as virtutils +from nova.virt.libvirt import utils as libvirt_utils LOG = logging.getLogger(__name__) @@ -291,7 +291,7 @@ def _list_backing_images(self): if os.path.exists(disk_path): LOG.debug('%s has a disk file', ent) try: - backing_file = virtutils.get_disk_backing_file( + backing_file = libvirt_utils.get_disk_backing_file( disk_path) except processutils.ProcessExecutionError: # (for bug 1261442) @@ -516,7 +516,7 @@ def _handle_base_image(self, img_id, base_file): {'id': img_id, 'base_file': base_file}) if os.path.exists(base_file): - virtutils.chown(base_file, os.getuid()) + libvirt_utils.chown(base_file, os.getuid()) os.utime(base_file, None) def _age_and_verify_cached_images(self, context, all_instances, base_dir): diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py index d6aef9d1de..e011858203 100644 --- a/nova/virt/libvirt/volume.py +++ b/nova/virt/libvirt/volume.py @@ -36,7 +36,7 @@ from nova.storage import linuxscsi from nova import utils from nova.virt.libvirt 
import config as vconfig -from nova.virt.libvirt import utils as virtutils +from nova.virt.libvirt import utils as libvirt_utils LOG = logging.getLogger(__name__) @@ -97,7 +97,7 @@ def connect_volume(self, connection_info, disk_info): """Connect the volume. Returns xml for libvirt.""" conf = vconfig.LibvirtConfigGuestDisk() - conf.driver_name = virtutils.pick_disk_driver_name( + conf.driver_name = libvirt_utils.pick_disk_driver_name( self.connection._get_hypervisor_version(), self.is_block_dev ) @@ -674,7 +674,7 @@ def _ensure_mounted(self, nfs_export, options=None): """ mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base, utils.get_hash_str(nfs_export)) - if not virtutils.is_mounted(mount_path, nfs_export): + if not libvirt_utils.is_mounted(mount_path, nfs_export): self._mount_nfs(mount_path, nfs_export, options, ensure=True) return mount_path @@ -824,7 +824,7 @@ def _ensure_mounted(self, glusterfs_export, options=None): """ mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base, utils.get_hash_str(glusterfs_export)) - if not virtutils.is_mounted(mount_path, glusterfs_export): + if not libvirt_utils.is_mounted(mount_path, glusterfs_export): self._mount_glusterfs(mount_path, glusterfs_export, options, ensure=True) return mount_path @@ -894,7 +894,7 @@ def connect_volume(self, connection_info, disk_info): # We need to look for wwns on every hba # because we don't know ahead of time # where they will show up. - hbas = virtutils.get_fc_hbas_info() + hbas = libvirt_utils.get_fc_hbas_info() host_devices = [] for hba in hbas: pci_num = self._get_pci_num(hba) From f3f07f4401ff8be25598493c088e4e100b387fe4 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 22 Jul 2014 13:40:33 -0700 Subject: [PATCH 129/486] Turn periodic tasks off in all unit tests We shouldn't rely on periodic tasks to randomly run in unit tests, instead if we need them to run we should explicitly call them out. 
Change-Id: I35850c47288e50d3146771dc7a9a54d62e36c3cf Related-Bug: 1311778 --- nova/test.py | 1 + nova/tests/integrated/integrated_helpers.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/test.py b/nova/test.py index 1698e250bb..8c0cfff266 100644 --- a/nova/test.py +++ b/nova/test.py @@ -325,6 +325,7 @@ def setUp(self): CONF.set_override('fatal_exception_format_errors', True) CONF.set_override('enabled', True, 'osapi_v3') CONF.set_override('force_dhcp_release', False) + CONF.set_override('periodic_enable', False) def _restore_obj_registry(self): objects_base.NovaObject._obj_classes = self._base_test_obj_backup diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 62c36a0889..dc04dacc77 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -69,7 +69,6 @@ def setUp(self): f = self._get_flags() self.flags(**f) self.flags(verbose=True) - self.flags(periodic_enable=False) self.useFixture(test.ReplaceModule('crypto', fake_crypto)) nova.tests.image.fake.stub_out_image_service(self.stubs) From 9b14ae62a6396a2599c8a2ada1a50e3f69a2eddc Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 8 Apr 2014 11:28:27 -0700 Subject: [PATCH 130/486] VMware: do not cache image when root_gb is 0 Save space on datastore when the instance size has root_gb of 0. There is no need to cache the disk as it already exists on the datastore. 
TrivialFix Change-Id: Iac75d97f40a1a91b8fa2df7fb4318fc2965fd93a Closes-bug: #1304593 --- nova/tests/virt/vmwareapi/test_driver_api.py | 12 ++++++++++++ nova/virt/vmwareapi/vmops.py | 7 +++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index a4a0937c5c..3c929d781b 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -764,6 +764,18 @@ def test_spawn_no_power_on(self): def test_spawn_power_on(self): self._spawn_power_state(True) + def test_spawn_root_size_0(self): + self._create_vm(instance_type='m1.micro') + info = self.conn.get_info({'uuid': self.uuid, + 'node': self.instance_node}) + self._check_vm_info(info, power_state.RUNNING) + cache = ('[%s] vmware_base/%s/%s.vmdk' % + (self.ds, self.fake_image_uuid, self.fake_image_uuid)) + gb_cache = ('[%s] vmware_base/%s/%s.0.vmdk' % + (self.ds, self.fake_image_uuid, self.fake_image_uuid)) + self.assertTrue(vmwareapi_fake.get_file(cache)) + self.assertFalse(vmwareapi_fake.get_file(gb_cache)) + def _spawn_with_delete_exception(self, fault=None): def fake_call_method(module, method, *args, **kwargs): diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index a726e1a3cb..890cccc35e 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -459,8 +459,11 @@ def _get_image_properties(root_size): root_vmdk_path, dc_info.ref) else: upload_folder = '%s/%s' % (self._base_folder, upload_name) - root_vmdk_name = "%s.%s.vmdk" % (upload_name, - instance.root_gb) + if root_gb: + root_vmdk_name = "%s.%s.vmdk" % (upload_name, + instance.root_gb) + else: + root_vmdk_name = "%s.vmdk" % upload_name root_vmdk_path = str(datastore.build_path( upload_folder, root_vmdk_name)) From 80df9f5ec53cc71c1ec51a8590921ae5b776ec22 Mon Sep 17 00:00:00 2001 From: shuangtai Date: Thu, 10 Jul 2014 19:58:58 +0800 Subject: [PATCH 131/486] Catch 
CannotResizeDisk exception when resize to zero disk When old flavor's root_gb is not equal 0 and new flavor's root_gb is 0, the resize() in nova.compute.api will raise CannotResizeDisk. Move up the new_instance_type check before using. Closes-Bug: #1340159 Change-Id: I4abf93530cf919af50a88d6049019fb745547257 --- .../openstack/compute/plugins/v3/servers.py | 2 ++ nova/api/openstack/compute/servers.py | 2 ++ nova/compute/api.py | 6 +++--- .../compute/plugins/v3/test_server_actions.py | 19 +++++++++++++++++++ .../openstack/compute/test_server_actions.py | 18 ++++++++++++++++++ 5 files changed, 44 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index eb34c18804..b809ee8d83 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -701,6 +701,8 @@ def _resize(self, req, instance_id, flavor_id, **kwargs): except exception.CannotResizeToSameFlavor: msg = _("Resize requires a flavor change.") raise exc.HTTPBadRequest(explanation=msg) + except exception.CannotResizeDisk as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index 48d99d1a2d..7ea07b0e9a 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -1173,6 +1173,8 @@ def _resize(self, req, instance_id, flavor_id, **kwargs): except exception.CannotResizeToSameFlavor: msg = _("Resize requires a flavor change.") raise exc.HTTPBadRequest(explanation=msg) + except exception.CannotResizeDisk as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except 
exception.InstanceInvalidState as state_error: diff --git a/nova/compute/api.py b/nova/compute/api.py index 612e1c1da2..119e6af2b4 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -2398,6 +2398,9 @@ def resize(self, context, instance, flavor_id=None, reason = _('Resize to zero disk flavor is not allowed.') raise exception.CannotResizeDisk(reason=reason) + if not new_instance_type: + raise exception.FlavorNotFound(flavor_id=flavor_id) + current_instance_type_name = current_instance_type['name'] new_instance_type_name = new_instance_type['name'] LOG.debug("Old instance type %(current_instance_type_name)s, " @@ -2406,9 +2409,6 @@ def resize(self, context, instance, flavor_id=None, 'new_instance_type_name': new_instance_type_name}, instance=instance) - if not new_instance_type: - raise exception.FlavorNotFound(flavor_id=flavor_id) - same_instance_type = (current_instance_type['id'] == new_instance_type['id']) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py index b02bb5f478..9618df12e8 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py @@ -15,6 +15,7 @@ import uuid +import mock import mox from oslo.config import cfg import webob @@ -685,6 +686,24 @@ def fake_resize(*args, **kwargs): self.controller._action_resize, req, FAKE_UUID, body) + @mock.patch('nova.compute.api.API.resize', + side_effect=exception.CannotResizeDisk(reason='')) + def test_resize_raises_cannot_resize_disk(self, mock_resize): + body = dict(resize=dict(flavor_ref="http://localhost/3")) + req = fakes.HTTPRequestV3.blank(self.url) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._action_resize, + req, FAKE_UUID, body) + + @mock.patch('nova.compute.api.API.resize', + side_effect=exception.FlavorNotFound(reason='')) + def test_resize_raises_flavor_not_found(self, mock_resize): + 
body = dict(resize=dict(flavor_ref="http://localhost/3")) + req = fakes.HTTPRequestV3.blank(self.url) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._action_resize, + req, FAKE_UUID, body) + def test_resize_raises_conflict_on_invalid_state(self): body = dict(resize=dict(flavor_ref="http://localhost/3")) diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py index de0c403344..a97c19aec0 100644 --- a/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/tests/api/openstack/compute/test_server_actions.py @@ -839,6 +839,24 @@ def _fake_resize(obj, context, instance, flavor_id): req, FAKE_UUID, body) self.assertEqual(self.resize_called, call_no + 1) + @mock.patch('nova.compute.api.API.resize', + side_effect=exception.CannotResizeDisk(reason='')) + def test_resize_raises_cannot_resize_disk(self, mock_resize): + body = dict(resize=dict(flavorRef="http://localhost/3")) + req = fakes.HTTPRequest.blank(self.url) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._action_resize, + req, FAKE_UUID, body) + + @mock.patch('nova.compute.api.API.resize', + side_effect=exception.FlavorNotFound(reason='')) + def test_resize_raises_flavor_not_found(self, mock_resize): + body = dict(resize=dict(flavorRef="http://localhost/3")) + req = fakes.HTTPRequest.blank(self.url) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._action_resize, + req, FAKE_UUID, body) + def test_resize_with_too_many_instances(self): body = dict(resize=dict(flavorRef="http://localhost/3")) From 8159e1daa21c315dcd10cd77164d3c364ee93047 Mon Sep 17 00:00:00 2001 From: Michael Still Date: Wed, 23 Jul 2014 12:18:55 +1000 Subject: [PATCH 132/486] Convert to importutils We had some remaining users of __import__ that really deserved to be converted to oslo's importutils. 
Change-Id: I8c0c3e885151018b974025087a4abc96dd6ae77c --- nova/virt/disk/vfs/guestfs.py | 3 ++- nova/virt/libvirt/driver.py | 2 +- nova/virt/libvirt/firewall.py | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/nova/virt/disk/vfs/guestfs.py b/nova/virt/disk/vfs/guestfs.py index 95e611561a..a65f649ab3 100644 --- a/nova/virt/disk/vfs/guestfs.py +++ b/nova/virt/disk/vfs/guestfs.py @@ -16,6 +16,7 @@ from nova import exception from nova.i18n import _ +from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.virt.disk.vfs import api as vfs @@ -37,7 +38,7 @@ def __init__(self, imgfile, imgfmt='raw', partition=None): global guestfs if guestfs is None: - guestfs = __import__('guestfs') + guestfs = importutils.import_module('guestfs') self.handle = None diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 5cd37d911a..ea430513fd 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -323,7 +323,7 @@ def __init__(self, virtapi, read_only=False): global libvirt if libvirt is None: - libvirt = __import__('libvirt') + libvirt = importutils.import_module('libvirt') self._skip_list_all_domains = False self._host_state = None diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index f935c7163e..c8922fc278 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -20,6 +20,7 @@ from nova.cloudpipe import pipelib from nova.i18n import _LI from nova.i18n import _LW +from nova.openstack.common import importutils from nova.openstack.common import log as logging import nova.virt.firewall as base_firewall from nova.virt import netutils @@ -44,7 +45,7 @@ def __init__(self, virtapi, get_connection, **kwargs): global libvirt if libvirt is None: try: - libvirt = __import__('libvirt') + libvirt = importutils.import_module('libvirt') except ImportError: LOG.warn(_LW("Libvirt module could not be loaded. 
" "NWFilterFirewall will not work correctly.")) From a09e7327aeba03eb6f2b1ae91d49aec9c6475773 Mon Sep 17 00:00:00 2001 From: Xu Han Peng Date: Fri, 20 Jun 2014 17:37:26 +0800 Subject: [PATCH 133/486] Raise specific error of network IP allocation Currently Nova raise "Neutron error: quota exceeded" when neutron client state code is 409 in IP allocation without caring about if the real error is over quota. This fixes raises PortLimitExceeded if error from neutron client is OverQuota and raises NoMoreFixedIps if error from neutron client is IpAddressGenerationFailure. If the error is neither of these two errors above, it reraises the original exception from neutron client. Change-Id: I0d1942587a359acccfca5298c2dcb4b21d971a42 Closes-Bug: 1331353 --- nova/network/neutronv2/api.py | 19 +++++++++++++------ nova/tests/network/test_neutronv2.py | 24 +++++++++++++++++++++--- 2 files changed, 34 insertions(+), 9 deletions(-) diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index f26224de87..f1a3654e44 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -24,7 +24,7 @@ from nova.compute import utils as compute_utils from nova import conductor from nova import exception -from nova.i18n import _ +from nova.i18n import _, _LW from nova.network import base_api from nova.network import model as network_model from nova.network import neutronv2 @@ -185,6 +185,8 @@ def _create_port(self, port_client, instance, network_id, port_req_body, :param dhcp_opts: Optional DHCP options. :returns: ID of the created port. :raises PortLimitExceeded: If neutron fails with an OverQuota error. + :raises NoMoreFixedIps: If neutron fails with + IpAddressGenerationFailure error. 
""" try: if fixed_ip: @@ -206,11 +208,16 @@ def _create_port(self, port_client, instance, network_id, port_req_body, LOG.debug('Successfully created port: %s', port_id, instance=instance) return port_id - except neutron_client_exc.NeutronClientException as e: - # NOTE(mriedem): OverQuota in neutron is a 409 - if e.status_code == 409: - LOG.warning(_('Neutron error: quota exceeded')) - raise exception.PortLimitExceeded() + except neutron_client_exc.OverQuotaClient: + LOG.warning(_LW( + 'Neutron error: Port quota exceeded in tenant: %s'), + port_req_body['port']['tenant_id'], instance=instance) + raise exception.PortLimitExceeded() + except neutron_client_exc.IpAddressGenerationFailureClient: + LOG.warning(_LW('Neutron error: No more fixed IPs in network: %s'), + network_id, instance=instance) + raise exception.NoMoreFixedIps() + except neutron_client_exc.NeutronClientException: with excutils.save_and_reraise_exception(): LOG.exception(_('Neutron error creating port on network %s'), network_id, instance=instance) diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index 5a08f3997d..5bd1696054 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -34,6 +34,7 @@ from nova.network.neutronv2 import constants from nova.openstack.common import jsonutils from nova import test +from nova.tests import fake_instance from nova import utils CONF = cfg.CONF @@ -932,9 +933,7 @@ def test_allocate_for_instance_ex1(self): self.moxed_client.create_port( MyComparator(port_req_body)).AndReturn({'port': port}) else: - NeutronOverQuota = exceptions.NeutronClientException( - message="Quota exceeded for resources: ['port']", - status_code=409) + NeutronOverQuota = exceptions.OverQuotaClient() self.moxed_client.create_port( MyComparator(port_req_body)).AndRaise(NeutronOverQuota) index += 1 @@ -2327,6 +2326,25 @@ def test_allocate_floating_ip_exceed_limit(self): api.allocate_floating_ip, self.context, pool_name) + 
def test_create_port_for_instance_no_more_ip(self): + instance = fake_instance.fake_instance_obj(self.context) + net = {'id': 'my_netid1', + 'name': 'my_netname1', + 'subnets': ['mysubnid1'], + 'tenant_id': instance['project_id']} + + with mock.patch.object(client.Client, 'create_port', + side_effect=exceptions.IpAddressGenerationFailureClient()) as ( + create_port_mock): + zone = 'compute:%s' % instance['availability_zone'] + port_req_body = {'port': {'device_id': instance['uuid'], + 'device_owner': zone}} + self.assertRaises(exception.NoMoreFixedIps, + self.api._create_port, + neutronv2.get_client(self.context), + instance, net['id'], port_req_body) + create_port_mock.assert_called_once_with(port_req_body) + class TestNeutronv2ModuleMethods(test.TestCase): From 6169e05a4829c502b279e68f96cec5932f731ef2 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Mon, 24 Mar 2014 15:16:17 +0800 Subject: [PATCH 134/486] Rollback quota in os_tenant_network when we call API with 'os_tenant_networks' to delete an existing network it might fail due to unexpected reason, we should rollback the quota we reserved otherwise we will face no quota later. 
Change-Id: Ic74a7a8df337070f17b1ea6916809fa93880a287 Closes-Bug: 1296489 --- .../compute/contrib/os_tenant_networks.py | 15 +++++-- .../compute/contrib/test_networks.py | 41 +++++++++++++++++++ 2 files changed, 53 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/compute/contrib/os_tenant_networks.py b/nova/api/openstack/compute/contrib/os_tenant_networks.py index 71556b7cf5..2cfa9baaee 100644 --- a/nova/api/openstack/compute/contrib/os_tenant_networks.py +++ b/nova/api/openstack/compute/contrib/os_tenant_networks.py @@ -115,6 +115,7 @@ def show(self, req, id): def delete(self, req, id): context = req.environ['nova.context'] authorize(context) + reservation = None try: if CONF.enable_network_quota: reservation = QUOTAS.reserve(context, networks=-1) @@ -125,19 +126,27 @@ def delete(self, req, id): LOG.info(_LI("Deleting network with id %s"), id) + def _rollback_quota(reservation): + if CONF.enable_network_quota and reservation: + QUOTAS.rollback(context, reservation) + try: self.network_api.delete(context, id) - if CONF.enable_network_quota and reservation: - QUOTAS.commit(context, reservation) - response = exc.HTTPAccepted() except exception.PolicyNotAuthorized as e: + _rollback_quota(reservation) raise exc.HTTPForbidden(explanation=str(e)) except exception.NetworkInUse as e: + _rollback_quota(reservation) raise exc.HTTPConflict(explanation=e.format_message()) except exception.NetworkNotFound: + _rollback_quota(reservation) msg = _("Network not found") raise exc.HTTPNotFound(explanation=msg) + if CONF.enable_network_quota and reservation: + QUOTAS.commit(context, reservation) + response = exc.HTTPAccepted() + return response def create(self, req, body): diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py index cf9e78a22f..48fe473217 100644 --- a/nova/tests/api/openstack/compute/contrib/test_networks.py +++ b/nova/tests/api/openstack/compute/contrib/test_networks.py @@ -19,12 
+19,14 @@ import math import uuid +import mock import netaddr from oslo.config import cfg import webob from nova.api.openstack.compute.contrib import networks_associate from nova.api.openstack.compute.contrib import os_networks as networks +from nova.api.openstack.compute.contrib import os_tenant_networks as tnet import nova.context from nova import exception from nova import test @@ -394,3 +396,42 @@ def test_network_neutron_disassociate_not_implemented(self): self.assertRaises(webob.exc.HTTPNotImplemented, controller._disassociate_host_and_project, req, uuid, {'disassociate': None}) + + +class TenantNetworksTest(test.NoDBTestCase): + def setUp(self): + super(TenantNetworksTest, self).setUp() + self.controller = tnet.NetworkController() + self.flags(enable_network_quota=True) + + @mock.patch('nova.quota.QUOTAS.reserve') + @mock.patch('nova.quota.QUOTAS.rollback') + @mock.patch('nova.network.api.API.delete') + def _test_network_delete_exception(self, ex, expex, delete_mock, + rollback_mock, reserve_mock): + req = fakes.HTTPRequest.blank('/v2/1234/os-tenant-networks') + ctxt = req.environ['nova.context'] + + reserve_mock.return_value = 'rv' + delete_mock.side_effect = ex + + self.assertRaises(expex, self.controller.delete, req, 1) + + delete_mock.assert_called_once_with(ctxt, 1) + rollback_mock.assert_called_once_with(ctxt, 'rv') + reserve_mock.assert_called_once_with(ctxt, networks=-1) + + def test_network_delete_exception_network_not_found(self): + ex = exception.NetworkNotFound(network_id=1) + expex = webob.exc.HTTPNotFound + self._test_network_delete_exception(ex, expex) + + def test_network_delete_exception_policy_failed(self): + ex = exception.PolicyNotAuthorized(action='dummy') + expex = webob.exc.HTTPForbidden + self._test_network_delete_exception(ex, expex) + + def test_network_delete_exception_network_in_use(self): + ex = exception.NetworkInUse(network_id=1) + expex = webob.exc.HTTPConflict + self._test_network_delete_exception(ex, expex) From 
39c170a73474c8f76fad5c2ee596edd4b85feac7 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Sun, 20 Jul 2014 03:54:42 +0800 Subject: [PATCH 135/486] handle AutoDiskConfigDisabledByImage at API layer nova compute api layer might raise AutoDiskConfigDisabledByImage, so API layer need to catch them and report correct info. Also, create api handle lots of exceptions which inherit from Invalid, this patch combined them and catch Invalid only. Change-Id: Id0c89e96637d2f82b5bd204eba2c6291b9a5dd38 --- nova/api/openstack/compute/servers.py | 15 ++++------ .../openstack/compute/test_server_actions.py | 28 +++++++++++++++++++ .../api/openstack/compute/test_servers.py | 9 ++++++ 3 files changed, 43 insertions(+), 9 deletions(-) diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index 5077b188b3..3b49bd7633 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -983,23 +983,18 @@ def create(self, req, body): except (exception.ImageNotActive, exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, - exception.InvalidMetadata, - exception.InvalidRequest, - exception.MultiplePortsNotApplicable, - exception.InvalidFixedIpAndMaxCountRequest, exception.NetworkNotFound, exception.PortNotFound, exception.FixedIpAlreadyInUse, exception.SecurityGroupNotFound, - exception.InvalidBDM, - exception.PortRequiresFixedIP, - exception.NetworkRequiresSubnet, exception.InstanceUserDataTooLarge, exception.InstanceUserDataMalformed) as error: raise exc.HTTPBadRequest(explanation=error.format_message()) except (exception.PortInUse, exception.NoUniqueMatch) as error: raise exc.HTTPConflict(explanation=error.format_message()) + except exception.Invalid as error: + raise exc.HTTPBadRequest(explanation=error.format_message()) # If the caller wanted a reservation_id, return it if ret_resv_id: @@ -1189,7 +1184,8 @@ def _resize(self, req, instance_id, flavor_id, **kwargs): except exception.Invalid: msg = _("Invalid instance 
image.") raise exc.HTTPBadRequest(explanation=msg) - except exception.NoValidHost as e: + except (exception.NoValidHost, + exception.AutoDiskConfigDisabledByImage) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) return webob.Response(status_int=202) @@ -1396,7 +1392,8 @@ def _action_rebuild(self, req, id, body): except (exception.ImageNotActive, exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, - exception.InvalidMetadata) as error: + exception.InvalidMetadata, + exception.AutoDiskConfigDisabledByImage) as error: raise exc.HTTPBadRequest(explanation=error.format_message()) instance = self._get_server(context, req, id) diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py index 04c29d0bdb..eb938048a8 100644 --- a/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/tests/api/openstack/compute/test_server_actions.py @@ -763,6 +763,22 @@ def return_image_meta(*args, **kwargs): self.assertEqual(instance_meta['kernel_id'], '1') self.assertEqual(instance_meta['ramdisk_id'], '2') + @mock.patch.object(compute_api.API, 'rebuild') + def test_rebuild_instance_raise_auto_disk_config_exc(self, mock_rebuild): + body = { + "rebuild": { + "imageRef": self._image_href, + }, + } + + req = fakes.HTTPRequest.blank(self.url) + mock_rebuild.side_effect = exception.AutoDiskConfigDisabledByImage( + image='dummy') + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._action_rebuild, + req, FAKE_UUID, body) + def test_resize_server(self): body = dict(resize=dict(flavorRef="http://localhost/3")) @@ -871,6 +887,18 @@ def test_resize_raises_no_valid_host(self, mock_resize): self.controller._action_resize, req, FAKE_UUID, body) + @mock.patch.object(compute_api.API, 'resize') + def test_resize_instance_raise_auto_disk_config_exc(self, mock_resize): + mock_resize.side_effect = exception.AutoDiskConfigDisabledByImage( + image='dummy') + + body = 
dict(resize=dict(flavorRef="http://localhost/3")) + + req = fakes.HTTPRequest.blank(self.url) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller._action_resize, + req, FAKE_UUID, body) + def test_confirm_resize_server(self): body = dict(confirmResize=None) diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index 56306e1c77..5de4c32024 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -2166,6 +2166,15 @@ def test_create_instance_raise_user_data_too_large(self, mock_create): self.controller.create, self.req, self.body) + @mock.patch.object(compute_api.API, 'create') + def test_create_instance_raise_auto_disk_config_exc(self, mock_create): + mock_create.side_effect = exception.AutoDiskConfigDisabledByImage( + image='dummy') + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, + self.req, self.body) + def test_create_instance_with_network_with_no_subnet(self): self.flags(network_api_class='nova.network.neutronv2.api.API') network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' From b5ba4d603b9b18df8ded322c02d24105ef92953a Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Fri, 23 May 2014 14:08:52 +0100 Subject: [PATCH 136/486] libvirt: add parsing of NUMA topology in capabilities XML Introduce classes to nova.virt.libvirt.config for parsing the NUMA topology information in the capabilities XML schema. 
Blueprint: virt-driver-numa-placement Change-Id: I4948514cf38268663cdcf4bb31f335a19a7ec9d1 --- nova/tests/virt/libvirt/test_config.py | 22 +++++ nova/virt/libvirt/config.py | 118 +++++++++++++++++++++++++ 2 files changed, 140 insertions(+) diff --git a/nova/tests/virt/libvirt/test_config.py b/nova/tests/virt/libvirt/test_config.py index e0363ed3e3..04d0df5d9b 100644 --- a/nova/tests/virt/libvirt/test_config.py +++ b/nova/tests/virt/libvirt/test_config.py @@ -70,6 +70,28 @@ def test_config_host(self): + + + + 4048280 + + + + + + + + + 4127684 + + + + + + + + + hvm diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py index 7ff03ff6b5..228cdb48c1 100644 --- a/nova/virt/libvirt/config.py +++ b/nova/virt/libvirt/config.py @@ -111,6 +111,118 @@ def format_dom(self): return caps +class LibvirtConfigCapsNUMATopology(LibvirtConfigObject): + + def __init__(self, **kwargs): + super(LibvirtConfigCapsNUMATopology, self).__init__( + root_name="topology", + **kwargs) + + self.cells = [] + + def parse_dom(self, xmldoc): + super(LibvirtConfigCapsNUMATopology, self).parse_dom(xmldoc) + + xmlcells = xmldoc.getchildren()[0] + for xmlcell in xmlcells.getchildren(): + cell = LibvirtConfigCapsNUMACell() + cell.parse_dom(xmlcell) + self.cells.append(cell) + + def format_dom(self): + topo = super(LibvirtConfigCapsNUMATopology, self).format_dom() + + cells = etree.Element("cells") + cells.set("num", str(len(self.cells))) + topo.append(cells) + + for cell in self.cells: + cells.append(cell.format_dom()) + + return topo + + +class LibvirtConfigCapsNUMACell(LibvirtConfigObject): + + def __init__(self, **kwargs): + super(LibvirtConfigCapsNUMACell, self).__init__(root_name="cell", + **kwargs) + + self.id = None + self.memory = None + self.cpus = [] + + def parse_dom(self, xmldoc): + super(LibvirtConfigCapsNUMACell, self).parse_dom(xmldoc) + + self.id = int(xmldoc.get("id")) + for c in xmldoc.getchildren(): + if c.tag == "memory": + self.memory = int(c.text) + elif c.tag == 
"cpus": + for c2 in c.getchildren(): + cpu = LibvirtConfigCapsNUMACPU() + cpu.parse_dom(c2) + self.cpus.append(cpu) + + def format_dom(self): + cell = super(LibvirtConfigCapsNUMACell, self).format_dom() + + cell.set("id", str(self.id)) + + mem = etree.Element("memory") + mem.set("unit", "KiB") + mem.text = str(self.memory) + cell.append(mem) + + cpus = etree.Element("cpus") + cpus.set("num", str(len(self.cpus))) + for cpu in self.cpus: + cpus.append(cpu.format_dom()) + cell.append(cpus) + + return cell + + +class LibvirtConfigCapsNUMACPU(LibvirtConfigObject): + + def __init__(self, **kwargs): + super(LibvirtConfigCapsNUMACPU, self).__init__(root_name="cpu", + **kwargs) + + self.id = None + self.socket_id = None + self.core_id = None + self.siblings = None + + def parse_dom(self, xmldoc): + super(LibvirtConfigCapsNUMACPU, self).parse_dom(xmldoc) + + self.id = int(xmldoc.get("id")) + if xmldoc.get("socket_id") is not None: + self.socket_id = int(xmldoc.get("socket_id")) + if xmldoc.get("core_id") is not None: + self.core_id = int(xmldoc.get("core_id")) + + if xmldoc.get("siblings") is not None: + self.siblings = hardware.parse_cpu_spec( + xmldoc.get("siblings")) + + def format_dom(self): + cpu = super(LibvirtConfigCapsNUMACPU, self).format_dom() + + cpu.set("id", str(self.id)) + if self.socket_id is not None: + cpu.set("socket_id", str(self.socket_id)) + if self.core_id is not None: + cpu.set("core_id", str(self.core_id)) + if self.siblings is not None: + cpu.set("siblings", + hardware.format_cpu_spec(self.siblings)) + + return cpu + + class LibvirtConfigCapsHost(LibvirtConfigObject): def __init__(self, **kwargs): @@ -119,6 +231,7 @@ def __init__(self, **kwargs): self.cpu = None self.uuid = None + self.topology = None def parse_dom(self, xmldoc): super(LibvirtConfigCapsHost, self).parse_dom(xmldoc) @@ -130,6 +243,9 @@ def parse_dom(self, xmldoc): self.cpu = cpu elif c.tag == "uuid": self.uuid = c.text + elif c.tag == "topology": + self.topology = 
LibvirtConfigCapsNUMATopology() + self.topology.parse_dom(c) def format_dom(self): caps = super(LibvirtConfigCapsHost, self).format_dom() @@ -138,6 +254,8 @@ def format_dom(self): caps.append(self._text_node("uuid", self.uuid)) if self.cpu: caps.append(self.cpu.format_dom()) + if self.topology: + caps.append(self.topology.format_dom()) return caps From c9a3b3f89c6e56a8e736ef17ff4d02cbb8d8f2bb Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Tue, 10 Jun 2014 16:36:59 +0100 Subject: [PATCH 137/486] libvirt: add support for per-vCPU pinning in guest XML Expand LibvirtConfigGuestCPUTune to allow setting of a cpu mask per-vCPU Blueprint: virt-driver-cpu-pinning Change-Id: I547be6c7da4e3ebc9759f8310cb6282ac4b2f50c --- nova/tests/virt/libvirt/test_config.py | 26 ++++++++++++++++++++++++++ nova/virt/libvirt/config.py | 25 +++++++++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/nova/tests/virt/libvirt/test_config.py b/nova/tests/virt/libvirt/test_config.py index 04d0df5d9b..4f28b37358 100644 --- a/nova/tests/virt/libvirt/test_config.py +++ b/nova/tests/virt/libvirt/test_config.py @@ -1758,6 +1758,32 @@ def test_config_cputune_timeslice(self): 25000 """) + def test_config_cputune_vcpus(self): + cputune = config.LibvirtConfigGuestCPUTune() + + vcpu0 = config.LibvirtConfigGuestCPUTuneVCPUPin() + vcpu0.id = 0 + vcpu0.cpuset = set([0, 1]) + vcpu1 = config.LibvirtConfigGuestCPUTuneVCPUPin() + vcpu1.id = 1 + vcpu1.cpuset = set([2, 3]) + vcpu2 = config.LibvirtConfigGuestCPUTuneVCPUPin() + vcpu2.id = 2 + vcpu2.cpuset = set([4, 5]) + vcpu3 = config.LibvirtConfigGuestCPUTuneVCPUPin() + vcpu3.id = 3 + vcpu3.cpuset = set([6, 7]) + cputune.vcpupin.extend([vcpu0, vcpu1, vcpu2, vcpu3]) + + xml = cputune.to_xml() + self.assertXmlEqual(xml, """ + + + + + + """) + class LibvirtConfigGuestMetadataNovaTest(LibvirtConfigBaseTest): diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py index 228cdb48c1..858f98473b 100644 --- a/nova/virt/libvirt/config.py 
+++ b/nova/virt/libvirt/config.py @@ -1260,6 +1260,27 @@ def format_dom(self): return dev +class LibvirtConfigGuestCPUTuneVCPUPin(LibvirtConfigObject): + + def __init__(self, **kwargs): + super(LibvirtConfigGuestCPUTuneVCPUPin, self).__init__( + root_name="vcpupin", + **kwargs) + + self.id = None + self.cpuset = None + + def format_dom(self): + root = super(LibvirtConfigGuestCPUTuneVCPUPin, self).format_dom() + + root.set("vcpu", str(self.id)) + if self.cpuset is not None: + root.set("cpuset", + hardware.format_cpu_spec(self.cpuset)) + + return root + + class LibvirtConfigGuestCPUTune(LibvirtConfigObject): def __init__(self, **kwargs): @@ -1268,6 +1289,7 @@ def __init__(self, **kwargs): self.shares = None self.quota = None self.period = None + self.vcpupin = [] def format_dom(self): root = super(LibvirtConfigGuestCPUTune, self).format_dom() @@ -1279,6 +1301,9 @@ def format_dom(self): if self.period is not None: root.append(self._text_node("period", str(self.period))) + for vcpu in self.vcpupin: + root.append(vcpu.format_dom()) + return root From b977c07e9dfd7325f8ffaa07efac488f3fe30fa8 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Tue, 27 May 2014 15:58:49 +0100 Subject: [PATCH 138/486] libvirt: add support for memory backing parameters Add a libvirt config class for dealing with the XML elements under the section of the guest schema. 
Blueprint: virt-driver-large-pages Change-Id: Icd89508671a8951bed480b901b2d12d44ebb7cab --- nova/tests/virt/libvirt/test_config.py | 28 ++++++++++++++++++++++++++ nova/virt/libvirt/config.py | 26 ++++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/nova/tests/virt/libvirt/test_config.py b/nova/tests/virt/libvirt/test_config.py index 4f28b37358..7e2fcd3f8b 100644 --- a/nova/tests/virt/libvirt/test_config.py +++ b/nova/tests/virt/libvirt/test_config.py @@ -1136,6 +1136,9 @@ def test_config_kvm(self): obj.cputune.quota = 50000 obj.cputune.period = 25000 + obj.membacking = config.LibvirtConfigGuestMemoryBacking() + obj.membacking.hugepages = True + obj.name = "demo" obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147" obj.os_type = "linux" @@ -1162,6 +1165,9 @@ def test_config_kvm(self): b38a3f43-4be2-4046-897f-b67c2f5e0147 demo 104857600 + + + 2 @@ -1785,6 +1791,28 @@ def test_config_cputune_vcpus(self): """) +class LibvirtConfigGuestMemoryBackingTest(LibvirtConfigBaseTest): + def test_config_memory_backing_none(self): + obj = config.LibvirtConfigGuestMemoryBacking() + + xml = obj.to_xml() + self.assertXmlEqual(xml, "") + + def test_config_memory_backing_all(self): + obj = config.LibvirtConfigGuestMemoryBacking() + obj.locked = True + obj.sharedpages = False + obj.hugepages = True + + xml = obj.to_xml() + self.assertXmlEqual(xml, """ + + + + + """) + + class LibvirtConfigGuestMetadataNovaTest(LibvirtConfigBaseTest): def test_config_metadata(self): diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py index 858f98473b..0255ac1167 100644 --- a/nova/virt/libvirt/config.py +++ b/nova/virt/libvirt/config.py @@ -1307,6 +1307,29 @@ def format_dom(self): return root +class LibvirtConfigGuestMemoryBacking(LibvirtConfigObject): + + def __init__(self, **kwargs): + super(LibvirtConfigGuestMemoryBacking, self).__init__( + root_name="memoryBacking", **kwargs) + + self.hugepages = False + self.sharedpages = True + self.locked = False + + def 
format_dom(self): + root = super(LibvirtConfigGuestMemoryBacking, self).format_dom() + + if self.hugepages: + root.append(etree.Element("hugepages")) + if not self.sharedpages: + root.append(etree.Element("nosharedpages")) + if self.locked: + root.append(etree.Element("locked")) + + return root + + class LibvirtConfigGuest(LibvirtConfigObject): def __init__(self, **kwargs): @@ -1317,6 +1340,7 @@ def __init__(self, **kwargs): self.uuid = None self.name = None self.memory = 500 * units.Mi + self.membacking = None self.vcpus = 1 self.cpuset = None self.cpu = None @@ -1342,6 +1366,8 @@ def _format_basic_props(self, root): root.append(self._text_node("uuid", self.uuid)) root.append(self._text_node("name", self.name)) root.append(self._text_node("memory", self.memory)) + if self.membacking is not None: + root.append(self.membacking.format_dom()) if self.cpuset is not None: vcpu = self._text_node("vcpu", self.vcpus) vcpu.set("cpuset", hardware.format_cpu_spec(self.cpuset)) From f37a5e12b44f7b1259dca1a04911a40c6fd38e13 Mon Sep 17 00:00:00 2001 From: "Daniel P. 
Berrange" Date: Wed, 9 Jul 2014 11:00:31 +0100 Subject: [PATCH 139/486] libvirt: add support for memory tuning in config Add a libvirt config class for dealing with the XML elements under the element of the guest schema Blueprint: virt-driver-large-pages Change-Id: I141fcf4fd95f61f7fc3a5d0209dc2b1319ede941 --- nova/tests/virt/libvirt/test_config.py | 36 +++++++++++++++++++ nova/virt/libvirt/config.py | 48 +++++++++++++++++++++++--- 2 files changed, 79 insertions(+), 5 deletions(-) diff --git a/nova/tests/virt/libvirt/test_config.py b/nova/tests/virt/libvirt/test_config.py index 7e2fcd3f8b..115e4554be 100644 --- a/nova/tests/virt/libvirt/test_config.py +++ b/nova/tests/virt/libvirt/test_config.py @@ -1139,6 +1139,12 @@ def test_config_kvm(self): obj.membacking = config.LibvirtConfigGuestMemoryBacking() obj.membacking.hugepages = True + obj.memtune = config.LibvirtConfigGuestMemoryTune() + obj.memtune.hard_limit = 496 + obj.memtune.soft_limit = 672 + obj.memtune.swap_hard_limit = 1638 + obj.memtune.min_guarantee = 2970 + obj.name = "demo" obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147" obj.os_type = "linux" @@ -1168,6 +1174,12 @@ def test_config_kvm(self): + + 496 + 672 + 1638 + 2970 + 2 @@ -1813,6 +1825,30 @@ def test_config_memory_backing_all(self): """) +class LibvirtConfigGuestMemoryTuneTest(LibvirtConfigBaseTest): + def test_config_memory_backing_none(self): + obj = config.LibvirtConfigGuestMemoryTune() + + xml = obj.to_xml() + self.assertXmlEqual(xml, "") + + def test_config_memory_backing_all(self): + obj = config.LibvirtConfigGuestMemoryTune() + obj.soft_limit = 6 + obj.hard_limit = 28 + obj.swap_hard_limit = 140 + obj.min_guarantee = 270 + + xml = obj.to_xml() + self.assertXmlEqual(xml, """ + + 28 + 6 + 140 + 270 + """) + + class LibvirtConfigGuestMetadataNovaTest(LibvirtConfigBaseTest): def test_config_metadata(self): diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py index 0255ac1167..4872ba515f 100644 --- 
a/nova/virt/libvirt/config.py +++ b/nova/virt/libvirt/config.py @@ -48,15 +48,16 @@ def __init__(self, **kwargs): self.ns_prefix = kwargs.get('ns_prefix') self.ns_uri = kwargs.get('ns_uri') - def _new_node(self, name): + def _new_node(self, name, **kwargs): if self.ns_uri is None: - return etree.Element(name) + return etree.Element(name, **kwargs) else: return etree.Element("{" + self.ns_uri + "}" + name, - nsmap={self.ns_prefix: self.ns_uri}) + nsmap={self.ns_prefix: self.ns_uri}, + **kwargs) - def _text_node(self, name, value): - child = self._new_node(name) + def _text_node(self, name, value, **kwargs): + child = self._new_node(name, **kwargs) child.text = str(value) return child @@ -1330,6 +1331,40 @@ def format_dom(self): return root +class LibvirtConfigGuestMemoryTune(LibvirtConfigObject): + + def __init__(self, **kwargs): + super(LibvirtConfigGuestMemoryTune, self).__init__( + root_name="memtune", **kwargs) + + self.hard_limit = None + self.soft_limit = None + self.swap_hard_limit = None + self.min_guarantee = None + + def format_dom(self): + root = super(LibvirtConfigGuestMemoryTune, self).format_dom() + + if self.hard_limit is not None: + root.append(self._text_node("hard_limit", + str(self.hard_limit), + units="K")) + if self.soft_limit is not None: + root.append(self._text_node("soft_limit", + str(self.soft_limit), + units="K")) + if self.swap_hard_limit is not None: + root.append(self._text_node("swap_hard_limit", + str(self.swap_hard_limit), + units="K")) + if self.min_guarantee is not None: + root.append(self._text_node("min_guarantee", + str(self.min_guarantee), + units="K")) + + return root + + class LibvirtConfigGuest(LibvirtConfigObject): def __init__(self, **kwargs): @@ -1341,6 +1376,7 @@ def __init__(self, **kwargs): self.name = None self.memory = 500 * units.Mi self.membacking = None + self.memtune = None self.vcpus = 1 self.cpuset = None self.cpu = None @@ -1368,6 +1404,8 @@ def _format_basic_props(self, root): 
root.append(self._text_node("memory", self.memory)) if self.membacking is not None: root.append(self.membacking.format_dom()) + if self.memtune is not None: + root.append(self.memtune.format_dom()) if self.cpuset is not None: vcpu = self._text_node("vcpu", self.vcpus) vcpu.set("cpuset", hardware.format_cpu_spec(self.cpuset)) From f67a2c6848c111b7dafb2daa0f8cb83a9a832407 Mon Sep 17 00:00:00 2001 From: Rafael Folco Date: Fri, 11 Jul 2014 14:28:56 -0300 Subject: [PATCH 140/486] Add ibmveth model as a supported network driver for KVM ibmveth (spapr-vlan) is supported by KVM/Qemu on ppc64. This change allows users to change hw_vif_model='spapr-vlan' property to use ibmveth driver. Change-Id: Ibec6abad458c5cb15df57afa05f7f0742e4feb14 --- nova/network/model.py | 1 + nova/tests/virt/libvirt/test_vif.py | 27 ++++++++++++++++----------- nova/virt/libvirt/vif.py | 6 ++++-- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/nova/network/model.py b/nova/network/model.py index 456ab3819d..0424728788 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -53,6 +53,7 @@ def ensure_string_keys(d): VIF_MODEL_E1000 = 'e1000' VIF_MODEL_E1000E = 'e1000e' VIF_MODEL_NETFRONT = 'netfront' +VIF_MODEL_SPAPR_VLAN = 'spapr-vlan' # Constant for max length of network interface names # eg 'bridge' in the Network class or 'devname' in diff --git a/nova/tests/virt/libvirt/test_vif.py b/nova/tests/virt/libvirt/test_vif.py index 62011791b5..a360970b14 100644 --- a/nova/tests/virt/libvirt/test_vif.py +++ b/nova/tests/virt/libvirt/test_vif.py @@ -382,20 +382,25 @@ def test_model_kvm(self): d = vif.LibvirtGenericVIFDriver(self._get_conn()) xml = self._get_instance_xml(d, self.vif_bridge) - self._assertModel(xml, network_model.VIF_MODEL_VIRTIO) - def test_model_kvm_custom(self): - self.flags(use_virtio_for_bridges=True, - virt_type='kvm', - group='libvirt') + def test_model_kvm_qemu_custom(self): + for virt in ('kvm', 'qemu'): + self.flags(use_virtio_for_bridges=True, + 
virt_type=virt, + group='libvirt') - d = vif.LibvirtGenericVIFDriver(self._get_conn()) - image_meta = {'properties': {'hw_vif_model': - network_model.VIF_MODEL_E1000}} - xml = self._get_instance_xml(d, self.vif_bridge, - image_meta) - self._assertModel(xml, network_model.VIF_MODEL_E1000) + d = vif.LibvirtGenericVIFDriver(self._get_conn()) + supported = (network_model.VIF_MODEL_NE2K_PCI, + network_model.VIF_MODEL_PCNET, + network_model.VIF_MODEL_RTL8139, + network_model.VIF_MODEL_E1000, + network_model.VIF_MODEL_SPAPR_VLAN) + for model in supported: + image_meta = {'properties': {'hw_vif_model': model}} + xml = self._get_instance_xml(d, self.vif_bridge, + image_meta) + self._assertModel(xml, model) def test_model_kvm_bogus(self): self.flags(use_virtio_for_bridges=True, diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 108dc1aabd..d0e26d672f 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -58,12 +58,14 @@ def is_vif_model_valid_for_virt(virt_type, vif_model): network_model.VIF_MODEL_NE2K_PCI, network_model.VIF_MODEL_PCNET, network_model.VIF_MODEL_RTL8139, - network_model.VIF_MODEL_E1000], + network_model.VIF_MODEL_E1000, + network_model.VIF_MODEL_SPAPR_VLAN], 'kvm': [network_model.VIF_MODEL_VIRTIO, network_model.VIF_MODEL_NE2K_PCI, network_model.VIF_MODEL_PCNET, network_model.VIF_MODEL_RTL8139, - network_model.VIF_MODEL_E1000], + network_model.VIF_MODEL_E1000, + network_model.VIF_MODEL_SPAPR_VLAN], 'xen': [network_model.VIF_MODEL_NETFRONT, network_model.VIF_MODEL_NE2K_PCI, network_model.VIF_MODEL_PCNET, From f9b5dba7b0612222b20a1d63f494b85c63c0e47e Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Wed, 15 Jan 2014 08:39:41 -0800 Subject: [PATCH 141/486] libvirt: VM diagnostics (v3 API only) There is no formal definition for the VM diagnostics. For the V2 API the diagnostics will be returned as they are today. This will support backward compatibility with the existing API's. 
Part of the blueprint v3-diagnostics DocImpact Change-Id: I4128e76622e73a40d25fb50b10b1c1ac710a5cc4 --- nova/tests/virt/libvirt/test_driver.py | 183 ++++++++++++++++++++++++- nova/tests/virt/test_virt_drivers.py | 2 + nova/virt/libvirt/driver.py | 107 ++++++++++++--- 3 files changed, 274 insertions(+), 18 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index bb2c5266d1..e7a0329241 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -16,6 +16,7 @@ import __builtin__ import contextlib import copy +import datetime import errno import functools import os @@ -51,6 +52,7 @@ from nova.openstack.common import jsonutils from nova.openstack.common import loopingcall from nova.openstack.common import processutils +from nova.openstack.common import timeutils from nova.openstack.common import units from nova.openstack.common import uuidutils from nova.pci import pci_manager @@ -200,7 +202,8 @@ def ID(self): return self.id def info(self): - return [power_state.RUNNING, None, None, None, None] + return [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi, + None, None] def create(self): pass @@ -6723,6 +6726,43 @@ def fake_lookup_name(name): } self.assertEqual(actual, expect) + lt = datetime.datetime(2012, 11, 22, 12, 00, 00) + diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) + timeutils.set_time_override(diags_time) + + actual = conn.get_instance_diagnostics({"name": "testvirt", + "launched_at": lt}) + expected = {'config_drive': False, + 'cpu_details': [], + 'disk_details': [{'errors_count': 0, + 'id': '', + 'read_bytes': 688640L, + 'read_requests': 169L, + 'write_bytes': 0L, + 'write_requests': 0L}, + {'errors_count': 0, + 'id': '', + 'read_bytes': 688640L, + 'read_requests': 169L, + 'write_bytes': 0L, + 'write_requests': 0L}], + 'driver': 'libvirt', + 'hypervisor_os': 'linux', + 'memory_details': {'maximum': 2048, 'used': 1234}, + 'nic_details': [{'mac_address': 
'52:54:00:a4:38:38', + 'rx_drop': 0L, + 'rx_errors': 0L, + 'rx_octets': 4408L, + 'rx_packets': 82L, + 'tx_drop': 0L, + 'tx_errors': 0L, + 'tx_octets': 0L, + 'tx_packets': 0L}], + 'state': 'running', + 'uptime': 10, + 'version': '1.0'} + self.assertEqual(expected, actual.serialize()) + def test_diagnostic_blockstats_exception(self): xml = """ @@ -6797,6 +6837,35 @@ def fake_lookup_name(name): } self.assertEqual(actual, expect) + lt = datetime.datetime(2012, 11, 22, 12, 00, 00) + diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) + timeutils.set_time_override(diags_time) + + actual = conn.get_instance_diagnostics({"name": "testvirt", + "launched_at": lt}) + expected = {'config_drive': False, + 'cpu_details': [{'time': 15340000000L}, + {'time': 1640000000L}, + {'time': 3040000000L}, + {'time': 1420000000L}], + 'disk_details': [], + 'driver': 'libvirt', + 'hypervisor_os': 'linux', + 'memory_details': {'maximum': 2048, 'used': 1234}, + 'nic_details': [{'mac_address': '52:54:00:a4:38:38', + 'rx_drop': 0L, + 'rx_errors': 0L, + 'rx_octets': 4408L, + 'rx_packets': 82L, + 'tx_drop': 0L, + 'tx_errors': 0L, + 'tx_octets': 0L, + 'tx_packets': 0L}], + 'state': 'running', + 'uptime': 10, + 'version': '1.0'} + self.assertEqual(expected, actual.serialize()) + def test_diagnostic_interfacestats_exception(self): xml = """ @@ -6873,6 +6942,38 @@ def fake_lookup_name(name): } self.assertEqual(actual, expect) + lt = datetime.datetime(2012, 11, 22, 12, 00, 00) + diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) + timeutils.set_time_override(diags_time) + + actual = conn.get_instance_diagnostics({"name": "testvirt", + "launched_at": lt}) + expected = {'config_drive': False, + 'cpu_details': [{'time': 15340000000L}, + {'time': 1640000000L}, + {'time': 3040000000L}, + {'time': 1420000000L}], + 'disk_details': [{'errors_count': 0, + 'id': '', + 'read_bytes': 688640L, + 'read_requests': 169L, + 'write_bytes': 0L, + 'write_requests': 0L}, + {'errors_count': 0, + 'id': '', + 
'read_bytes': 688640L, + 'read_requests': 169L, + 'write_bytes': 0L, + 'write_requests': 0L}], + 'driver': 'libvirt', + 'hypervisor_os': 'linux', + 'memory_details': {'maximum': 2048, 'used': 1234}, + 'nic_details': [], + 'state': 'running', + 'uptime': 10, + 'version': '1.0'} + self.assertEqual(expected, actual.serialize()) + def test_diagnostic_memorystats_exception(self): xml = """ @@ -6955,6 +7056,46 @@ def fake_lookup_name(name): } self.assertEqual(actual, expect) + lt = datetime.datetime(2012, 11, 22, 12, 00, 00) + diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) + timeutils.set_time_override(diags_time) + + actual = conn.get_instance_diagnostics({"name": "testvirt", + "launched_at": lt}) + expected = {'config_drive': False, + 'cpu_details': [{'time': 15340000000L}, + {'time': 1640000000L}, + {'time': 3040000000L}, + {'time': 1420000000L}], + 'disk_details': [{'errors_count': 0, + 'id': '', + 'read_bytes': 688640L, + 'read_requests': 169L, + 'write_bytes': 0L, + 'write_requests': 0L}, + {'errors_count': 0, + 'id': '', + 'read_bytes': 688640L, + 'read_requests': 169L, + 'write_bytes': 0L, + 'write_requests': 0L}], + 'driver': 'libvirt', + 'hypervisor_os': 'linux', + 'memory_details': {'maximum': 2048, 'used': 1234}, + 'nic_details': [{'mac_address': '52:54:00:a4:38:38', + 'rx_drop': 0L, + 'rx_errors': 0L, + 'rx_octets': 4408L, + 'rx_packets': 82L, + 'tx_drop': 0L, + 'tx_errors': 0L, + 'tx_octets': 0L, + 'tx_packets': 0L}], + 'state': 'running', + 'uptime': 10, + 'version': '1.0'} + self.assertEqual(expected, actual.serialize()) + def test_diagnostic_full(self): xml = """ @@ -7039,6 +7180,46 @@ def fake_lookup_name(name): } self.assertEqual(actual, expect) + lt = datetime.datetime(2012, 11, 22, 12, 00, 00) + diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) + timeutils.set_time_override(diags_time) + + actual = conn.get_instance_diagnostics({"name": "testvirt", + "launched_at": lt}) + expected = {'config_drive': False, + 'cpu_details': [{'time': 
15340000000L}, + {'time': 1640000000L}, + {'time': 3040000000L}, + {'time': 1420000000L}], + 'disk_details': [{'errors_count': 0, + 'id': '', + 'read_bytes': 688640L, + 'read_requests': 169L, + 'write_bytes': 0L, + 'write_requests': 0L}, + {'errors_count': 0, + 'id': '', + 'read_bytes': 688640L, + 'read_requests': 169L, + 'write_bytes': 0L, + 'write_requests': 0L}], + 'driver': 'libvirt', + 'hypervisor_os': 'linux', + 'memory_details': {'maximum': 2048, 'used': 1234}, + 'nic_details': [{'mac_address': '52:54:00:a4:38:38', + 'rx_drop': 0L, + 'rx_errors': 0L, + 'rx_octets': 4408L, + 'rx_packets': 82L, + 'tx_drop': 0L, + 'tx_errors': 0L, + 'tx_octets': 0L, + 'tx_packets': 0L}], + 'state': 'running', + 'uptime': 10, + 'version': '1.0'} + self.assertEqual(expected, actual.serialize()) + @mock.patch.object(libvirt_driver.LibvirtDriver, "_list_instance_domains") def test_failing_vcpu_count(self, mock_list): diff --git a/nova/tests/virt/test_virt_drivers.py b/nova/tests/virt/test_virt_drivers.py index ff51d4c346..2597d559bb 100644 --- a/nova/tests/virt/test_virt_drivers.py +++ b/nova/tests/virt/test_virt_drivers.py @@ -27,6 +27,7 @@ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging +from nova.openstack.common import timeutils from nova import test from nova.tests.image import fake as fake_image from nova.tests import utils as test_utils @@ -498,6 +499,7 @@ def test_get_diagnostics(self): @catch_notimplementederror def test_get_instance_diagnostics(self): instance_ref, network_info = self._get_running_instance(obj=True) + instance_ref['launched_at'] = timeutils.utcnow() self.connection.get_instance_diagnostics(instance_ref) @catch_notimplementederror diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 6c39391ac2..d9ab634408 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -69,6 +69,7 @@ from nova.openstack.common import log as 
logging from nova.openstack.common import loopingcall from nova.openstack.common import processutils +from nova.openstack.common import timeutils from nova.openstack.common import units from nova.openstack.common import xmlutils from nova.pci import pci_manager @@ -79,6 +80,7 @@ from nova import version from nova.virt import block_device as driver_block_device from nova.virt import configdrive +from nova.virt import diagnostics from nova.virt.disk import api as disk from nova.virt import driver from nova.virt import event as virtevent @@ -5312,24 +5314,25 @@ def confirm_migration(self, migration, instance, network_info): """Confirms a resize, destroying the source VM.""" self._cleanup_resize(instance, network_info) - def get_diagnostics(self, instance): - def get_io_devices(xml_doc): - """get the list of io devices from the xml document.""" - result = {"volumes": [], "ifaces": []} - try: - doc = etree.fromstring(xml_doc) - except Exception: - return result - blocks = [('./devices/disk', 'volumes'), - ('./devices/interface', 'ifaces')] - for block, key in blocks: - section = doc.findall(block) - for node in section: - for child in node.getchildren(): - if child.tag == 'target' and child.get('dev'): - result[key].append(child.get('dev')) + @staticmethod + def _get_io_devices(xml_doc): + """get the list of io devices from the xml document.""" + result = {"volumes": [], "ifaces": []} + try: + doc = etree.fromstring(xml_doc) + except Exception: return result + blocks = [('./devices/disk', 'volumes'), + ('./devices/interface', 'ifaces')] + for block, key in blocks: + section = doc.findall(block) + for node in section: + for child in node.getchildren(): + if child.tag == 'target' and child.get('dev'): + result[key].append(child.get('dev')) + return result + def get_diagnostics(self, instance): domain = self._lookup_by_name(instance['name']) output = {} # get cpu time, might launch an exception if the method @@ -5343,7 +5346,7 @@ def get_io_devices(xml_doc): pass # get io 
status xml = domain.XMLDesc(0) - dom_io = get_io_devices(xml) + dom_io = LibvirtDriver._get_io_devices(xml) for guest_disk in dom_io["volumes"]: try: # blockStats might launch an exception if the method @@ -5385,6 +5388,76 @@ def get_io_devices(xml_doc): pass return output + def get_instance_diagnostics(self, instance): + domain = self._lookup_by_name(instance['name']) + xml = domain.XMLDesc(0) + xml_doc = etree.fromstring(xml) + + (state, max_mem, mem, num_cpu, cpu_time) = domain.info() + config_drive = configdrive.required_by(instance) + launched_at = timeutils.normalize_time(instance['launched_at']) + uptime = timeutils.delta_seconds(launched_at, + timeutils.utcnow()) + diags = diagnostics.Diagnostics(state=power_state.STATE_MAP[state], + driver='libvirt', + config_drive=config_drive, + hypervisor_os='linux', + uptime=uptime) + diags.memory_details.maximum = max_mem / units.Mi + diags.memory_details.used = mem / units.Mi + + # get cpu time, might launch an exception if the method + # is not supported by the underlying hypervisor being + # used by libvirt + try: + cputime = domain.vcpus()[0] + num_cpus = len(cputime) + for i in range(num_cpus): + diags.add_cpu(time=cputime[i][2]) + except libvirt.libvirtError: + pass + # get io status + dom_io = LibvirtDriver._get_io_devices(xml) + for guest_disk in dom_io["volumes"]: + try: + # blockStats might launch an exception if the method + # is not supported by the underlying hypervisor being + # used by libvirt + stats = domain.blockStats(guest_disk) + diags.add_disk(read_bytes=stats[1], + read_requests=stats[0], + write_bytes=stats[3], + write_requests=stats[2]) + except libvirt.libvirtError: + pass + for interface in dom_io["ifaces"]: + try: + # interfaceStats might launch an exception if the method + # is not supported by the underlying hypervisor being + # used by libvirt + stats = domain.interfaceStats(interface) + diags.add_nic(rx_octets=stats[0], + rx_errors=stats[2], + rx_drop=stats[3], + rx_packets=stats[1], + 
tx_octets=stats[4], + tx_errors=stats[6], + tx_drop=stats[7], + tx_packets=stats[5]) + except libvirt.libvirtError: + pass + + # Update mac addresses of interface if stats have been reported + if len(diags.nic_details) > 0: + ret = xml_doc.findall('./devices/interface') + index = 0 + for node in ret: + for child in node.getchildren(): + if child.tag == 'mac': + diags.nic_details[index].mac_address = child.get( + 'address') + return diags + def instance_on_disk(self, instance): # ensure directories exist and are writable instance_path = libvirt_utils.get_instance_path(instance) From 6b0ca82673e6b4120fd2bacb656e239ed5afd265 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 15 Jul 2014 11:24:20 -0700 Subject: [PATCH 142/486] Add quota limit create/update methods to Quotas object This adds two methods to the Quotas object that are effectively passthroughs to the DB api calls. Since the Quotas object isn't actually a representation of what is in the database, and because we've (incorrectly) got some logic in the DB API around how the quotas are applied, this is just a temporary step. Ideally, we would (will) refactor Quotas in several ways in the near future, at which time the way all of this works can be revisited and cleaned up. Related to blueprint compute-manager-objects-juno Change-Id: If93614cd27e56667e0f106e8045dd1adc633ff1b --- nova/objects/quotas.py | 19 +++++++++++++++++++ nova/tests/objects/test_objects.py | 4 ++-- nova/tests/objects/test_quotas.py | 16 ++++++++++++++++ 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/nova/objects/quotas.py b/nova/objects/quotas.py index 288c7ed668..2731c53d7e 100644 --- a/nova/objects/quotas.py +++ b/nova/objects/quotas.py @@ -13,6 +13,7 @@ # under the License. 
+from nova import db from nova.objects import base from nova.objects import fields from nova import quota @@ -39,6 +40,10 @@ def ids_from_security_group(context, security_group): class Quotas(base.NovaObject): + # Version 1.0: initial version + # Version 1.1: Added create_limit() and update_limit() + VERSION = '1.1' + fields = { 'reservations': fields.ListOfStringsField(nullable=True), 'project_id': fields.StringField(nullable=True), @@ -106,6 +111,20 @@ def rollback(self, context=None): self.reservations = None self.obj_reset_changes() + @base.remotable_classmethod + def create_limit(cls, context, project_id, resource, limit, user_id=None): + # NOTE(danms,comstud): Quotas likely needs an overhaul and currently + # doesn't map very well to objects. Since there is quite a bit of + # logic in the db api layer for this, just pass this through for now. + db.quota_create(context, project_id, resource, limit, user_id=user_id) + + @base.remotable_classmethod + def update_limit(cls, context, project_id, resource, limit, user_id=None): + # NOTE(danms,comstud): Quotas likely needs an overhaul and currently + # doesn't map very well to objects. Since there is quite a bit of + # logic in the db api layer for this, just pass this through for now. 
+ db.quota_update(context, project_id, resource, limit, user_id=user_id) + class QuotasNoOp(Quotas): def reserve(context, expire=None, project_id=None, user_id=None, diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index 4d593363ee..8fe05fe5cb 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -937,8 +937,8 @@ def test_object_serialization_iterables(self): 'NetworkList': '1.2-16510568c6e64cb8b358cb2b11333196', 'PciDevice': '1.1-523c46f960d93f78db55f0280b09441e', 'PciDeviceList': '1.0-5da7b4748a5a2594bae2cd0bd211cca2', - 'Quotas': '1.0-1933ffdc585c205445331fe842567eb3', - 'QuotasNoOp': '1.0-187356d5a8b8e4a3505148ea4e96cfcb', + 'Quotas': '1.1-7897deef00e6cd3095c8916f68d24418', + 'QuotasNoOp': '1.1-4b06fd721c586b907ddd6543a00d6c2f', 'SecurityGroup': '1.1-bba0e72865e0953793e796571692453b', 'SecurityGroupList': '1.0-9513387aabf08c2a7961ac4da4315ed4', 'SecurityGroupRule': '1.0-fdd020bdd7eb8bac744ad6f9a4ef8165', diff --git a/nova/tests/objects/test_quotas.py b/nova/tests/objects/test_quotas.py index daae2b52a2..c2a9892a7d 100644 --- a/nova/tests/objects/test_quotas.py +++ b/nova/tests/objects/test_quotas.py @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. 
+import mock + from nova import context from nova.objects import quotas as quotas_obj from nova import quota @@ -142,6 +144,20 @@ def test_rollback_none_reservations(self): self.mox.ReplayAll() quotas.rollback() + @mock.patch('nova.db.quota_create') + def test_create_limit(self, mock_create): + quotas_obj.Quotas.create_limit(self.context, 'fake-project', + 'foo', 10, user_id='user') + mock_create.assert_called_once_with(self.context, 'fake-project', + 'foo', 10, user_id='user') + + @mock.patch('nova.db.quota_update') + def test_update_limit(self, mock_update): + quotas_obj.Quotas.update_limit(self.context, 'fake-project', + 'foo', 10, user_id='user') + mock_update.assert_called_once_with(self.context, 'fake-project', + 'foo', 10, user_id='user') + class TestQuotasObject(_TestQuotasObject, test_objects._LocalTest): pass From 728794b74bf2a1c6b89e046096cfff4609d37082 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 15 Jul 2014 11:26:29 -0700 Subject: [PATCH 143/486] Make quotas APIv2 extension use Quotas object for create/update This makes the quotas extension use the Quotas object for the create/update operations instead of direct database access. 
Related to blueprint compute-manager-objects-juno Change-Id: Ia2db239d721c9ea680e11409e5eefc59e0dee941 --- nova/api/openstack/compute/contrib/quotas.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/compute/contrib/quotas.py b/nova/api/openstack/compute/contrib/quotas.py index c8ba23b0e3..8468a41d86 100644 --- a/nova/api/openstack/compute/contrib/quotas.py +++ b/nova/api/openstack/compute/contrib/quotas.py @@ -20,9 +20,9 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil import nova.context -from nova import db from nova import exception from nova.i18n import _ +from nova import objects from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova import quota @@ -196,11 +196,11 @@ def update(self, req, id, body): maximum = settable_quotas[key]['maximum'] self._validate_quota_limit(value, minimum, maximum) try: - db.quota_create(context, project_id, key, value, - user_id=user_id) + objects.Quotas.create_limit(context, project_id, + key, value, user_id=user_id) except exception.QuotaExists: - db.quota_update(context, project_id, key, value, - user_id=user_id) + objects.Quotas.update_limit(context, project_id, + key, value, user_id=user_id) except exception.AdminRequired: raise webob.exc.HTTPForbidden() return {'quota_set': self._get_quotas(context, id, user_id=user_id)} From e8ea5388f27181efa6a5dee9e0a44183c330c64d Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 17 Jul 2014 10:38:02 -0700 Subject: [PATCH 144/486] Make quotas APIv3 extension use Quotas object for create/update This makes the quotas extension use the Quotas object for the create/update operations instead of direct database access. 
Related to blueprint compute-manager-objects-juno Change-Id: I74295a6f8ebca46421eafedbb28e677d2361c2b8 --- nova/api/openstack/compute/plugins/v3/quota_sets.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/compute/plugins/v3/quota_sets.py b/nova/api/openstack/compute/plugins/v3/quota_sets.py index 8b9bf46d28..e857d7a7d7 100644 --- a/nova/api/openstack/compute/plugins/v3/quota_sets.py +++ b/nova/api/openstack/compute/plugins/v3/quota_sets.py @@ -21,9 +21,9 @@ from nova.api.openstack import wsgi from nova.api import validation import nova.context -from nova import db from nova import exception from nova.i18n import _ +from nova import objects from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova import quota @@ -158,11 +158,11 @@ def update(self, req, id, body): maximum = settable_quotas[key]['maximum'] self._validate_quota_limit(value, minimum, maximum) try: - db.quota_create(context, project_id, key, value, - user_id=user_id) + objects.Quotas.create_limit(context, project_id, + key, value, user_id=user_id) except exception.QuotaExists: - db.quota_update(context, project_id, key, value, - user_id=user_id) + objects.Quotas.update_limit(context, project_id, + key, value, user_id=user_id) except exception.AdminRequired: raise webob.exc.HTTPForbidden() return self._format_quota_set(id, self._get_quotas(context, id, From 2c5c4f7ab67aefd6282b343a8679bceb03340e55 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 23 Jul 2014 14:23:34 +0000 Subject: [PATCH 145/486] Updated from global requirements Change-Id: Ie0e3f03a5cf1a90236100a61653fea72d2d1a69f --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7a6f2f9a86..d118ccb14a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ boto>=2.12.0,!=2.13.0 decorator>=3.4.0 eventlet>=0.13.0 Jinja2 -keystonemiddleware 
+keystonemiddleware>=1.0.0 kombu>=2.4.8 lxml>=2.3 Routes>=1.12.3,!=2.0 From 0e98f5a522c08b17c98ed108459a179d14eacd4a Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Wed, 16 Jul 2014 16:47:16 -0700 Subject: [PATCH 146/486] Update database columns nullable to match model Some columns were created with a different nullable value than the model defines. Closes-Bug: 1343331 Change-Id: Iafc72e571648ba1763f457ae00ac84a63c94e0cd --- .../versions/247_nullable_mismatch.py | 43 +++++++++++++++++++ nova/tests/db/test_migrations.py | 20 +++++++++ 2 files changed, 63 insertions(+) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/247_nullable_mismatch.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/247_nullable_mismatch.py b/nova/db/sqlalchemy/migrate_repo/versions/247_nullable_mismatch.py new file mode 100644 index 0000000000..78f608732c --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/247_nullable_mismatch.py @@ -0,0 +1,43 @@ +# Copyright 2014 OpenStack Foundation +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from sqlalchemy import MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData(bind=migrate_engine) + + quota_usages = Table('quota_usages', meta, autoload=True) + quota_usages.c.resource.alter(nullable=False) + + pci_devices = Table('pci_devices', meta, autoload=True) + pci_devices.c.deleted.alter(nullable=True) + pci_devices.c.product_id.alter(nullable=False) + pci_devices.c.vendor_id.alter(nullable=False) + pci_devices.c.dev_type.alter(nullable=False) + + +def downgrade(migrate_engine): + meta = MetaData(bind=migrate_engine) + + quota_usages = Table('quota_usages', meta, autoload=True) + quota_usages.c.resource.alter(nullable=True) + + pci_devices = Table('pci_devices', meta, autoload=True) + pci_devices.c.deleted.alter(nullable=False) + pci_devices.c.product_id.alter(nullable=True) + pci_devices.c.vendor_id.alter(nullable=True) + pci_devices.c.dev_type.alter(nullable=True) diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py index 03e761056f..03f68b05da 100644 --- a/nova/tests/db/test_migrations.py +++ b/nova/tests/db/test_migrations.py @@ -717,6 +717,26 @@ def _post_downgrade_246(self, engine): self.assertEqual(0, len([fk for fk in pci_devices.foreign_keys if fk.parent.name == 'compute_node_id'])) + def _check_247(self, engine, data): + quota_usages = oslodbutils.get_table(engine, 'quota_usages') + self.assertFalse(quota_usages.c.resource.nullable) + + pci_devices = oslodbutils.get_table(engine, 'pci_devices') + self.assertTrue(pci_devices.c.deleted.nullable) + self.assertFalse(pci_devices.c.product_id.nullable) + self.assertFalse(pci_devices.c.vendor_id.nullable) + self.assertFalse(pci_devices.c.dev_type.nullable) + + def _post_downgrade_247(self, engine): + quota_usages = oslodbutils.get_table(engine, 'quota_usages') + self.assertTrue(quota_usages.c.resource.nullable) + + pci_devices = oslodbutils.get_table(engine, 'pci_devices') + self.assertFalse(pci_devices.c.deleted.nullable) + 
self.assertTrue(pci_devices.c.product_id.nullable) + self.assertTrue(pci_devices.c.vendor_id.nullable) + self.assertTrue(pci_devices.c.dev_type.nullable) + class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn): """Test sqlalchemy-migrate migrations.""" From 8ecc07e8f21bddf60fe836f34beab470589918e0 Mon Sep 17 00:00:00 2001 From: Derek Higgins Date: Wed, 23 Jul 2014 17:24:18 +0100 Subject: [PATCH 147/486] Revert "Add extensible resources to resource tracker" This bug added a regression to both nova-bm and ironic, neither can deploy instances. Fixes-bug: #1347795 This reverts commit 50b4ba4ee583d25eef10a6608172c002f9bec6f2. Change-Id: Icc8d629467911972480b633c7808a0964c9f1c7d --- nova/compute/claims.py | 39 ++- nova/compute/resource_tracker.py | 37 +-- nova/compute/resources/__init__.py | 133 -------- nova/compute/resources/base.py | 93 ------ nova/compute/resources/vcpu.py | 83 ----- nova/compute/stats.py | 20 +- nova/tests/compute/fake_resource_tracker.py | 2 - nova/tests/compute/test_claims.py | 48 +-- nova/tests/compute/test_resource_tracker.py | 42 +-- nova/tests/compute/test_resources.py | 344 -------------------- nova/tests/compute/test_stats.py | 3 + setup.cfg | 2 - 12 files changed, 84 insertions(+), 762 deletions(-) delete mode 100644 nova/compute/resources/__init__.py delete mode 100644 nova/compute/resources/base.py delete mode 100644 nova/compute/resources/vcpu.py delete mode 100644 nova/tests/compute/test_resources.py diff --git a/nova/compute/claims.py b/nova/compute/claims.py index 4f5356ce78..046d171692 100644 --- a/nova/compute/claims.py +++ b/nova/compute/claims.py @@ -42,6 +42,10 @@ def disk_gb(self): def memory_mb(self): return 0 + @property + def vcpus(self): + return 0 + def __enter__(self): return self @@ -53,8 +57,8 @@ def abort(self): pass def __str__(self): - return "[Claim: %d MB memory, %d GB disk]" % (self.memory_mb, - self.disk_gb) + return "[Claim: %d MB memory, %d GB disk, %d VCPUS]" % (self.memory_mb, + 
self.disk_gb, self.vcpus) class Claim(NopClaim): @@ -98,6 +102,10 @@ def disk_gb(self): def memory_mb(self): return self.instance['memory_mb'] + self.overhead['memory_mb'] + @property + def vcpus(self): + return self.instance['vcpus'] + def abort(self): """Compute operation requiring claimed resources has failed or been aborted. @@ -122,16 +130,18 @@ def _claim_test(self, resources, limits=None): # unlimited: memory_mb_limit = limits.get('memory_mb') disk_gb_limit = limits.get('disk_gb') + vcpu_limit = limits.get('vcpu') msg = _("Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d " - "GB") - params = {'memory_mb': self.memory_mb, 'disk_gb': self.disk_gb} + "GB, VCPUs %(vcpus)d") + params = {'memory_mb': self.memory_mb, 'disk_gb': self.disk_gb, + 'vcpus': self.vcpus} LOG.audit(msg % params, instance=self.instance) reasons = [self._test_memory(resources, memory_mb_limit), self._test_disk(resources, disk_gb_limit), + self._test_cpu(resources, vcpu_limit), self._test_pci()] - reasons = reasons + self._test_ext_resources(limits) reasons = [r for r in reasons if r is not None] if len(reasons) > 0: raise exception.ComputeResourcesUnavailable(reason= @@ -166,9 +176,14 @@ def _test_pci(self): if not can_claim: return _('Claim pci failed.') - def _test_ext_resources(self, limits): - return self.tracker.ext_resources_handler.test_resources( - self.instance, limits) + def _test_cpu(self, resources, limit): + type_ = _("CPUs") + unit = "VCPUs" + total = resources['vcpus'] + used = resources['vcpus_used'] + requested = self.vcpus + + return self._test(type_, unit, total, used, requested, limit) def _test(self, type_, unit, total, used, requested, limit): """Test if the given type of resource needed for a claim can be safely @@ -220,6 +235,10 @@ def disk_gb(self): def memory_mb(self): return self.instance_type['memory_mb'] + self.overhead['memory_mb'] + @property + def vcpus(self): + return self.instance_type['vcpus'] + def _test_pci(self): pci_requests = 
pci_request.get_instance_pci_requests( self.instance, 'new_') @@ -229,10 +248,6 @@ def _test_pci(self): if not claim: return _('Claim pci failed.') - def _test_ext_resources(self, limits): - return self.tracker.ext_resources_handler.test_resources( - self.instance_type, limits) - def abort(self): """Compute operation requiring claimed resources has failed or been aborted. diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py index d1eb96cf72..fb65f77c3a 100644 --- a/nova/compute/resource_tracker.py +++ b/nova/compute/resource_tracker.py @@ -24,7 +24,6 @@ from nova.compute import claims from nova.compute import flavors from nova.compute import monitors -from nova.compute import resources as ext_resources from nova.compute import task_states from nova.compute import vm_states from nova import conductor @@ -47,10 +46,7 @@ help='Amount of memory in MB to reserve for the host'), cfg.StrOpt('compute_stats_class', default='nova.compute.stats.Stats', - help='Class that will manage stats for the local compute host'), - cfg.ListOpt('compute_resources', - default=['vcpu'], - help='The names of the extra resources to track.'), + help='Class that will manage stats for the local compute host') ] CONF = cfg.CONF @@ -79,8 +75,6 @@ def __init__(self, host, driver, nodename): self.conductor_api = conductor.API() monitor_handler = monitors.ResourceMonitorHandler() self.monitors = monitor_handler.choose_monitors(self) - self.ext_resources_handler = \ - ext_resources.ResourceHandler(CONF.compute_resources) self.notifier = rpc.get_notifier() self.old_resources = {} @@ -235,10 +229,12 @@ def drop_resize_claim(self, instance, instance_type=None, prefix='new_'): instance_type = self._get_instance_type(ctxt, instance, prefix) if instance_type['id'] == itype['id']: + self.stats.update_stats_for_migration(itype, sign=-1) if self.pci_tracker: self.pci_tracker.update_pci_for_migration(instance, sign=-1) self._update_usage(self.compute_node, itype, sign=-1) + 
self.compute_node['stats'] = jsonutils.dumps(self.stats) ctxt = context.get_admin_context() self._update(ctxt, self.compute_node) @@ -381,20 +377,9 @@ def _sync_compute_node(self, context, resources): LOG.info(_('Compute_service record updated for %(host)s:%(node)s') % {'host': self.host, 'node': self.nodename}) - def _write_ext_resources(self, resources): - resources['stats'] = {} - resources['stats'].update(self.stats) - self.ext_resources_handler.write_resources(resources) - def _create(self, context, values): """Create the compute node in the DB.""" # initialize load stats from existing instances: - self._write_ext_resources(values) - # NOTE(pmurray): the stats field is stored as a json string. The - # json conversion will be done automatically by the ComputeNode object - # so this can be removed when using ComputeNode. - values['stats'] = jsonutils.dumps(values['stats']) - self.compute_node = self.conductor_api.compute_node_create(context, values) @@ -464,17 +449,10 @@ def _resource_change(self, resources): def _update(self, context, values): """Persist the compute node updates to the DB.""" - self._write_ext_resources(values) - # NOTE(pmurray): the stats field is stored as a json string. The - # json conversion will be done automatically by the ComputeNode object - # so this can be removed when using ComputeNode. 
- values['stats'] = jsonutils.dumps(values['stats']) - if not self._resource_change(values): return if "service" in self.compute_node: del self.compute_node['service'] - self.compute_node = self.conductor_api.compute_node_update( context, self.compute_node, values) if self.pci_tracker: @@ -497,7 +475,7 @@ def _update_usage(self, resources, usage, sign=1): resources['local_gb_used']) resources['running_vms'] = self.stats.num_instances - self.ext_resources_handler.update_from_instance(usage, sign) + resources['vcpus_used'] = self.stats.num_vcpus_used def _update_usage_from_migration(self, context, instance, resources, migration): @@ -540,9 +518,11 @@ def _update_usage_from_migration(self, context, instance, resources, migration['old_instance_type_id']) if itype: + self.stats.update_stats_for_migration(itype) if self.pci_tracker: self.pci_tracker.update_pci_for_migration(instance) self._update_usage(resources, itype) + resources['stats'] = jsonutils.dumps(self.stats) if self.pci_tracker: resources['pci_stats'] = jsonutils.dumps( self.pci_tracker.stats) @@ -615,6 +595,7 @@ def _update_usage_from_instance(self, resources, instance): self._update_usage(resources, instance, sign=sign) resources['current_workload'] = self.stats.calculate_workload() + resources['stats'] = jsonutils.dumps(self.stats) if self.pci_tracker: resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats) else: @@ -634,6 +615,7 @@ def _update_usage_from_instances(self, resources, instances): # set some initial values, reserve room for host/hypervisor: resources['local_gb_used'] = CONF.reserved_host_disk_mb / 1024 resources['memory_mb_used'] = CONF.reserved_host_memory_mb + resources['vcpus_used'] = 0 resources['free_ram_mb'] = (resources['memory_mb'] - resources['memory_mb_used']) resources['free_disk_gb'] = (resources['local_gb'] - @@ -641,9 +623,6 @@ def _update_usage_from_instances(self, resources, instances): resources['current_workload'] = 0 resources['running_vms'] = 0 - # Reset values for 
extended resources - self.ext_resources_handler.reset_resources(resources, self.driver) - for instance in instances: if instance['vm_state'] == vm_states.DELETED: continue diff --git a/nova/compute/resources/__init__.py b/nova/compute/resources/__init__.py deleted file mode 100644 index cb023ea523..0000000000 --- a/nova/compute/resources/__init__.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import stevedore - -from nova.i18n import _LW -from nova.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - -RESOURCE_NAMESPACE = 'nova.compute.resources' - - -class ResourceHandler(): - - def _log_missing_plugins(self, names): - for name in names: - if name not in self._mgr.names(): - LOG.warn(_LW('Compute resource plugin %s was not loaded') % - name) - - def __init__(self, names, propagate_map_exceptions=False): - """Initialise the resource handler by loading the plugins. - - The ResourceHandler uses stevedore to load the resource plugins. - The handler can handle and report exceptions raised in the plugins - depending on the value of the propagate_map_exceptions parameter. - It is useful in testing to propagate exceptions so they are exposed - as part of the test. If exceptions are not propagated they are - logged at error level. - - Any named plugins that are not located are logged. 
- - :param names: the list of plugins to load by name - :param propagate_map_exceptions: True indicates exceptions in the - plugins should be raised, False indicates they should be handled and - logged. - """ - self._mgr = stevedore.NamedExtensionManager( - namespace=RESOURCE_NAMESPACE, - names=names, - propagate_map_exceptions=propagate_map_exceptions, - invoke_on_load=True) - self._log_missing_plugins(names) - - def reset_resources(self, resources, driver): - """Reset the resources to their initial state. - - Each plugin is called to reset its state. The resources data provided - is initial state gathered from the hypervisor. The driver is also - provided in case the plugin needs to obtain additional information - from the driver, for example, the memory calculation obtains - the memory overhead from the driver. - - :param resources: the resources reported by the hypervisor - :param driver: the driver for the hypervisor - - :returns: None - """ - if self._mgr.extensions: - self._mgr.map_method('reset', resources, driver) - - def test_resources(self, usage, limits): - """Test the ability to support the given instance. - - Each resource plugin is called to determine if it's resource is able - to support the additional requirements of a new instance. The - plugins either return None to indicate they have sufficient resource - available or a human readable string to indicate why they can not. - - :param usage: the additional resource usage - :param limits: limits used for the calculation - - :returns: a list or return values from the plugins - """ - if not self._mgr.extensions: - return [] - - reasons = self._mgr.map_method('test', usage, limits) - return reasons - - def update_from_instance(self, usage, sign=1): - """Update the resource information to reflect the allocation for - an instance with the given resource usage. - - :param usage: the resource usage of the instance - :param sign: has value 1 or -1. 
1 indicates the instance is being - added to the current usage, -1 indicates the instance is being removed. - - :returns: None - """ - if not self._mgr.extensions: - return - - if sign == 1: - self._mgr.map_method('add_instance', usage) - else: - self._mgr.map_method('remove_instance', usage) - - def write_resources(self, resources): - """Write the resource data to populate the resources. - - Each resource plugin is called to write its resource data to - resources. - - :param resources: the compute node resources - - :returns: None - """ - if self._mgr.extensions: - self._mgr.map_method('write', resources) - - def report_free_resources(self): - """Each resource plugin is called to log free resource information. - - :returns: None - """ - if not self._mgr.extensions: - return - - self._mgr.map_method('report_free') diff --git a/nova/compute/resources/base.py b/nova/compute/resources/base.py deleted file mode 100644 index aebd29fb40..0000000000 --- a/nova/compute/resources/base.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class Resource(object): - """This base class defines the interface used for compute resource - plugins. It is not necessary to use this base class, but all compute - resource plugins must implement the abstract methods found here. 
- An instance of the plugin object is instantiated when it is loaded - by calling __init__() with no parameters. - """ - - @abc.abstractmethod - def reset(self, resources, driver): - """Set the resource to an initial state based on the resource - view discovered from the hypervisor. - """ - pass - - @abc.abstractmethod - def test(self, usage, limits): - """Test to see if we have sufficient resources to allocate for - an instance with the given resource usage. - - :param usage: the resource usage of the instances - :param limits: limits to apply - - :returns: None if the test passes or a string describing the reason - why the test failed - """ - pass - - @abc.abstractmethod - def add_instance(self, usage): - """Update resource information adding allocation according to the - given resource usage. - - :param usage: the resource usage of the instance being added - - :returns: None - """ - pass - - @abc.abstractmethod - def remove_instance(self, usage): - """Update resource information removing allocation according to the - given resource usage. - - :param usage: the resource usage of the instance being removed - - :returns: None - - """ - pass - - @abc.abstractmethod - def write(self, resources): - """Write resource data to populate resources. - - :param resources: the resources data to be populated - - :returns: None - """ - pass - - @abc.abstractmethod - def report_free(self): - """Log free resources. - - This method logs how much free resource is held by - the resource plugin. - - :returns: None - """ - pass diff --git a/nova/compute/resources/vcpu.py b/nova/compute/resources/vcpu.py deleted file mode 100644 index e7290a3e1a..0000000000 --- a/nova/compute/resources/vcpu.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova.compute.resources import base -from nova.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -class VCPU(base.Resource): - """VCPU compute resource plugin. - - This is effectively a simple counter based on the vcpu requirement of each - instance. - """ - def __init__(self): - # initialize to a 'zero' resource. - # reset will be called to set real resource values - self._total = 0 - self._used = 0 - - def reset(self, resources, driver): - # total vcpu is reset to the value taken from resources. - self._total = int(resources['vcpus']) - self._used = 0 - - def _get_requested(self, usage): - return int(usage.get('vcpus', 0)) - - def _get_limit(self, limits): - if limits and 'vcpu' in limits: - return int(limits.get('vcpu')) - - def test(self, usage, limits): - requested = self._get_requested(usage) - limit = self._get_limit(limits) - - LOG.debug('Total CPUs: %(total)d VCPUs, used: %(used).02f VCPUs' % - {'total': self._total, 'used': self._used}) - - if limit is None: - # treat resource as unlimited: - LOG.debug('CPUs limit not specified, defaulting to unlimited') - return - - free = limit - self._used - - # Oversubscribed resource policy info: - LOG.debug('CPUs limit: %(limit).02f VCPUs, free: %(free).02f VCPUs' % - {'limit': limit, 'free': free}) - - if requested > free: - return ('Free CPUs %(free).02f VCPUs < ' - 'requested %(requested)d VCPUs' % - {'free': free, 'requested': requested}) - - def add_instance(self, usage): - requested = int(usage.get('vcpus', 0)) - self._used += requested - - def remove_instance(self, 
usage): - requested = int(usage.get('vcpus', 0)) - self._used -= requested - - def write(self, resources): - resources['vcpus'] = self._total - resources['vcpus_used'] = self._used - - def report_free(self): - free_vcpus = self._total - self._used - LOG.debug('Free VCPUs: %s' % free_vcpus) diff --git a/nova/compute/stats.py b/nova/compute/stats.py index b347b8d5c0..bf183b012c 100644 --- a/nova/compute/stats.py +++ b/nova/compute/stats.py @@ -73,6 +73,10 @@ def num_os_type(self, os_type): key = "num_os_type_%s" % os_type return self.get(key, 0) + @property + def num_vcpus_used(self): + return self.get("num_vcpus_used", 0) + def update_stats_for_instance(self, instance): """Update stats after an instance is changed.""" @@ -87,12 +91,14 @@ def update_stats_for_instance(self, instance): self._decrement("num_task_%s" % old_state['task_state']) self._decrement("num_os_type_%s" % old_state['os_type']) self._decrement("num_proj_%s" % old_state['project_id']) + x = self.get("num_vcpus_used", 0) + self["num_vcpus_used"] = x - old_state['vcpus'] else: # new instance self._increment("num_instances") # Now update stats from the new instance state: - (vm_state, task_state, os_type, project_id) = \ + (vm_state, task_state, os_type, project_id, vcpus) = \ self._extract_state_from_instance(instance) if vm_state == vm_states.DELETED: @@ -104,10 +110,16 @@ def update_stats_for_instance(self, instance): self._increment("num_task_%s" % task_state) self._increment("num_os_type_%s" % os_type) self._increment("num_proj_%s" % project_id) + x = self.get("num_vcpus_used", 0) + self["num_vcpus_used"] = x + vcpus # save updated I/O workload in stats: self["io_workload"] = self.io_workload + def update_stats_for_migration(self, instance_type, sign=1): + x = self.get("num_vcpus_used", 0) + self["num_vcpus_used"] = x + (sign * instance_type['vcpus']) + def _decrement(self, key): x = self.get(key, 0) self[key] = x - 1 @@ -124,8 +136,10 @@ def _extract_state_from_instance(self, instance): 
task_state = instance['task_state'] os_type = instance['os_type'] project_id = instance['project_id'] + vcpus = instance['vcpus'] self.states[uuid] = dict(vm_state=vm_state, task_state=task_state, - os_type=os_type, project_id=project_id) + os_type=os_type, project_id=project_id, + vcpus=vcpus) - return (vm_state, task_state, os_type, project_id) + return (vm_state, task_state, os_type, project_id, vcpus) diff --git a/nova/tests/compute/fake_resource_tracker.py b/nova/tests/compute/fake_resource_tracker.py index b0fec2042b..c8f1e14647 100644 --- a/nova/tests/compute/fake_resource_tracker.py +++ b/nova/tests/compute/fake_resource_tracker.py @@ -20,12 +20,10 @@ class FakeResourceTracker(resource_tracker.ResourceTracker): """Version without a DB requirement.""" def _create(self, context, values): - self._write_ext_resources(values) self.compute_node = values self.compute_node['id'] = 1 def _update(self, context, values, prune_stats=False): - self._write_ext_resources(values) self.compute_node.update(values) def _get_service(self, context): diff --git a/nova/tests/compute/test_claims.py b/nova/tests/compute/test_claims.py index 0df1875c17..be60f54016 100644 --- a/nova/tests/compute/test_claims.py +++ b/nova/tests/compute/test_claims.py @@ -25,21 +25,10 @@ from nova import test -class FakeResourceHandler(object): - test_called = False - usage_is_instance = False - - def test_resources(self, usage, limits): - self.test_called = True - self.usage_is_itype = usage.get('name') is 'fakeitype' - return [] - - class DummyTracker(object): icalled = False rcalled = False pci_tracker = pci_manager.PciDevTracker() - ext_resources_handler = FakeResourceHandler() def abort_instance_claim(self, *args, **kwargs): self.icalled = True @@ -112,6 +101,9 @@ def assertRaisesRegexp(self, re_obj, e, fn, *a, **kw): except e as ee: self.assertTrue(re.search(re_obj, str(ee))) + def test_cpu_unlimited(self): + self._claim(vcpus=100000) + def test_memory_unlimited(self): 
self._claim(memory_mb=99999999) @@ -121,6 +113,10 @@ def test_disk_unlimited_root(self): def test_disk_unlimited_ephemeral(self): self._claim(ephemeral_gb=999999) + def test_cpu_oversubscription(self): + limits = {'vcpu': 16} + self._claim(limits, vcpus=8) + def test_memory_with_overhead(self): overhead = {'memory_mb': 8} limits = {'memory_mb': 2048} @@ -135,6 +131,11 @@ def test_memory_with_overhead_insufficient(self): self._claim, limits=limits, overhead=overhead, memory_mb=2040) + def test_cpu_insufficient(self): + limits = {'vcpu': 16} + self.assertRaises(exception.ComputeResourcesUnavailable, + self._claim, limits=limits, vcpus=17) + def test_memory_oversubscription(self): self._claim(memory_mb=4096) @@ -161,6 +162,21 @@ def test_disk_and_memory_insufficient(self): self._claim, limits=limits, root_gb=10, ephemeral_gb=40, memory_mb=16384) + def test_disk_and_cpu_insufficient(self): + limits = {'disk_gb': 45, 'vcpu': 16} + self.assertRaisesRegexp(re.compile("disk.*vcpus", re.IGNORECASE), + exception.ComputeResourcesUnavailable, + self._claim, limits=limits, root_gb=10, ephemeral_gb=40, + vcpus=17) + + def test_disk_and_cpu_and_memory_insufficient(self): + limits = {'disk_gb': 45, 'vcpu': 16, 'memory_mb': 8192} + pat = "memory.*disk.*vcpus" + self.assertRaisesRegexp(re.compile(pat, re.IGNORECASE), + exception.ComputeResourcesUnavailable, + self._claim, limits=limits, root_gb=10, ephemeral_gb=40, + vcpus=17, memory_mb=16384) + def test_pci_pass(self): dev_dict = { 'compute_node_id': 1, @@ -208,11 +224,6 @@ def test_pci_pass_no_requests(self): self._set_pci_request(claim) claim._test_pci() - def test_ext_resources(self): - self._claim() - self.assertTrue(self.tracker.ext_resources_handler.test_called) - self.assertFalse(self.tracker.ext_resources_handler.usage_is_itype) - def test_abort(self): claim = self._abort() self.assertTrue(claim.tracker.icalled) @@ -249,11 +260,6 @@ def _set_pci_request(self, claim): claim.instance.update( 
system_metadata={'new_pci_requests': jsonutils.dumps(request)}) - def test_ext_resources(self): - self._claim() - self.assertTrue(self.tracker.ext_resources_handler.test_called) - self.assertTrue(self.tracker.ext_resources_handler.usage_is_itype) - def test_abort(self): claim = self._abort() self.assertTrue(claim.tracker.rcalled) diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py index 06112e245a..364cfd6e2d 100644 --- a/nova/tests/compute/test_resource_tracker.py +++ b/nova/tests/compute/test_resource_tracker.py @@ -22,7 +22,6 @@ from nova.compute import flavors from nova.compute import resource_tracker -from nova.compute import resources from nova.compute import task_states from nova.compute import vm_states from nova import context @@ -46,7 +45,6 @@ EPHEMERAL_GB = 1 FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB FAKE_VIRT_VCPUS = 1 -RESOURCE_NAMES = ['vcpu'] CONF = cfg.CONF @@ -162,10 +160,8 @@ def _create_compute_node(self, values=None): "current_workload": 1, "running_vms": 0, "cpu_info": None, - "stats": { - "num_instances": "1", - }, - "hypervisor_hostname": "fakenode", + "stats": [{"key": "num_instances", "value": "1"}], + "hypervisor_hostname": "fakenode", } if values: compute.update(values) @@ -318,8 +314,6 @@ def _tracker(self, host=None): driver = self._driver() tracker = resource_tracker.ResourceTracker(host, driver, node) - tracker.ext_resources_handler = \ - resources.ResourceHandler(RESOURCE_NAMES, True) return tracker @@ -572,38 +566,6 @@ def _driver(self): return FakeVirtDriver(pci_support=True) -class TrackerExtraResourcesTestCase(BaseTrackerTestCase): - - def setUp(self): - super(TrackerExtraResourcesTestCase, self).setUp() - self.driver = self._driver() - - def _driver(self): - return FakeVirtDriver() - - def test_set_empty_ext_resources(self): - resources = self.driver.get_available_resource(self.tracker.nodename) - self.assertNotIn('stats', resources) - 
self.tracker._write_ext_resources(resources) - self.assertIn('stats', resources) - - def test_set_extra_resources(self): - def fake_write_resources(resources): - resources['stats']['resA'] = '123' - resources['stats']['resB'] = 12 - - self.stubs.Set(self.tracker.ext_resources_handler, - 'write_resources', - fake_write_resources) - - resources = self.driver.get_available_resource(self.tracker.nodename) - self.tracker._write_ext_resources(resources) - - expected = {"resA": "123", "resB": 12} - self.assertEqual(sorted(expected), - sorted(resources['stats'])) - - class InstanceClaimTestCase(BaseTrackerTestCase): def test_update_usage_only_for_tracked(self): diff --git a/nova/tests/compute/test_resources.py b/nova/tests/compute/test_resources.py deleted file mode 100644 index db2722ccb5..0000000000 --- a/nova/tests/compute/test_resources.py +++ /dev/null @@ -1,344 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for the compute extra resources framework.""" - - -from oslo.config import cfg -from stevedore import extension -from stevedore import named - -from nova.compute import resources -from nova.compute.resources import base -from nova.compute.resources import vcpu -from nova import context -from nova.i18n import _ -from nova.objects import flavor as flavor_obj -from nova import test -from nova.tests.fake_instance import fake_instance_obj - -CONF = cfg.CONF - - -class FakeResourceHandler(resources.ResourceHandler): - def __init__(self, extensions): - self._mgr = \ - named.NamedExtensionManager.make_test_instance(extensions) - - -class FakeResource(base.Resource): - - def __init__(self): - self.total_res = 0 - self.used_res = 0 - - def _get_requested(self, usage): - if 'extra_specs' not in usage: - return - if self.resource_name not in usage['extra_specs']: - return - req = usage['extra_specs'][self.resource_name] - return int(req) - - def _get_limit(self, limits): - if self.resource_name not in limits: - return - limit = limits[self.resource_name] - return int(limit) - - def reset(self, resources, driver): - self.total_res = 0 - self.used_res = 0 - - def test(self, usage, limits): - requested = self._get_requested(usage) - if not requested: - return - - limit = self._get_limit(limits) - if not limit: - return - - free = limit - self.used_res - if requested <= free: - return - else: - return (_('Free %(free)d < requested %(requested)d ') % - {'free': free, 'requested': requested}) - - def add_instance(self, usage): - requested = self._get_requested(usage) - if requested: - self.used_res += requested - - def remove_instance(self, usage): - requested = self._get_requested(usage) - if requested: - self.used_res -= requested - - def write(self, resources): - pass - - def report_free(self): - return "Free %s" % (self.total_res - self.used_res) - - -class ResourceA(FakeResource): - - def reset(self, resources, driver): - # ResourceA uses a configuration option - 
self.total_res = int(CONF.resA) - self.used_res = 0 - self.resource_name = 'resource:resA' - - def write(self, resources): - resources['resA'] = self.total_res - resources['used_resA'] = self.used_res - - -class ResourceB(FakeResource): - - def reset(self, resources, driver): - # ResourceB uses resource details passed in parameter resources - self.total_res = resources['resB'] - self.used_res = 0 - self.resource_name = 'resource:resB' - - def write(self, resources): - resources['resB'] = self.total_res - resources['used_resB'] = self.used_res - - -def fake_flavor_obj(**updates): - flavor = flavor_obj.Flavor() - flavor.id = 1 - flavor.name = 'fakeflavor' - flavor.memory_mb = 8000 - flavor.vcpus = 3 - flavor.root_gb = 11 - flavor.ephemeral_gb = 4 - flavor.swap = 0 - flavor.rxtx_factor = 1.0 - flavor.vcpu_weight = 1 - if updates: - flavor.update(updates) - return flavor - - -class BaseTestCase(test.TestCase): - - def _initialize_used_res_counter(self): - # Initialize the value for the used resource - for ext in self.r_handler._mgr.extensions: - ext.obj.used_res = 0 - - def setUp(self): - super(BaseTestCase, self).setUp() - - # initialize flavors and stub get_by_id to - # get flavors from here - self._flavors = {} - self.ctxt = context.get_admin_context() - - # Create a flavor without extra_specs defined - _flavor_id = 1 - _flavor = fake_flavor_obj(id=_flavor_id) - self._flavors[_flavor_id] = _flavor - - # Create a flavor with extra_specs defined - _flavor_id = 2 - requested_resA = 5 - requested_resB = 7 - requested_resC = 7 - _extra_specs = {'resource:resA': requested_resA, - 'resource:resB': requested_resB, - 'resource:resC': requested_resC} - _flavor = fake_flavor_obj(id=_flavor_id, - extra_specs=_extra_specs) - self._flavors[_flavor_id] = _flavor - - # create fake resource extensions and resource handler - _extensions = [ - extension.Extension('resA', None, ResourceA, ResourceA()), - extension.Extension('resB', None, ResourceB, ResourceB()), - ] - self.r_handler = 
FakeResourceHandler(_extensions) - - # Resources details can be passed to each plugin or can be specified as - # configuration options - driver_resources = {'resB': 5} - CONF.resA = '10' - - # initialise the resources - self.r_handler.reset_resources(driver_resources, None) - - def test_update_from_instance_with_extra_specs(self): - # Flavor with extra_specs - _flavor_id = 2 - sign = 1 - self.r_handler.update_from_instance(self._flavors[_flavor_id], sign) - - expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA'] - expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB'] - self.assertEqual(int(expected_resA), - self.r_handler._mgr['resA'].obj.used_res) - self.assertEqual(int(expected_resB), - self.r_handler._mgr['resB'].obj.used_res) - - def test_update_from_instance_without_extra_specs(self): - # Flavor id without extra spec - _flavor_id = 1 - self._initialize_used_res_counter() - self.r_handler.resource_list = [] - sign = 1 - self.r_handler.update_from_instance(self._flavors[_flavor_id], sign) - self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res) - self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res) - - def test_write_resources(self): - self._initialize_used_res_counter() - extra_resources = {} - expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0} - self.r_handler.write_resources(extra_resources) - self.assertEqual(expected, extra_resources) - - def test_test_resources_without_extra_specs(self): - limits = {} - # Flavor id without extra_specs - flavor = self._flavors[1] - result = self.r_handler.test_resources(flavor, limits) - self.assertEqual([None, None], result) - - def test_test_resources_with_limits_for_different_resource(self): - limits = {'resource:resC': 20} - # Flavor id with extra_specs - flavor = self._flavors[2] - result = self.r_handler.test_resources(flavor, limits) - self.assertEqual([None, None], result) - - def test_passing_test_resources(self): - limits = {'resource:resA': 10, 
'resource:resB': 20} - # Flavor id with extra_specs - flavor = self._flavors[2] - self._initialize_used_res_counter() - result = self.r_handler.test_resources(flavor, limits) - self.assertEqual([None, None], result) - - def test_failing_test_resources_for_single_resource(self): - limits = {'resource:resA': 4, 'resource:resB': 20} - # Flavor id with extra_specs - flavor = self._flavors[2] - self._initialize_used_res_counter() - result = self.r_handler.test_resources(flavor, limits) - expected = ['Free 4 < requested 5 ', None] - self.assertEqual(sorted(expected), - sorted(result)) - - def test_empty_resource_handler(self): - """An empty resource handler has no resource extensions, - should have no effect, and should raise no exceptions. - """ - empty_r_handler = FakeResourceHandler([]) - - resources = {} - empty_r_handler.reset_resources(resources, None) - - flavor = self._flavors[1] - sign = 1 - empty_r_handler.update_from_instance(flavor, sign) - - limits = {} - test_result = empty_r_handler.test_resources(flavor, limits) - self.assertEqual([], test_result) - - sign = -1 - empty_r_handler.update_from_instance(flavor, sign) - - extra_resources = {} - expected_extra_resources = extra_resources - empty_r_handler.write_resources(extra_resources) - self.assertEqual(expected_extra_resources, extra_resources) - - empty_r_handler.report_free_resources() - - def test_vcpu_resource_load(self): - # load the vcpu example - names = ['vcpu'] - real_r_handler = resources.ResourceHandler(names) - ext_names = real_r_handler._mgr.names() - self.assertEqual(names, ext_names) - - # check the extension loaded is the one we expect - # and an instance of the object has been created - ext = real_r_handler._mgr['vcpu'] - self.assertIsInstance(ext.obj, vcpu.VCPU) - - -class TestVCPU(test.TestCase): - - def setUp(self): - super(TestVCPU, self).setUp() - self._vcpu = vcpu.VCPU() - self._vcpu._total = 10 - self._vcpu._used = 0 - self._flavor = fake_flavor_obj(vcpus=5) - self._big_flavor = 
fake_flavor_obj(vcpus=20) - self._instance = fake_instance_obj(None) - - def test_reset(self): - # set vcpu values to something different to test reset - self._vcpu._total = 10 - self._vcpu._used = 5 - - driver_resources = {'vcpus': 20} - self._vcpu.reset(driver_resources, None) - self.assertEqual(20, self._vcpu._total) - self.assertEqual(0, self._vcpu._used) - - def test_add_and_remove_instance(self): - self._vcpu.add_instance(self._flavor) - self.assertEqual(10, self._vcpu._total) - self.assertEqual(5, self._vcpu._used) - - self._vcpu.remove_instance(self._flavor) - self.assertEqual(10, self._vcpu._total) - self.assertEqual(0, self._vcpu._used) - - def test_test_pass_limited(self): - result = self._vcpu.test(self._flavor, {'vcpu': 10}) - self.assertIsNone(result, 'vcpu test failed when it should pass') - - def test_test_pass_unlimited(self): - result = self._vcpu.test(self._big_flavor, {}) - self.assertIsNone(result, 'vcpu test failed when it should pass') - - def test_test_fail(self): - result = self._vcpu.test(self._flavor, {'vcpu': 2}) - expected = _('Free CPUs 2.00 VCPUs < requested 5 VCPUs') - self.assertEqual(expected, result) - - def test_write(self): - resources = {'stats': {}} - self._vcpu.write(resources) - expected = { - 'vcpus': 10, - 'vcpus_used': 0, - 'stats': { - 'num_vcpus': 10, - 'num_vcpus_used': 0 - } - } - self.assertEqual(sorted(expected), - sorted(resources)) diff --git a/nova/tests/compute/test_stats.py b/nova/tests/compute/test_stats.py index c90314b0fc..1864ac7950 100644 --- a/nova/tests/compute/test_stats.py +++ b/nova/tests/compute/test_stats.py @@ -136,6 +136,8 @@ def test_add_stats_for_instance(self): self.assertEqual(1, self.stats["num_vm_None"]) self.assertEqual(2, self.stats["num_vm_" + vm_states.BUILDING]) + self.assertEqual(10, self.stats.num_vcpus_used) + def test_calculate_workload(self): self.stats._increment("num_task_None") self.stats._increment("num_task_" + task_states.SCHEDULING) @@ -189,6 +191,7 @@ def 
test_update_stats_for_instance_deleted(self): self.assertEqual(0, self.stats.num_instances_for_project("1234")) self.assertEqual(0, self.stats.num_os_type("Linux")) self.assertEqual(0, self.stats["num_vm_" + vm_states.BUILDING]) + self.assertEqual(0, self.stats.num_vcpus_used) def test_io_workload(self): vms = [vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED] diff --git a/setup.cfg b/setup.cfg index 50c185cf30..cb8c651ff2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -27,8 +27,6 @@ packages = nova [entry_points] -nova.compute.resources = - vcpu = nova.compute.resources.vcpu:VCPU nova.image.download.modules = file = nova.image.download.file console_scripts = From 4df0ea9a2207325215cd2dd499ec77c7a0e34f17 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Sun, 20 Jul 2014 14:52:59 -0400 Subject: [PATCH 148/486] Correct image_metadata API use of nova.image.glance The image_metadata API controller was using the nova.image.glance module directly. This patch cleans that up and removes the associated unit test's use of stub_out_glance() from the nova.tests.api.openstack.fake module. 
Change-Id: Ia4a6cc54600c2154e14adb43184ac4ebfb7de539 --- nova/api/openstack/compute/image_metadata.py | 22 +- nova/image/api.py | 2 +- .../openstack/compute/test_image_metadata.py | 197 ++++++++++++++---- nova/tests/api/openstack/fakes.py | 59 +----- 4 files changed, 167 insertions(+), 113 deletions(-) diff --git a/nova/api/openstack/compute/image_metadata.py b/nova/api/openstack/compute/image_metadata.py index a77e37266e..ec67aa4d3e 100644 --- a/nova/api/openstack/compute/image_metadata.py +++ b/nova/api/openstack/compute/image_metadata.py @@ -19,19 +19,21 @@ from nova.api.openstack import wsgi from nova import exception from nova.i18n import _ -from nova.image import glance +import nova.image class Controller(object): """The image metadata API controller for the OpenStack API.""" def __init__(self): - self.image_service = glance.get_default_image_service() + self.image_api = nova.image.API() def _get_image(self, context, image_id): try: - return self.image_service.show(context, image_id) - except exception.NotFound: + return self.image_api.get(context, image_id) + except exception.ImageNotAuthorized as e: + raise exc.HTTPForbidden(explanation=e.format_message()) + except exception.ImageNotFound: msg = _("Image not found.") raise exc.HTTPNotFound(explanation=msg) @@ -62,7 +64,8 @@ def create(self, req, image_id, body): common.check_img_metadata_properties_quota(context, image['properties']) try: - image = self.image_service.update(context, image_id, image, None) + image = self.image_api.update(context, image_id, image, data=None, + purge_props=True) except exception.ImageNotAuthorized as e: raise exc.HTTPForbidden(explanation=e.format_message()) return dict(metadata=image['properties']) @@ -90,7 +93,8 @@ def update(self, req, image_id, id, body): common.check_img_metadata_properties_quota(context, image['properties']) try: - self.image_service.update(context, image_id, image, None) + self.image_api.update(context, image_id, image, data=None, + purge_props=True) 
except exception.ImageNotAuthorized as e: raise exc.HTTPForbidden(explanation=e.format_message()) return dict(meta=meta) @@ -104,7 +108,8 @@ def update_all(self, req, image_id, body): common.check_img_metadata_properties_quota(context, metadata) image['properties'] = metadata try: - self.image_service.update(context, image_id, image, None) + self.image_api.update(context, image_id, image, data=None, + purge_props=True) except exception.ImageNotAuthorized as e: raise exc.HTTPForbidden(explanation=e.format_message()) return dict(metadata=metadata) @@ -118,7 +123,8 @@ def delete(self, req, image_id, id): raise exc.HTTPNotFound(explanation=msg) image['properties'].pop(id) try: - self.image_service.update(context, image_id, image, None) + self.image_api.update(context, image_id, image, data=None, + purge_props=True) except exception.ImageNotAuthorized as e: raise exc.HTTPForbidden(explanation=e.format_message()) diff --git a/nova/image/api.py b/nova/image/api.py index ad7096162d..c5f983a6f4 100644 --- a/nova/image/api.py +++ b/nova/image/api.py @@ -108,7 +108,7 @@ def update(self, context, id_or_uri, image_info, passed to the image registry. :param data: Optional file handle or bytestream iterator that is passed to backend storage. - :param purge_props: Optional, defaults to True. If set, the backend + :param purge_props: Optional, defaults to False. If set, the backend image registry will clear all image properties and replace them the image properties supplied in the image_info dictionary's 'properties' diff --git a/nova/tests/api/openstack/compute/test_image_metadata.py b/nova/tests/api/openstack/compute/test_image_metadata.py index 0add556b44..822157ea79 100644 --- a/nova/tests/api/openstack/compute/test_image_metadata.py +++ b/nova/tests/api/openstack/compute/test_image_metadata.py @@ -13,59 +13,94 @@ # License for the specific language governing permissions and limitations # under the License. 
-from oslo.config import cfg +import copy + +import mock import webob from nova.api.openstack.compute import image_metadata +from nova import exception from nova.openstack.common import jsonutils from nova import test from nova.tests.api.openstack import fakes +from nova.tests import image_fixtures + +IMAGE_FIXTURES = image_fixtures.get_image_fixtures() +CHK_QUOTA_STR = 'nova.api.openstack.common.check_img_metadata_properties_quota' -CONF = cfg.CONF +def get_image_123(): + return copy.deepcopy(IMAGE_FIXTURES)[0] -class ImageMetaDataTest(test.TestCase): + +class ImageMetaDataTest(test.NoDBTestCase): def setUp(self): super(ImageMetaDataTest, self).setUp() - fakes.stub_out_glance(self.stubs) self.controller = image_metadata.Controller() - def test_index(self): + @mock.patch('nova.image.api.API.get', return_value=get_image_123()) + def test_index(self, get_all_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata') res_dict = self.controller.index(req, '123') expected = {'metadata': {'key1': 'value1'}} self.assertEqual(res_dict, expected) + get_all_mocked.assert_called_once_with(mock.ANY, '123') - def test_show(self): + @mock.patch('nova.image.api.API.get', return_value=get_image_123()) + def test_show(self, get_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1') res_dict = self.controller.show(req, '123', 'key1') self.assertIn('meta', res_dict) self.assertEqual(len(res_dict['meta']), 1) self.assertEqual('value1', res_dict['meta']['key1']) + get_mocked.assert_called_once_with(mock.ANY, '123') - def test_show_not_found(self): + @mock.patch('nova.image.api.API.get', return_value=get_image_123()) + def test_show_not_found(self, _get_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key9') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '123', 'key9') - def test_show_image_not_found(self): + @mock.patch('nova.image.api.API.get', + side_effect=exception.ImageNotFound(image_id='100')) + def 
test_show_image_not_found(self, _get_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '100', 'key9') - def test_create(self): + @mock.patch(CHK_QUOTA_STR) + @mock.patch('nova.image.api.API.update') + @mock.patch('nova.image.api.API.get', return_value=get_image_123()) + def test_create(self, get_mocked, update_mocked, quota_mocked): + mock_result = copy.deepcopy(get_image_123()) + mock_result['properties']['key7'] = 'value7' + update_mocked.return_value = mock_result req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata') req.method = 'POST' body = {"metadata": {"key7": "value7"}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" res = self.controller.create(req, '123', body) + get_mocked.assert_called_once_with(mock.ANY, '123') + expected = copy.deepcopy(get_image_123()) + expected['properties'] = { + 'key1': 'value1', # existing meta + 'key7': 'value7' # new meta + } + quota_mocked.assert_called_once_with(mock.ANY, expected["properties"]) + update_mocked.assert_called_once_with(mock.ANY, '123', expected, + data=None, purge_props=True) expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}} self.assertEqual(expected_output, res) - def test_create_image_not_found(self): + @mock.patch(CHK_QUOTA_STR) + @mock.patch('nova.image.api.API.update') + @mock.patch('nova.image.api.API.get', + side_effect=exception.ImageNotFound(image_id='100')) + def test_create_image_not_found(self, _get_mocked, update_mocked, + quota_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata') req.method = 'POST' body = {"metadata": {"key7": "value7"}} @@ -74,19 +109,35 @@ def test_create_image_not_found(self): self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, '100', body) + self.assertFalse(quota_mocked.called) + self.assertFalse(update_mocked.called) - def test_update_all(self): + @mock.patch(CHK_QUOTA_STR) + 
@mock.patch('nova.image.api.API.update') + @mock.patch('nova.image.api.API.get', return_value=get_image_123()) + def test_update_all(self, get_mocked, update_mocked, quota_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata') req.method = 'PUT' body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" res = self.controller.update_all(req, '123', body) + get_mocked.assert_called_once_with(mock.ANY, '123') + expected = copy.deepcopy(get_image_123()) + expected['properties'] = { + 'key9': 'value9' # replace meta + } + quota_mocked.assert_called_once_with(mock.ANY, expected["properties"]) + update_mocked.assert_called_once_with(mock.ANY, '123', expected, + data=None, purge_props=True) expected_output = {'metadata': {'key9': 'value9'}} self.assertEqual(expected_output, res) - def test_update_all_image_not_found(self): + @mock.patch(CHK_QUOTA_STR) + @mock.patch('nova.image.api.API.get', + side_effect=exception.ImageNotFound(image_id='100')) + def test_update_all_image_not_found(self, _get_mocked, quota_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata') req.method = 'PUT' body = {"metadata": {"key9": "value9"}} @@ -95,19 +146,33 @@ def test_update_all_image_not_found(self): self.assertRaises(webob.exc.HTTPNotFound, self.controller.update_all, req, '100', body) + self.assertFalse(quota_mocked.called) - def test_update_item(self): + @mock.patch(CHK_QUOTA_STR) + @mock.patch('nova.image.api.API.update') + @mock.patch('nova.image.api.API.get', return_value=get_image_123()) + def test_update_item(self, _get_mocked, update_mocked, quota_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1') req.method = 'PUT' body = {"meta": {"key1": "zz"}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" res = self.controller.update(req, '123', 'key1', body) + expected = copy.deepcopy(get_image_123()) + expected['properties'] = { + 'key1': 
'zz' # changed meta + } + quota_mocked.assert_called_once_with(mock.ANY, expected["properties"]) + update_mocked.assert_called_once_with(mock.ANY, '123', expected, + data=None, purge_props=True) expected_output = {'meta': {'key1': 'zz'}} self.assertEqual(res, expected_output) - def test_update_item_image_not_found(self): + @mock.patch(CHK_QUOTA_STR) + @mock.patch('nova.image.api.API.get', + side_effect=exception.ImageNotFound(image_id='100')) + def test_update_item_image_not_found(self, _get_mocked, quota_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1') req.method = 'PUT' body = {"meta": {"key1": "zz"}} @@ -116,8 +181,13 @@ def test_update_item_image_not_found(self): self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, '100', 'key1', body) + self.assertFalse(quota_mocked.called) - def test_update_item_bad_body(self): + @mock.patch(CHK_QUOTA_STR) + @mock.patch('nova.image.api.API.update') + @mock.patch('nova.image.api.API.get') + def test_update_item_bad_body(self, get_mocked, update_mocked, + quota_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1') req.method = 'PUT' body = {"key1": "zz"} @@ -126,21 +196,33 @@ def test_update_item_bad_body(self): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, '123', 'key1', body) - - def test_update_item_too_many_keys(self): + self.assertFalse(get_mocked.called) + self.assertFalse(quota_mocked.called) + self.assertFalse(update_mocked.called) + + @mock.patch(CHK_QUOTA_STR, + side_effect=webob.exc.HTTPRequestEntityTooLarge( + explanation='', headers={'Retry-After': 0})) + @mock.patch('nova.image.api.API.update') + @mock.patch('nova.image.api.API.get') + def test_update_item_too_many_keys(self, get_mocked, update_mocked, + _quota_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1') req.method = 'PUT' - overload = {} - for num in range(CONF.quota_metadata_items + 1): - overload['key%s' % num] = 'value%s' % num - body = 
{'meta': overload} + body = {"metadata": {"foo": "bar"}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, '123', 'key1', body) - - def test_update_item_body_uri_mismatch(self): + self.assertFalse(get_mocked.called) + self.assertFalse(update_mocked.called) + + @mock.patch(CHK_QUOTA_STR) + @mock.patch('nova.image.api.API.update') + @mock.patch('nova.image.api.API.get', return_value=get_image_123()) + def test_update_item_body_uri_mismatch(self, _get_mocked, update_mocked, + quota_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/bad') req.method = 'PUT' body = {"meta": {"key1": "value1"}} @@ -149,44 +231,64 @@ def test_update_item_body_uri_mismatch(self): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, '123', 'bad', body) + self.assertFalse(quota_mocked.called) + self.assertFalse(update_mocked.called) - def test_delete(self): + @mock.patch('nova.image.api.API.update') + @mock.patch('nova.image.api.API.get', return_value=get_image_123()) + def test_delete(self, _get_mocked, update_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1') req.method = 'DELETE' res = self.controller.delete(req, '123', 'key1') + expected = copy.deepcopy(get_image_123()) + expected['properties'] = {} + update_mocked.assert_called_once_with(mock.ANY, '123', expected, + data=None, purge_props=True) self.assertIsNone(res) - def test_delete_not_found(self): + @mock.patch('nova.image.api.API.get', return_value=get_image_123()) + def test_delete_not_found(self, _get_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah') req.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, '123', 'blah') - def test_delete_image_not_found(self): + @mock.patch('nova.image.api.API.get', + side_effect=exception.ImageNotFound(image_id='100')) + def test_delete_image_not_found(self, 
_get_mocked): req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1') req.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, '100', 'key1') - def test_too_many_metadata_items_on_create(self): - data = {"metadata": {}} - for num in range(CONF.quota_metadata_items + 1): - data['metadata']['key%i' % num] = "blah" + @mock.patch(CHK_QUOTA_STR, + side_effect=webob.exc.HTTPRequestEntityTooLarge( + explanation='', headers={'Retry-After': 0})) + @mock.patch('nova.image.api.API.update') + @mock.patch('nova.image.api.API.get', return_value=get_image_123()) + def test_too_many_metadata_items_on_create(self, _get_mocked, + update_mocked, _quota_mocked): + body = {"metadata": {"foo": "bar"}} req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata') req.method = 'POST' - req.body = jsonutils.dumps(data) + req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.create, req, '123', data) - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.create, req, '123', data) - - def test_too_many_metadata_items_on_put(self): - self.flags(quota_metadata_items=1) + self.controller.create, req, '123', body) + self.assertFalse(update_mocked.called) + + @mock.patch(CHK_QUOTA_STR, + side_effect=webob.exc.HTTPRequestEntityTooLarge( + explanation='', headers={'Retry-After': 0})) + @mock.patch('nova.image.api.API.update') + @mock.patch('nova.image.api.API.get', return_value=get_image_123()) + def test_too_many_metadata_items_on_put(self, _get_mocked, + update_mocked, _quota_mocked): + body = {"metadata": {"foo": "bar"}} req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah') req.method = 'PUT' body = {"meta": {"blah": "blah"}} @@ -195,22 +297,23 @@ def test_too_many_metadata_items_on_put(self): self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, req, '123', 'blah', body) + 
self.assertFalse(update_mocked.called) - def test_image_not_authorized_update(self): - image_id = 131 - # see nova.tests.api.openstack.fakes:_make_image_fixtures - - req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1' - % image_id) + @mock.patch('nova.image.api.API.get', + side_effect=exception.ImageNotAuthorized(image_id='123')) + def test_image_not_authorized_update(self, _get_mocked): + req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPForbidden, - self.controller.update, req, image_id, 'key1', body) + self.controller.update, req, '123', 'key1', body) - def test_image_not_authorized_update_all(self): + @mock.patch('nova.image.api.API.get', + side_effect=exception.ImageNotAuthorized(image_id='123')) + def test_image_not_authorized_update_all(self, _get_mocked): image_id = 131 # see nova.tests.api.openstack.fakes:_make_image_fixtures @@ -224,7 +327,9 @@ def test_image_not_authorized_update_all(self): self.assertRaises(webob.exc.HTTPForbidden, self.controller.update_all, req, image_id, body) - def test_image_not_authorized_create(self): + @mock.patch('nova.image.api.API.get', + side_effect=exception.ImageNotAuthorized(image_id='123')) + def test_image_not_authorized_create(self, _get_mocked): image_id = 131 # see nova.tests.api.openstack.fakes:_make_image_fixtures diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 72df1c3d5d..3e1190d743 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -36,14 +36,13 @@ from nova import context from nova.db.sqlalchemy import models from nova import exception as exc -import nova.image.glance +import nova.netconf from nova.network import api as network_api from nova.openstack.common import jsonutils from nova.openstack.common import timeutils from nova import 
quota from nova.tests import fake_block_device from nova.tests import fake_network -from nova.tests.glance import stubs as glance_stubs from nova.tests.objects import test_keypair from nova import utils from nova import wsgi @@ -248,62 +247,6 @@ def validate_networks(self, context, networks, max_count): fake_network.stub_out_nw_api_get_instance_nw_info(stubs) -# TODO(jaypipes): Remove this when stub_out_glance() is removed after -# image metadata pieces are fixed to call nova.image.API instead of the -# nova.image.glance module directly. -def _make_image_fixtures(): - NOW_GLANCE_FORMAT = "2010-10-11T10:30:22" - - image_id = 123 - - fixtures = [] - - def add_fixture(**kwargs): - fixtures.append(kwargs) - - # Public image - add_fixture(id=image_id, name='public image', is_public=True, - status='active', properties={'key1': 'value1'}, - min_ram="128", min_disk="10", size='25165824') - image_id += 1 - - # Snapshot for User 1 - uuid = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74' - snapshot_properties = {'instance_uuid': uuid, 'user_id': 'fake'} - for status in ('queued', 'saving', 'active', 'killed', - 'deleted', 'pending_delete'): - deleted = False if status != 'deleted' else True - deleted_at = NOW_GLANCE_FORMAT if deleted else None - - add_fixture(id=image_id, name='%s snapshot' % status, - is_public=False, status=status, - properties=snapshot_properties, size='25165824', - deleted=deleted, deleted_at=deleted_at) - image_id += 1 - - # Image without a name - add_fixture(id=image_id, is_public=True, status='active', properties={}) - # Image for permission tests - image_id += 1 - add_fixture(id=image_id, is_public=True, status='active', properties={}, - owner='authorized_fake') - - return fixtures - - -def stub_out_glance(stubs): - def fake_get_remote_image_service(): - client = glance_stubs.StubGlanceClient(_make_image_fixtures()) - client_wrapper = nova.image.glance.GlanceClientWrapper() - client_wrapper.host = 'fake_host' - client_wrapper.port = 9292 - 
client_wrapper.client = client - return nova.image.glance.GlanceImageService(client=client_wrapper) - stubs.Set(nova.image.glance, - 'get_default_image_service', - fake_get_remote_image_service) - - class FakeToken(object): id_count = 0 From 4bea6f04bfe9b67d354b39336f4af3e91ace3332 Mon Sep 17 00:00:00 2001 From: Mark McLoughlin Date: Fri, 27 Jun 2014 14:06:32 +0100 Subject: [PATCH 149/486] vmwareapi: remove some unused fake vim methods Rename_Task unused since I92acdd5cd00f739d504738413d3b63a2e17f2866 AcquireCloneTicket and PowerDownHostToStandBy_Task were never used. Change-Id: I025d121db41ed1f6084364ff1a52eade1c3d6a90 --- nova/tests/virt/vmwareapi/fake.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/nova/tests/virt/vmwareapi/fake.py b/nova/tests/virt/vmwareapi/fake.py index ddbf7f75a8..81c92402e2 100644 --- a/nova/tests/virt/vmwareapi/fake.py +++ b/nova/tests/virt/vmwareapi/fake.py @@ -1464,8 +1464,6 @@ def __getattr__(self, attr_name): elif attr_name == "FindAllByUuid": return lambda *args, **kwargs: self._find_all_by_uuid(attr_name, *args, **kwargs) - elif attr_name == "Rename_Task": - return lambda *args, **kwargs: self._just_return_task(attr_name) elif attr_name == "SearchDatastore_Task": return lambda *args, **kwargs: self._search_ds(attr_name, *args, **kwargs) @@ -1484,8 +1482,6 @@ def __getattr__(self, attr_name): elif attr_name == "CancelRetrievePropertiesEx": return lambda *args, **kwargs: self._retrieve_properties_cancel( attr_name, *args, **kwargs) - elif attr_name == "AcquireCloneTicket": - return lambda *args, **kwargs: self._just_return() elif attr_name == "AddPortGroup": return lambda *args, **kwargs: self._add_port_group(attr_name, *args, **kwargs) @@ -1493,8 +1489,6 @@ def __getattr__(self, attr_name): return lambda *args, **kwargs: self._just_return_task(attr_name) elif attr_name == "ShutdownHost_Task": return lambda *args, **kwargs: self._just_return_task(attr_name) - elif attr_name == "PowerDownHostToStandBy_Task": - return lambda 
*args, **kwargs: self._just_return_task(attr_name) elif attr_name == "PowerUpHostFromStandBy_Task": return lambda *args, **kwargs: self._just_return_task(attr_name) elif attr_name == "EnterMaintenanceMode_Task": From def40cfd88dd1f64559b0328a886c507cd310027 Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Wed, 11 Jun 2014 11:46:47 -0700 Subject: [PATCH 150/486] Raise NotImplemented default-security-group-rule api with neutron Neutron currently does not implement the nova extension default-security-group-rule which allows an admin to set which rules are in the default security group when a tenant is onboarded. Implementing this type of functionality should be done in neutron directly as proxying the api calls to do this from nova is not possible as neutron creates the default security group itself. Previously, if one tried to use this api with neutron a 500 error was raised, now a 501 error is raised. Change-Id: I6d1bb1c9c2d79278a85b96aea800b4b1f3489225 Closes-bug: 1326958 --- nova/network/security_group/neutron_driver.py | 20 +++++++++++ .../test_security_group_default_rules.py | 33 +++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/nova/network/security_group/neutron_driver.py b/nova/network/security_group/neutron_driver.py index de88f13203..179bc6f5b2 100644 --- a/nova/network/security_group/neutron_driver.py +++ b/nova/network/security_group/neutron_driver.py @@ -505,3 +505,23 @@ def populate_security_groups(self, instance, security_groups): # in the nova database if using the neutron driver instance['security_groups'] = objects.SecurityGroupList() instance['security_groups'].objects = [] + + def get_default_rule(self, context, id): + msg = _("Network driver does not support this function.") + raise exc.HTTPNotImplemented(explanation=msg) + + def get_all_default_rules(self, context): + msg = _("Network driver does not support this function.") + raise exc.HTTPNotImplemented(explanation=msg) + + def add_default_rules(self, context, vals): + msg = 
_("Network driver does not support this function.") + raise exc.HTTPNotImplemented(explanation=msg) + + def remove_default_rules(self, context, rule_ids): + msg = _("Network driver does not support this function.") + raise exc.HTTPNotImplemented(explanation=msg) + + def default_rule_exists(self, context, values): + msg = _("Network driver does not support this function.") + raise exc.HTTPNotImplemented(explanation=msg) diff --git a/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py b/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py index ad0bd6a8d7..a09ea3babf 100644 --- a/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py +++ b/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py @@ -48,6 +48,39 @@ def security_group_default_rule_db(security_group_default_rule, id=None): return AttrDict(attrs) +class TestSecurityGroupDefaultRulesNeutron(test.TestCase): + def setUp(self): + self.flags(security_group_api='neutron') + super(TestSecurityGroupDefaultRulesNeutron, self).setUp() + self.controller = \ + security_group_default_rules.SecurityGroupDefaultRulesController() + + def test_create_security_group_default_rule_not_implemented_neutron(self): + sgr = security_group_default_rule_template() + req = fakes.HTTPRequest.blank( + '/v2/fake/os-security-group-default-rules', use_admin_context=True) + self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.create, + req, {'security_group_default_rule': sgr}) + + def test_security_group_default_rules_list_not_implemented_neturon(self): + req = fakes.HTTPRequest.blank( + '/v2/fake/os-security-group-default-rules', use_admin_context=True) + self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.index, + req) + + def test_security_group_default_rules_show_not_implemented_neturon(self): + req = fakes.HTTPRequest.blank( + '/v2/fake/os-security-group-default-rules', use_admin_context=True) + 
self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.show, + req, '602ed77c-a076-4f9b-a617-f93b847b62c5') + + def test_security_group_default_rules_delete_not_implemented_neturon(self): + req = fakes.HTTPRequest.blank( + '/v2/fake/os-security-group-default-rules', use_admin_context=True) + self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.delete, + req, '602ed77c-a076-4f9b-a617-f93b847b62c5') + + class TestSecurityGroupDefaultRules(test.TestCase): def setUp(self): super(TestSecurityGroupDefaultRules, self).setUp() From a3e3906a66b8aa620fc118bdcb45181b17e7d466 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sat, 19 Jul 2014 22:37:49 -0700 Subject: [PATCH 151/486] VMware: improve log message for attachment of CDROM Commit 3c59d998c5e6b80549053b01f919ac318cd31b13 config drive support. The log message makes use of the instance_name which is not unique. The may be confusing when debugging. TrivialFix Change-Id: Iff7f4f938f4980f0c0bf2dc3017ed741453a69f5 --- nova/virt/vmwareapi/vmops.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index a726e1a3cb..b73e465601 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -601,7 +601,6 @@ def _create_config_drive(self, instance, injected_files, admin_password, def _attach_cdrom_to_vm(self, vm_ref, instance, datastore, file_path): """Attach cdrom to VM by reconfiguration.""" - instance_name = instance.name client_factory = self._session._get_vim().client.factory devices = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, @@ -617,13 +616,11 @@ def _attach_cdrom_to_vm(self, vm_ref, instance, if controller_spec: cdrom_attach_config_spec.deviceChange.append(controller_spec) - LOG.debug("Reconfiguring VM instance %(instance_name)s to attach " - "cdrom %(file_path)s", - {'instance_name': instance_name, 'file_path': file_path}) + LOG.debug("Reconfiguring VM instance to attach 
cdrom %s", + file_path, instance=instance) vm_util.reconfigure_vm(self._session, vm_ref, cdrom_attach_config_spec) - LOG.debug("Reconfigured VM instance %(instance_name)s to attach " - "cdrom %(file_path)s", - {'instance_name': instance_name, 'file_path': file_path}) + LOG.debug("Reconfigured VM instance to attach cdrom %s", + file_path, instance=instance) @staticmethod def decide_linked_clone(image_linked_clone, global_linked_clone): From 2ac9f2e9fe0550f400a00373fd9e44cc2e07d1cb Mon Sep 17 00:00:00 2001 From: Eli Qiao Date: Tue, 22 Jul 2014 15:12:32 +0800 Subject: [PATCH 152/486] Add i18n log markers in disk api Improve LOG i18n log markers in nova/virt/disk/api.py. Change-Id: I76b5a2acc15bd4230ea6e243770ac0590c56235d --- nova/virt/disk/api.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py index ec18a3ecc7..2d63773fcf 100644 --- a/nova/virt/disk/api.py +++ b/nova/virt/disk/api.py @@ -34,6 +34,8 @@ from nova import exception from nova.i18n import _ +from nova.i18n import _LE +from nova.i18n import _LW from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import processutils @@ -359,8 +361,8 @@ def inject_data(image, key=None, net=None, metadata=None, admin_password=None, inject_val = locals()[inject] if inject_val: raise - LOG.warn(_('Ignoring error injecting data into image ' - '(%(e)s)'), {'e': e}) + LOG.warn(_LW('Ignoring error injecting data into image %(image)s ' + '(%(e)s)'), {'image': image, 'e': e}) return False try: @@ -381,7 +383,7 @@ def setup_container(image, container_dir, use_cow=False): img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir) dev = img.mount() if dev is None: - LOG.error(_("Failed to mount container filesystem '%(image)s' " + LOG.error(_LE("Failed to mount container filesystem '%(image)s' " "on '%(target)s': %(errors)s"), {"image": img, "target": container_dir, "errors": 
img.errors}) @@ -449,8 +451,8 @@ def inject_data_into_fs(fs, key, net, metadata, admin_password, files, except Exception as e: if inject in mandatory: raise - LOG.warn(_('Ignoring error injecting %(inject)s into image ' - '(%(e)s)'), {'e': e, 'inject': inject}) + LOG.warn(_LW('Ignoring error injecting %(inject)s into image ' + '(%(e)s)'), {'inject': inject, 'e': e}) status = False return status From 536bcbd2ce0822bec757f7ec949b9f70df20c966 Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Wed, 23 Jul 2014 12:30:26 +0800 Subject: [PATCH 153/486] Fix extra metadata didn't assign into snapshot image create_image action API accept metadata parameter to add extra metadata into snapshot image. But the current code doesn't work for snapshot of volume based instance. This error is because the code use var 'props' to store extra metadatas, and the code use same var to store root bdm metadata later, then the extra metadata is overwritten by root bdm metadata. This patch use another var 'properties' to store root bdm metadata to avoid overwrite the extra metadata. 
Change-Id: I2224905f596690e22aa71dc6541284f005ebba9d Closes-Bug: #1347355 --- nova/api/openstack/compute/plugins/v3/servers.py | 4 ++-- nova/api/openstack/compute/servers.py | 4 ++-- .../compute/plugins/v3/test_server_actions.py | 15 ++++++++++++++- .../api/openstack/compute/test_server_actions.py | 15 ++++++++++++++- 4 files changed, 32 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index 505d651b8b..80bcd36e49 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -904,10 +904,10 @@ def _action_create_image(self, req, id, body): bdms): img = instance['image_ref'] if not img: - props = bdms.root_metadata( + properties = bdms.root_metadata( context, self.compute_api.image_api, self.compute_api.volume_api) - image_meta = {'properties': props} + image_meta = {'properties': properties} else: image_meta = self.compute_api.image_api.get(context, img) diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index c63cae40ea..9c92edd3c8 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -1446,10 +1446,10 @@ def _action_create_image(self, req, id, body): bdms): img = instance['image_ref'] if not img: - props = bdms.root_metadata( + properties = bdms.root_metadata( context, self.compute_api.image_api, self.compute_api.volume_api) - image_meta = {'properties': props} + image_meta = {'properties': properties} else: image_meta = self.compute_api.image_api.get(context, img) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py index c6d80774d4..10b12e8640 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py @@ -920,12 +920,15 @@ def 
test_create_volume_backed_image_with_metadata(self): self._do_test_create_volume_backed_image(dict(ImageType='Gold', ImageVersion='2.0')) - def test_create_volume_backed_image_with_metadata_from_volume(self): + def _test_create_volume_backed_image_with_metadata_from_volume( + self, extra_metadata=None): def _fake_id(x): return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12) body = dict(create_image=dict(name='snapshot_of_volume_backed')) + if extra_metadata: + body['create_image']['metadata'] = extra_metadata image_service = glance.get_default_image_service() @@ -976,6 +979,16 @@ def fake_block_device_mapping_get_all_by_instance(context, inst_id, properties = image['properties'] self.assertEqual(properties['test_key1'], 'test_value1') self.assertEqual(properties['test_key2'], 'test_value2') + if extra_metadata: + for key, val in extra_metadata.items(): + self.assertEqual(properties[key], val) + + def test_create_vol_backed_img_with_meta_from_vol_without_extra_meta(self): + self._test_create_volume_backed_image_with_metadata_from_volume() + + def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self): + self._test_create_volume_backed_image_with_metadata_from_volume( + extra_metadata={'a': 'b'}) def test_create_image_snapshots_disabled(self): """Don't permit a snapshot if the allow_instance_snapshots flag is diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py index 950cadef21..36fae46191 100644 --- a/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/tests/api/openstack/compute/test_server_actions.py @@ -1111,12 +1111,15 @@ def test_create_volume_backed_image_with_metadata(self): self._do_test_create_volume_backed_image(dict(ImageType='Gold', ImageVersion='2.0')) - def test_create_volume_backed_image_with_metadata_from_volume(self): + def _test_create_volume_backed_image_with_metadata_from_volume( + self, extra_metadata=None): def _fake_id(x): return '%s-%s-%s-%s' % 
(x * 8, x * 4, x * 4, x * 12) body = dict(createImage=dict(name='snapshot_of_volume_backed')) + if extra_metadata: + body['createImage']['metadata'] = extra_metadata image_service = glance.get_default_image_service() @@ -1167,6 +1170,16 @@ def fake_block_device_mapping_get_all_by_instance(context, inst_id, properties = image['properties'] self.assertEqual(properties['test_key1'], 'test_value1') self.assertEqual(properties['test_key2'], 'test_value2') + if extra_metadata: + for key, val in extra_metadata.items(): + self.assertEqual(properties[key], val) + + def test_create_vol_backed_img_with_meta_from_vol_without_extra_meta(self): + self._test_create_volume_backed_image_with_metadata_from_volume() + + def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self): + self._test_create_volume_backed_image_with_metadata_from_volume( + extra_metadata={'a': 'b'}) def test_create_image_snapshots_disabled(self): """Don't permit a snapshot if the allow_instance_snapshots flag is From 99ac0d788e6023fb3d3478651b6edf39dd588526 Mon Sep 17 00:00:00 2001 From: Eli Qiao Date: Mon, 14 Jul 2014 17:12:19 +0800 Subject: [PATCH 154/486] Make the coding style consistent with other Controller in plugins/v3 Add ALIAS and authorize as a global variable. 
Change-Id: I6bc995008206d033ac4ac1e81c6954e999262861 --- .../openstack/compute/plugins/v3/flavors.py | 6 ++++-- .../compute/plugins/v3/flavors_extraspecs.py | 20 +++++++++---------- nova/api/openstack/compute/plugins/v3/ips.py | 6 ++++-- .../compute/plugins/v3/server_metadata.py | 4 +++- 4 files changed, 21 insertions(+), 15 deletions(-) diff --git a/nova/api/openstack/compute/plugins/v3/flavors.py b/nova/api/openstack/compute/plugins/v3/flavors.py index 7e84219b3e..93c3dd3a79 100644 --- a/nova/api/openstack/compute/plugins/v3/flavors.py +++ b/nova/api/openstack/compute/plugins/v3/flavors.py @@ -24,6 +24,8 @@ from nova.openstack.common import strutils from nova import utils +ALIAS = 'flavors' + class FlavorsController(wsgi.Controller): """Flavor controller for the OpenStack API.""" @@ -116,7 +118,7 @@ def _get_flavors(self, req): class Flavors(extensions.V3APIExtensionBase): """Flavors Extension.""" name = "Flavors" - alias = "flavors" + alias = ALIAS version = 1 def get_resources(self): @@ -124,7 +126,7 @@ def get_resources(self): member_actions = {'action': 'POST'} resources = [ - extensions.ResourceExtension('flavors', + extensions.ResourceExtension(ALIAS, FlavorsController(), member_name='flavor', collection_actions=collection_actions, diff --git a/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py b/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py index fd380b6715..496772ddc3 100644 --- a/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py +++ b/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py @@ -23,15 +23,15 @@ from nova.i18n import _ from nova import objects +ALIAS = 'flavor-extra-specs' +authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS) + class FlavorExtraSpecsController(object): """The flavor extra specs API controller for the OpenStack API.""" - ALIAS = 'flavor-extra-specs' def __init__(self, *args, **kwargs): super(FlavorExtraSpecsController, self).__init__(*args, **kwargs) - self.authorize = 
extensions.extension_authorizer('compute', - 'v3:' + self.ALIAS) def _get_extra_specs(self, context, flavor_id): flavor = objects.Flavor.get_by_flavor_id(context, flavor_id) @@ -41,7 +41,7 @@ def _get_extra_specs(self, context, flavor_id): def index(self, req, flavor_id): """Returns the list of extra specs for a given flavor.""" context = req.environ['nova.context'] - self.authorize(context, action='index') + authorize(context, action='index') return self._get_extra_specs(context, flavor_id) @extensions.expected_errors((400, 404, 409)) @@ -49,7 +49,7 @@ def index(self, req, flavor_id): @validation.schema(flavors_extraspecs.create) def create(self, req, flavor_id, body): context = req.environ['nova.context'] - self.authorize(context, action='create') + authorize(context, action='create') specs = body['extra_specs'] try: @@ -66,7 +66,7 @@ def create(self, req, flavor_id, body): @validation.schema(flavors_extraspecs.update) def update(self, req, flavor_id, id, body): context = req.environ['nova.context'] - self.authorize(context, action='update') + authorize(context, action='update') if id not in body: expl = _('Request body and URI mismatch') @@ -85,7 +85,7 @@ def update(self, req, flavor_id, id, body): def show(self, req, flavor_id, id): """Return a single extra spec item.""" context = req.environ['nova.context'] - self.authorize(context, action='show') + authorize(context, action='show') try: flavor = objects.Flavor.get_by_flavor_id(context, flavor_id) return {id: flavor.extra_specs[id]} @@ -103,7 +103,7 @@ def show(self, req, flavor_id, id): def delete(self, req, flavor_id, id): """Deletes an existing extra spec.""" context = req.environ['nova.context'] - self.authorize(context, action='delete') + authorize(context, action='delete') try: flavor = objects.Flavor.get_by_flavor_id(context, flavor_id) del flavor.extra_specs[id] @@ -121,12 +121,12 @@ def delete(self, req, flavor_id, id): class FlavorsExtraSpecs(extensions.V3APIExtensionBase): """Flavors extra specs 
support.""" name = 'FlavorsExtraSpecs' - alias = FlavorExtraSpecsController.ALIAS + alias = ALIAS version = 1 def get_resources(self): extra_specs = extensions.ResourceExtension( - self.alias, + ALIAS, FlavorExtraSpecsController(), parent=dict(member_name='flavor', collection_name='flavors')) diff --git a/nova/api/openstack/compute/plugins/v3/ips.py b/nova/api/openstack/compute/plugins/v3/ips.py index fe5915b30c..194a093ea7 100644 --- a/nova/api/openstack/compute/plugins/v3/ips.py +++ b/nova/api/openstack/compute/plugins/v3/ips.py @@ -22,6 +22,8 @@ from nova.api.openstack import wsgi from nova.i18n import _ +ALIAS = 'ips' + class IPsController(wsgi.Controller): """The servers addresses API controller for the OpenStack API.""" @@ -67,7 +69,7 @@ class IPs(extensions.V3APIExtensionBase): """Server addresses.""" name = "Ips" - alias = "ips" + alias = ALIAS version = 1 def get_resources(self): @@ -75,7 +77,7 @@ def get_resources(self): 'collection_name': 'servers'} resources = [ extensions.ResourceExtension( - 'ips', IPsController(), parent=parent, member_name='ip')] + ALIAS, IPsController(), parent=parent, member_name='ip')] return resources diff --git a/nova/api/openstack/compute/plugins/v3/server_metadata.py b/nova/api/openstack/compute/plugins/v3/server_metadata.py index 035a57472b..89709e1e72 100644 --- a/nova/api/openstack/compute/plugins/v3/server_metadata.py +++ b/nova/api/openstack/compute/plugins/v3/server_metadata.py @@ -22,6 +22,8 @@ from nova import exception from nova.i18n import _ +ALIAS = 'server-metadata' + class ServerMetadataController(wsgi.Controller): """The server metadata API controller for the OpenStack API.""" @@ -172,7 +174,7 @@ def delete(self, req, server_id, id): class ServerMetadata(extensions.V3APIExtensionBase): """Server Metadata API.""" name = "ServerMetadata" - alias = "server-metadata" + alias = ALIAS version = 1 def get_resources(self): From a6d620c3c0528b5b4018a5dc9768a6f31b29034b Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 
15 Jul 2014 16:47:57 +0200 Subject: [PATCH 155/486] Augment oslo's default log levels with nova specific ones. Thanks to I49a82c5b3446784d254ca558f51b6c3e5c2028eb, we can augment oslo's set of default log levels with project specific ones. Add keystonemiddleware as warn level since its verbose and nova isn't trying to debug keystonemiddleware. Add routes.middleware since this generates 3 log entries per API request and doesn't help make debugging easier. Change-Id: Ib5164a22c5cbfa4f9d881b97bb1f623cd4d2f3f3 --- nova/config.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/config.py b/nova/config.py index 6cad3485f8..c5600a7025 100644 --- a/nova/config.py +++ b/nova/config.py @@ -19,6 +19,7 @@ from nova import debugger from nova.openstack.common.db import options +from nova.openstack.common import log from nova import paths from nova import rpc from nova import version @@ -30,6 +31,9 @@ def parse_args(argv, default_config_files=None): options.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION, sqlite_db='nova.sqlite') rpc.set_defaults(control_exchange='nova') + nova_default_log_levels = (log.DEFAULT_LOG_LEVELS + + ["keystonemiddleware=WARN", "routes.middleware=WARN"]) + log.set_defaults(default_log_levels=nova_default_log_levels) debugger.register_cli_opts() cfg.CONF(argv[1:], project='nova', From 478f447070e2aa11ac14d419ae283e6bb9edeeb3 Mon Sep 17 00:00:00 2001 From: abhishek-kekane Date: Wed, 26 Feb 2014 05:03:23 -0800 Subject: [PATCH 156/486] reduce network down time during live-migration Called unplug_vifs() method before post_live_migration_to_destination call because floating ip address will not work until the vifs are unplugged from the source compute node. Added new method post_live_migration_at_source in virt driver and implemented it in libvirt driver to unplug the vifs at source. Other drivers will raise NotImplementedError. 
Added a new keyword argument destroy_vifs to the cleanup() method so that unplug_vifs() is not called a second time if it has already been called.
self.compute_rpcapi.post_live_migration_at_destination(ctxt, @@ -4801,16 +4812,9 @@ def _post_live_migration(self, ctxt, instance, if do_cleanup: self.driver.cleanup(ctxt, instance, network_info, destroy_disks=destroy_disks, - migrate_data=migrate_data) - else: - # self.driver.cleanup() usually performs vif unplugging - # but we must do it explicitly here when block_migration - # is false, as the network devices at the source must be - # torn down - try: - self.driver.unplug_vifs(instance, network_info) - except NotImplementedError as e: - LOG.debug(e, instance=instance) + migrate_data=migrate_data, + destroy_vifs=destroy_vifs) + # NOTE(tr3buchet): tear down networks on source host self.network_api.setup_networks_on_host(ctxt, instance, self.host, teardown=True) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 70b3ed7933..d807ef9177 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -5414,7 +5414,8 @@ def test_post_live_migration_working_correctly(self): 'migrate_instance_start'), mock.patch.object(self.compute.compute_rpcapi, 'post_live_migration_at_destination'), - mock.patch.object(self.compute.driver, 'unplug_vifs'), + mock.patch.object(self.compute.driver, + 'post_live_migration_at_source'), mock.patch.object(self.compute.network_api, 'setup_networks_on_host'), mock.patch.object(self.compute.instance_events, @@ -5422,7 +5423,8 @@ def test_post_live_migration_working_correctly(self): ) as ( post_live_migration, unfilter_instance, migrate_instance_start, post_live_migration_at_destination, - unplug_vifs, setup_networks_on_host, clear_events + post_live_migration_at_source, setup_networks_on_host, + clear_events ): self.compute._post_live_migration(c, instance, dest) @@ -5436,7 +5438,8 @@ def test_post_live_migration_working_correctly(self): mock.call(c, instance, migration)]) post_live_migration_at_destination.assert_has_calls([ mock.call(c, instance, False, dest)]) - 
unplug_vifs.assert_has_calls([mock.call(instance, [])]) + post_live_migration_at_source.assert_has_calls( + [mock.call(c, instance, [])]) setup_networks_on_host.assert_has_calls([ mock.call(c, instance, self.compute.host, teardown=True)]) clear_events.assert_called_once_with(instance) diff --git a/nova/tests/virt/test_virt_drivers.py b/nova/tests/virt/test_virt_drivers.py index ff51d4c346..4d9af914c9 100644 --- a/nova/tests/virt/test_virt_drivers.py +++ b/nova/tests/virt/test_virt_drivers.py @@ -34,6 +34,7 @@ from nova.tests.virt.libvirt import test_driver from nova.virt import event as virtevent from nova.virt import fake +from nova.virt import libvirt from nova.virt.libvirt import imagebackend LOG = logging.getLogger(__name__) @@ -829,3 +830,21 @@ def test_set_host_enabled_dont_override_manually_disabled(self): self.connection._set_host_enabled(False, 'ERROR!') self.assertTrue(service_mock.disabled) self.assertEqual(service_mock.disabled_reason, 'Manually disabled') + + @catch_notimplementederror + @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs') + def test_unplug_vifs_with_destroy_vifs_false(self, unplug_vifs_mock): + instance_ref, network_info = self._get_running_instance() + self.connection.cleanup(self.ctxt, instance_ref, network_info, + destroy_vifs=False) + self.assertEqual(unplug_vifs_mock.call_count, 0) + + @catch_notimplementederror + @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs') + def test_unplug_vifs_with_destroy_vifs_true(self, unplug_vifs_mock): + instance_ref, network_info = self._get_running_instance() + self.connection.cleanup(self.ctxt, instance_ref, network_info, + destroy_vifs=True) + self.assertEqual(unplug_vifs_mock.call_count, 1) + unplug_vifs_mock.assert_called_once_with(instance_ref, + network_info, True) diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py index 228267e300..e759bd7055 100644 --- a/nova/virt/baremetal/driver.py +++ b/nova/virt/baremetal/driver.py @@ -400,7 +400,7 @@ 
def destroy(self, context, instance, network_info, block_device_info=None, "baremetal database: %s") % e) def cleanup(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, migrate_data=None): + destroy_disks=True, migrate_data=None, destroy_vifs=True): """Cleanup after instance being destroyed.""" pass diff --git a/nova/virt/driver.py b/nova/virt/driver.py index acfb90f26d..fa5f612168 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -319,7 +319,7 @@ def destroy(self, context, instance, network_info, block_device_info=None, raise NotImplementedError() def cleanup(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, migrate_data=None): + destroy_disks=True, migrate_data=None, destroy_vifs=True): """Cleanup the instance resources . Instance should have been destroyed from the Hypervisor before calling @@ -695,6 +695,16 @@ def post_live_migration(self, context, instance, block_device_info, """ pass + def post_live_migration_at_source(self, context, instance, network_info): + """Unplug VIFs from networks at source. 
+ + :param context: security context + :param instance: instance object reference + :param network_info: instance network information + """ + raise NotImplementedError(_("Hypervisor driver does not support " + "post_live_migration_at_source method")) + def post_live_migration_at_destination(self, context, instance, network_info, block_migration=False, diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 8e338b12d1..5ffcbf8f36 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -219,7 +219,7 @@ def destroy(self, context, instance, network_info, block_device_info=None, 'inst': self.instances}, instance=instance) def cleanup(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, migrate_data=None): + destroy_disks=True, migrate_data=None, destroy_vifs=True): pass def attach_volume(self, context, connection_info, instance, mountpoint, diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py index dc0f2fa889..5b666cfa12 100644 --- a/nova/virt/hyperv/driver.py +++ b/nova/virt/hyperv/driver.py @@ -64,7 +64,7 @@ def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks) def cleanup(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, migrate_data=None): + destroy_disks=True, migrate_data=None, destroy_vifs=True): """Cleanup after instance being destroyed by Hypervisor.""" pass diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 6c39391ac2..83c4389b5c 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -1005,9 +1005,11 @@ def _undefine_domain(self, instance): {'errcode': errcode, 'e': e}, instance=instance) def cleanup(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, migrate_data=None): + destroy_disks=True, migrate_data=None, destroy_vifs=True): self._undefine_domain(instance) - self._unplug_vifs(instance, network_info, True) + if destroy_vifs: + self._unplug_vifs(instance, 
network_info, True) + retry = True while retry: try: @@ -4870,6 +4872,15 @@ def post_live_migration(self, context, instance, block_device_info, disk_dev = vol['mount_device'].rpartition("/")[2] self._disconnect_volume(connection_info, disk_dev) + def post_live_migration_at_source(self, context, instance, network_info): + """Unplug VIFs from networks at source. + + :param context: security context + :param instance: instance object reference + :param network_info: instance network information + """ + self.unplug_vifs(instance, network_info) + def post_live_migration_at_destination(self, context, instance, network_info, diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index d5b7e4cc72..a593e9107f 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -195,7 +195,7 @@ def destroy(self, context, instance, network_info, block_device_info=None, self._vmops.destroy(instance, destroy_disks) def cleanup(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, migrate_data=None): + destroy_disks=True, migrate_data=None, destroy_vifs=True): """Cleanup after instance being destroyed by Hypervisor.""" pass diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py index bac9975351..9cd6fd4be2 100644 --- a/nova/virt/xenapi/driver.py +++ b/nova/virt/xenapi/driver.py @@ -255,7 +255,7 @@ def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks) def cleanup(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, migrate_data=None): + destroy_disks=True, migrate_data=None, destroy_vifs=True): """Cleanup after instance being destroyed by Hypervisor.""" pass From b856bc5e348cc69923e3ee815303804323730026 Mon Sep 17 00:00:00 2001 From: Alin Gabriel Serdean Date: Wed, 23 Jul 2014 09:16:09 -0700 Subject: [PATCH 157/486] Add Hyper-V driver in the "compute_driver" option description The description of the option "compute_driver" should 
include hyperv.HyperVDriver along with the other supported drivers.
+ +from nova.virt.hyperv import driver + +HyperVDriver = driver.HyperVDriver From 62e33be84d7ad3931104b27a02bd796f58972d04 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Thu, 24 Jul 2014 10:03:25 +0000 Subject: [PATCH 158/486] Fix duplicated images in test_block_device_mapping In test_block_device_mapping, each test can select to specify image_ref, block_device_mapping or both. The "both" should be negative tests because nova API gets image_id from either image_ref or block_device_mapping. However, there are some "both" tests which are not their purposes. This patch fixes these tests for their purposes by passing True as no_image. These bugs were found through jsonschema validation framework development, because the schema could detect "both" cases fastly and these tests could not get their purposes. Change-Id: I73f1a15d8e08a3eda3190e660338e274358ddd33 --- .../plugins/v3/test_block_device_mapping.py | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_block_device_mapping.py b/nova/tests/api/openstack/compute/plugins/v3/test_block_device_mapping.py index 29530c4174..3ff8a6a046 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_block_device_mapping.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_block_device_mapping.py @@ -153,7 +153,8 @@ def create(*args, **kwargs): self.stubs.Set(compute_api.API, 'create', create) params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm} - self.assertRaises(exc.HTTPBadRequest, self._test_create, params) + self.assertRaises(exc.HTTPBadRequest, + self._test_create, params, no_image=True) def test_create_instance_with_device_name_empty(self): self.bdm[0]['device_name'] = '' @@ -167,7 +168,8 @@ def create(*args, **kwargs): self.stubs.Set(compute_api.API, 'create', create) params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm} - self.assertRaises(exc.HTTPBadRequest, self._test_create, params) + 
self.assertRaises(exc.HTTPBadRequest, + self._test_create, params, no_image=True) def test_create_instance_with_device_name_too_long(self): self.bdm[0]['device_name'] = 'a' * 256 @@ -181,7 +183,8 @@ def create(*args, **kwargs): self.stubs.Set(compute_api.API, 'create', create) params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm} - self.assertRaises(exc.HTTPBadRequest, self._test_create, params) + self.assertRaises(exc.HTTPBadRequest, + self._test_create, params, no_image=True) def test_create_instance_with_space_in_device_name(self): self.bdm[0]['device_name'] = 'v da' @@ -196,7 +199,8 @@ def create(*args, **kwargs): self.stubs.Set(compute_api.API, 'create', create) params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm} - self.assertRaises(exc.HTTPBadRequest, self._test_create, params) + self.assertRaises(exc.HTTPBadRequest, + self._test_create, params, no_image=True) def test_create_instance_with_invalid_size(self): self.bdm[0]['volume_size'] = 'hello world' @@ -210,7 +214,8 @@ def create(*args, **kwargs): self.stubs.Set(compute_api.API, 'create', create) params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm} - self.assertRaises(exc.HTTPBadRequest, self._test_create, params) + self.assertRaises(exc.HTTPBadRequest, + self._test_create, params, no_image=True) def test_create_instance_bdm(self): bdm = [{ @@ -241,7 +246,7 @@ def _validate_bdm(*args, **kwargs): self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm) params = {block_device_mapping.ATTRIBUTE_NAME: bdm} - self._test_create(params) + self._test_create(params, no_image=True) def test_create_instance_bdm_missing_device_name(self): del self.bdm[0]['device_name'] @@ -261,7 +266,7 @@ def _validate_bdm(*args, **kwargs): self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm) params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm} - self._test_create(params) + self._test_create(params, no_image=True) def test_create_instance_bdm_validation_error(self): def _validate(*args, **kwargs): @@ 
-271,7 +276,8 @@ def _validate(*args, **kwargs): '_validate', _validate) params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm} - self.assertRaises(exc.HTTPBadRequest, self._test_create, params) + self.assertRaises(exc.HTTPBadRequest, + self._test_create, params, no_image=True) @mock.patch('nova.compute.api.API._get_bdm_image_metadata') def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta): From 9c5c653ed2abfd13a41449693264a593aef10fc2 Mon Sep 17 00:00:00 2001 From: liu-sheng Date: Wed, 9 Jul 2014 16:12:37 +0800 Subject: [PATCH 159/486] Log translation hint for nova.api Currently, Log translation is motivated by oslo's move to prioritized translation of strings, as documented at https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation - add log translation hints for warning, error and info levels - move from LOG.warning to LOG.warn - remove use of % as a string formatter, use the log functionality instead Change-Id: I128da6cbb2be1b5b51b4a79fb38b3bf8206431d9 --- nova/api/auth.py | 7 ++-- nova/api/ec2/__init__.py | 13 +++---- nova/api/ec2/cloud.py | 5 +-- nova/api/metadata/handler.py | 28 ++++++++------- nova/api/metadata/vendordata_json.py | 9 ++--- nova/api/openstack/__init__.py | 35 ++++++++++--------- nova/api/openstack/common.py | 17 ++++----- .../openstack/compute/contrib/floating_ips.py | 5 +-- .../openstack/compute/contrib/os_networks.py | 3 +- .../compute/contrib/os_tenant_networks.py | 3 +- .../api/openstack/compute/plugins/v3/hosts.py | 4 +-- .../openstack/compute/plugins/v3/servers.py | 11 +++--- nova/api/openstack/compute/servers.py | 5 +-- nova/api/openstack/compute/views/servers.py | 6 ++-- nova/api/openstack/extensions.py | 5 +-- nova/api/openstack/wsgi.py | 10 +++--- 16 files changed, 92 insertions(+), 74 deletions(-) diff --git a/nova/api/auth.py b/nova/api/auth.py index c2efb97033..710281a00d 100644 --- a/nova/api/auth.py +++ b/nova/api/auth.py @@ -22,6 +22,7 @@ from nova import context from nova.i18n import _ +from 
nova.i18n import _LW from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common.middleware import request_id @@ -69,7 +70,7 @@ def pipeline_factory(loader, global_conf, **local_conf): # If the configuration file still contains 'ratelimit_v3', just ignore it. # We will remove this code at next release (J) if 'ratelimit_v3' in pipeline: - LOG.warn(_('ratelimit_v3 is removed from v3 api.')) + LOG.warn(_LW('ratelimit_v3 is removed from v3 api.')) pipeline.remove('ratelimit_v3') return _load_pipeline(loader, pipeline) @@ -156,6 +157,6 @@ def _get_roles(self, req): # Fallback to deprecated role header: roles = req.headers.get('X_ROLE', '') if roles: - LOG.warn(_("Sourcing roles from deprecated X-Role HTTP " - "header")) + LOG.warn(_LW("Sourcing roles from deprecated X-Role HTTP " + "header")) return [r.strip() for r in roles.split(',')] diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 036c6bff7d..eb734d12f5 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -34,6 +34,7 @@ from nova import exception from nova.i18n import _ from nova.i18n import _LE +from nova.i18n import _LW from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging @@ -165,9 +166,9 @@ def __call__(self, req): # NOTE(vish): To use incr, failures has to be a string. 
self.mc.set(failures_key, '1', time=CONF.lockout_window * 60) elif failures >= CONF.lockout_attempts: - LOG.warn(_('Access key %(access_key)s has had %(failures)d ' - 'failed authentications and will be locked out ' - 'for %(lock_mins)d minutes.'), + LOG.warn(_LW('Access key %(access_key)s has had %(failures)d ' + 'failed authentications and will be locked out ' + 'for %(lock_mins)d minutes.'), {'access_key': access_key, 'failures': failures, 'lock_mins': CONF.lockout_minutes}) @@ -489,10 +490,10 @@ def ec2_error_ex(ex, req, code=None, message=None, unexpected=False): if unexpected: log_fun = LOG.error - log_msg = _("Unexpected %(ex_name)s raised: %(ex_str)s") + log_msg = _LE("Unexpected %(ex_name)s raised: %(ex_str)s") else: log_fun = LOG.debug - log_msg = _("%(ex_name)s raised: %(ex_str)s") + log_msg = "%(ex_name)s raised: %(ex_str)s" # NOTE(jruzicka): For compatibility with EC2 API, treat expected # exceptions as client (4xx) errors. The exception error code is 500 # by default and most exceptions inherit this from NovaException even @@ -516,7 +517,7 @@ def ec2_error_ex(ex, req, code=None, message=None, unexpected=False): for k in env.keys(): if not isinstance(env[k], six.string_types): env.pop(k) - log_fun(_('Environment: %s') % jsonutils.dumps(env)) + log_fun(_LE('Environment: %s'), jsonutils.dumps(env)) if not message: message = _('Unknown error occurred.') return faults.ec2_error_response(request_id, code, message, status=status) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index aea4c7aafe..f39bd6ef8b 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -39,6 +39,7 @@ from nova import db from nova import exception from nova.i18n import _ +from nova.i18n import _LW from nova.image import s3 from nova import network from nova.network.security_group import neutron_driver @@ -1263,8 +1264,8 @@ def associate_address(self, context, instance_id, public_ip, **kwargs): # changed to support specifying a particular fixed_ip if # multiple 
exist but this may not apply to ec2.. if len(fixed_ips) > 1: - msg = _('multiple fixed_ips exist, using the first: %s') - LOG.warning(msg, fixed_ips[0]) + LOG.warn(_LW('multiple fixed_ips exist, using the first: %s'), + fixed_ips[0]) self.network_api.associate_floating_ip(context, instance, floating_address=public_ip, diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py index d93296621e..92ae37c017 100644 --- a/nova/api/metadata/handler.py +++ b/nova/api/metadata/handler.py @@ -28,6 +28,8 @@ from nova import conductor from nova import exception from nova.i18n import _ +from nova.i18n import _LE +from nova.i18n import _LW from nova.openstack.common import log as logging from nova.openstack.common import memorycache from nova import utils @@ -109,9 +111,9 @@ def __call__(self, req): else: if req.headers.get('X-Instance-ID'): LOG.warn( - _("X-Instance-ID present in request headers. The " - "'service_neutron_metadata_proxy' option must be enabled" - " to process this header.")) + _LW("X-Instance-ID present in request headers. The " + "'service_neutron_metadata_proxy' option must be" + "enabled to process this header.")) meta_data = self._handle_remote_ip_request(req) if meta_data is None: @@ -145,7 +147,8 @@ def _handle_remote_ip_request(self, req): raise webob.exc.HTTPInternalServerError(explanation=unicode(msg)) if meta_data is None: - LOG.error(_('Failed to get metadata for ip: %s'), remote_address) + LOG.error(_LE('Failed to get metadata for ip: %s'), + remote_address) return meta_data @@ -178,10 +181,10 @@ def _handle_instance_id_request(self, req): if not utils.constant_time_compare(expected_signature, signature): if instance_id: - LOG.warn(_('X-Instance-ID-Signature: %(signature)s does not ' - 'match the expected value: %(expected_signature)s ' - 'for id: %(instance_id)s. 
Request From: ' - '%(remote_address)s'), + LOG.warn(_LW('X-Instance-ID-Signature: %(signature)s does ' + 'not match the expected value: ' + '%(expected_signature)s for id: %(instance_id)s.' + ' Request From: %(remote_address)s'), {'signature': signature, 'expected_signature': expected_signature, 'instance_id': instance_id, @@ -201,14 +204,13 @@ def _handle_instance_id_request(self, req): raise webob.exc.HTTPInternalServerError(explanation=unicode(msg)) if meta_data is None: - LOG.error(_('Failed to get metadata for instance id: %s'), + LOG.error(_LE('Failed to get metadata for instance id: %s'), instance_id) if meta_data.instance['project_id'] != tenant_id: - LOG.warning(_("Tenant_id %(tenant_id)s does not match tenant_id " - "of instance %(instance_id)s."), - {'tenant_id': tenant_id, - 'instance_id': instance_id}) + LOG.warn(_LW("Tenant_id %(tenant_id)s does not match tenant_id " + "of instance %(instance_id)s."), + {'tenant_id': tenant_id, 'instance_id': instance_id}) # causes a 404 to be raised meta_data = None diff --git a/nova/api/metadata/vendordata_json.py b/nova/api/metadata/vendordata_json.py index b8e4e53cdd..ee8a938784 100644 --- a/nova/api/metadata/vendordata_json.py +++ b/nova/api/metadata/vendordata_json.py @@ -20,7 +20,7 @@ from oslo.config import cfg from nova.api.metadata import base -from nova.i18n import _ +from nova.i18n import _LW from nova.openstack.common import jsonutils from nova.openstack.common import log as logging @@ -44,12 +44,13 @@ def __init__(self, *args, **kwargs): data = jsonutils.load(fp) except IOError as e: if e.errno == errno.ENOENT: - LOG.warn(logprefix + _("file does not exist")) + LOG.warn(logprefix + _LW("file does not exist")) else: - LOG.warn(logprefix + _("Unexpected IOError when reading")) + LOG.warn(logprefix + _LW("Unexpected IOError when " + "reading")) raise e except ValueError: - LOG.warn(logprefix + _("failed to load json")) + LOG.warn(logprefix + _LW("failed to load json")) raise self._data = data diff --git 
a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index b8d6e6568e..b0e1e9e884 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -28,6 +28,9 @@ from nova.api.openstack import wsgi from nova import exception from nova.i18n import _ +from nova.i18n import _LC +from nova.i18n import _LI +from nova.i18n import _LW from nova.i18n import translate from nova import notifications from nova.openstack.common import log as logging @@ -95,7 +98,7 @@ def _error(self, inner, req): status = 500 msg_dict = dict(url=req.url, status=status) - LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict) + LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict) outer = self.status_to_type(status) if headers: outer.headers = headers @@ -229,9 +232,9 @@ def _setup_extensions(self, ext_mgr): msg_format_dict = {'collection': collection, 'ext_name': extension.extension.name} if collection not in self.resources: - LOG.warning(_('Extension %(ext_name)s: Cannot extend ' - 'resource %(collection)s: No such resource'), - msg_format_dict) + LOG.warn(_LW('Extension %(ext_name)s: Cannot extend ' + 'resource %(collection)s: No such resource'), + msg_format_dict) continue LOG.debug('Extension %(ext_name)s extended resource: ' @@ -276,19 +279,19 @@ def _check_load_extension(ext): if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist: return self._register_extension(ext) else: - LOG.warning(_("Not loading %s because it is " - "in the blacklist"), ext.obj.alias) + LOG.warn(_LW("Not loading %s because it is " + "in the blacklist"), ext.obj.alias) return False else: - LOG.warning( - _("Not loading %s because it is not in the whitelist"), - ext.obj.alias) + LOG.warn( + _LW("Not loading %s because it is not in the " + "whitelist"), ext.obj.alias) return False else: return False if not CONF.osapi_v3.enabled: - LOG.info(_("V3 API has been disabled by configuration")) + LOG.info(_LI("V3 API has been disabled by configuration")) return 
self.init_only = init_only @@ -301,8 +304,8 @@ def _check_load_extension(ext): CONF.osapi_v3.extensions_whitelist).intersection( CONF.osapi_v3.extensions_blacklist) if len(in_blacklist_and_whitelist) != 0: - LOG.warning(_("Extensions in both blacklist and whitelist: %s"), - list(in_blacklist_and_whitelist)) + LOG.warn(_LW("Extensions in both blacklist and whitelist: %s"), + list(in_blacklist_and_whitelist)) self.api_extension_manager = stevedore.enabled.EnabledExtensionManager( namespace=self.API_EXTENSION_NAMESPACE, @@ -325,7 +328,7 @@ def _check_load_extension(ext): missing_core_extensions = self.get_missing_core_extensions( self.loaded_extension_info.get_extensions().keys()) if not self.init_only and missing_core_extensions: - LOG.critical(_("Missing core API extensions: %s"), + LOG.critical(_LC("Missing core API extensions: %s"), missing_core_extensions) raise exception.CoreAPIMissing( missing_apis=missing_core_extensions) @@ -403,9 +406,9 @@ def _register_controllers(self, ext): controller = extension.controller if collection not in self.resources: - LOG.warning(_('Extension %(ext_name)s: Cannot extend ' - 'resource %(collection)s: No such resource'), - {'ext_name': ext_name, 'collection': collection}) + LOG.warn(_LW('Extension %(ext_name)s: Cannot extend ' + 'resource %(collection)s: No such resource'), + {'ext_name': ext_name, 'collection': collection}) continue LOG.debug('Extension %(ext_name)s extending resource: ' diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index ef46f3cb72..2c0b86dd74 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -30,6 +30,8 @@ from nova.compute import vm_states from nova import exception from nova.i18n import _ +from nova.i18n import _LE +from nova.i18n import _LW from nova.openstack.common import log as logging from nova import quota @@ -129,9 +131,9 @@ def status_from_state(vm_state, task_state='default'): task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN')) status 
= task_map.get(task_state, task_map['default']) if status == "UNKNOWN": - LOG.error(_("status is UNKNOWN from vm_state=%(vm_state)s " - "task_state=%(task_state)s. Bad upgrade or db " - "corrupted?"), + LOG.error(_LE("status is UNKNOWN from vm_state=%(vm_state)s " + "task_state=%(task_state)s. Bad upgrade or db " + "corrupted?"), {'vm_state': vm_state, 'task_state': task_state}) return status @@ -273,9 +275,8 @@ def remove_version_from_href(href): new_path = '/'.join(url_parts) if new_path == parsed_url.path: - msg = _('href %s does not contain version') % href - LOG.debug(msg) - raise ValueError(msg) + LOG.debug('href %s does not contain version' % href) + raise ValueError(_('href %s does not contain version') % href) parsed_url = list(parsed_url) parsed_url[2] = new_path @@ -451,8 +452,8 @@ def check_snapshots_enabled(f): @functools.wraps(f) def inner(*args, **kwargs): if not CONF.allow_instance_snapshots: - LOG.warn(_('Rejecting snapshot request, snapshots currently' - ' disabled')) + LOG.warn(_LW('Rejecting snapshot request, snapshots currently' + ' disabled')) msg = _("Instance snapshots are not permitted at this time.") raise webob.exc.HTTPBadRequest(explanation=msg) return f(*args, **kwargs) diff --git a/nova/api/openstack/compute/contrib/floating_ips.py b/nova/api/openstack/compute/contrib/floating_ips.py index 8bded0cc61..5b81150606 100644 --- a/nova/api/openstack/compute/contrib/floating_ips.py +++ b/nova/api/openstack/compute/contrib/floating_ips.py @@ -25,6 +25,7 @@ from nova.compute import utils as compute_utils from nova import exception from nova.i18n import _ +from nova.i18n import _LW from nova import network from nova.openstack.common import log as logging from nova.openstack.common import uuidutils @@ -248,8 +249,8 @@ def _add_floating_ip(self, req, id, body): if not fixed_address: fixed_address = fixed_ips[0]['address'] if len(fixed_ips) > 1: - msg = _('multiple fixed_ips exist, using the first: %s') - LOG.warning(msg, fixed_address) + 
LOG.warn(_LW('multiple fixed_ips exist, using the first: ' + '%s'), fixed_address) try: self.network_api.associate_floating_ip(context, instance, diff --git a/nova/api/openstack/compute/contrib/os_networks.py b/nova/api/openstack/compute/contrib/os_networks.py index 12e396c2a3..2cbb46ff4e 100644 --- a/nova/api/openstack/compute/contrib/os_networks.py +++ b/nova/api/openstack/compute/contrib/os_networks.py @@ -22,6 +22,7 @@ from nova.api.openstack import wsgi from nova import exception from nova.i18n import _ +from nova.i18n import _LI from nova import network from nova.openstack.common import log as logging @@ -97,7 +98,7 @@ def show(self, req, id): def delete(self, req, id): context = req.environ['nova.context'] authorize(context) - LOG.info(_("Deleting network with id %s") % id) + LOG.info(_LI("Deleting network with id %s"), id) try: self.network_api.delete(context, id) except exception.NetworkInUse as e: diff --git a/nova/api/openstack/compute/contrib/os_tenant_networks.py b/nova/api/openstack/compute/contrib/os_tenant_networks.py index b7ecf4957f..9e28b584ae 100644 --- a/nova/api/openstack/compute/contrib/os_tenant_networks.py +++ b/nova/api/openstack/compute/contrib/os_tenant_networks.py @@ -23,6 +23,7 @@ from nova import context as nova_context from nova import exception from nova.i18n import _ +from nova.i18n import _LI import nova.network from nova.openstack.common import log as logging from nova import quota @@ -122,7 +123,7 @@ def delete(self, req, id): LOG.exception(_("Failed to update usages deallocating " "network.")) - LOG.info(_("Deleting network with id %s") % id) + LOG.info(_LI("Deleting network with id %s"), id) try: self.network_api.delete(context, id) diff --git a/nova/api/openstack/compute/plugins/v3/hosts.py b/nova/api/openstack/compute/plugins/v3/hosts.py index 8d3fd00f0e..8990aa873a 100644 --- a/nova/api/openstack/compute/plugins/v3/hosts.py +++ b/nova/api/openstack/compute/plugins/v3/hosts.py @@ -153,9 +153,9 @@ def 
_set_enabled_status(self, context, host_name, enabled): on the host. """ if enabled: - LOG.audit(_("Enabling host %s.") % host_name) + LOG.audit(_("Enabling host %s."), host_name) else: - LOG.audit(_("Disabling host %s.") % host_name) + LOG.audit(_("Disabling host %s."), host_name) try: result = self.api.set_host_enabled(context, host_name=host_name, enabled=enabled) diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index 505d651b8b..b4ab37dec3 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -32,6 +32,7 @@ from nova.compute import flavors from nova import exception from nova.i18n import _ +from nova.i18n import _LW from nova.image import glance from nova import objects from nova.openstack.common import log as logging @@ -96,13 +97,13 @@ def check_whiteblack_lists(ext): if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist: return True else: - LOG.warning(_("Not loading %s because it is " - "in the blacklist"), ext.obj.alias) + LOG.warn(_LW("Not loading %s because it is " + "in the blacklist"), ext.obj.alias) return False else: - LOG.warning( - _("Not loading %s because it is not in the whitelist"), - ext.obj.alias) + LOG.warn( + _LW("Not loading %s because it is not in the " + "whitelist"), ext.obj.alias) return False def check_load_extension(ext): diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index c63cae40ea..2465c5eda5 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -34,6 +34,7 @@ from nova.compute import flavors from nova import exception from nova.i18n import _ +from nova.i18n import _LW from nova import objects from nova.openstack.common import log as logging from nova.openstack.common import strutils @@ -78,8 +79,8 @@ def make_server(elem, detailed=False): global XML_WARNING if not XML_WARNING: - LOG.warning(_('XML support has 
been deprecated and may be removed ' - 'as early as the Juno release.')) + LOG.warn(_LW('XML support has been deprecated and may be removed ' + 'as early as the Juno release.')) XML_WARNING = True if detailed: diff --git a/nova/api/openstack/compute/views/servers.py b/nova/api/openstack/compute/views/servers.py index f168aac4e5..6d97c36933 100644 --- a/nova/api/openstack/compute/views/servers.py +++ b/nova/api/openstack/compute/views/servers.py @@ -21,7 +21,7 @@ from nova.api.openstack.compute.views import flavors as views_flavors from nova.api.openstack.compute.views import images as views_images from nova.compute import flavors -from nova.i18n import _ +from nova.i18n import _LW from nova.objects import base as obj_base from nova.openstack.common import log as logging from nova.openstack.common import timeutils @@ -194,8 +194,8 @@ def _get_image(self, request, instance): def _get_flavor(self, request, instance): instance_type = flavors.extract_flavor(instance) if not instance_type: - LOG.warn(_("Instance has had its instance_type removed " - "from the DB"), instance=instance) + LOG.warn(_LW("Instance has had its instance_type removed " + "from the DB"), instance=instance) return {} flavor_id = instance_type["flavorid"] flavor_bookmark = self._flavor_builder._get_bookmark_link(request, diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 65099be0a0..f475a82ebf 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -27,6 +27,7 @@ from nova.api.openstack import xmlutil from nova import exception from nova.i18n import _ +from nova.i18n import _LW from nova.openstack.common import importutils from nova.openstack.common import log as logging import nova.policy @@ -275,8 +276,8 @@ def _load_extensions(self): try: self.load_extension(ext_factory) except Exception as exc: - LOG.warn(_('Failed to load extension %(ext_factory)s: ' - '%(exc)s'), + LOG.warn(_LW('Failed to load extension %(ext_factory)s: ' + 
'%(exc)s'), {'ext_factory': ext_factory, 'exc': exc}) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 531213e158..0519ea5ed1 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -27,6 +27,8 @@ from nova import exception from nova import i18n from nova.i18n import _ +from nova.i18n import _LE +from nova.i18n import _LI from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import utils @@ -679,14 +681,14 @@ def __exit__(self, ex_type, ex_value, ex_traceback): # http://bugs.python.org/issue7853 elif issubclass(ex_type, TypeError): exc_info = (ex_type, ex_value, ex_traceback) - LOG.error(_('Exception handling resource: %s') % ex_value, - exc_info=exc_info) + LOG.error(_LE('Exception handling resource: %s'), ex_value, + exc_info=exc_info) raise Fault(webob.exc.HTTPBadRequest()) elif isinstance(ex_value, Fault): - LOG.info(_("Fault thrown: %s"), unicode(ex_value)) + LOG.info(_LI("Fault thrown: %s"), unicode(ex_value)) raise ex_value elif isinstance(ex_value, webob.exc.HTTPException): - LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value)) + LOG.info(_LI("HTTP exception thrown: %s"), unicode(ex_value)) raise Fault(ex_value) # We didn't handle the exception From 855fe98ef410b773f4a910e48628636c5d18c234 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 17 Jun 2014 22:34:28 -0700 Subject: [PATCH 160/486] Fix and Gate on E265 pep8 E265 makes sure block comment start with '# '. Fix and gate on this new rule as it helps improve comment readability. In the few cases where the comment was just commented out code, remove the comment. 
Change-Id: Iea1c445df8ddc2b6c17a4ab697ad756eef2f91fa --- nova/api/ec2/__init__.py | 2 +- nova/api/ec2/cloud.py | 2 +- .../compute/contrib/server_diagnostics.py | 2 +- .../openstack/compute/plugins/v3/servers.py | 2 +- nova/api/openstack/compute/servers.py | 6 ++--- nova/api/openstack/wsgi.py | 8 +++---- nova/api/openstack/xmlutil.py | 2 +- nova/api/sizelimit.py | 2 +- nova/cells/state.py | 1 - nova/cmd/baremetal_deploy_helper.py | 2 +- nova/cmd/manage.py | 8 +++---- nova/compute/api.py | 10 ++++----- nova/compute/manager.py | 10 ++++----- nova/compute/utils.py | 2 +- nova/conductor/manager.py | 2 +- nova/conductor/tasks/live_migrate.py | 10 ++++----- nova/console/api.py | 10 ++++----- nova/console/manager.py | 6 ++--- nova/console/xvp.py | 6 ++--- nova/db/sqlalchemy/api.py | 10 ++++----- nova/db/sqlalchemy/models.py | 19 ++++++++-------- nova/image/api.py | 10 ++++----- nova/image/download/file.py | 4 ++-- nova/image/glance.py | 12 +++++----- nova/image/s3.py | 4 ++-- nova/network/linux_net.py | 2 +- nova/network/manager.py | 2 +- nova/network/nova_ipam_lib.py | 6 ++--- nova/objects/fields.py | 8 +++---- nova/safe_utils.py | 2 +- nova/scheduler/host_manager.py | 8 +++---- nova/tests/api/ec2/test_cinder_cloud.py | 4 ++-- nova/tests/api/ec2/test_ec2_validate.py | 8 +++---- .../compute/contrib/test_aggregates.py | 2 +- .../compute/contrib/test_flavor_swap.py | 2 +- .../contrib/test_instance_usage_audit_log.py | 8 +++---- .../compute/extensions/foxinsocks.py | 4 ++-- .../compute/plugins/v3/test_flavors.py | 3 --- .../plugins/v3/test_server_metadata.py | 6 ++--- .../compute/plugins/v3/test_servers.py | 2 +- nova/tests/api/openstack/compute/test_api.py | 8 +++---- nova/tests/api/openstack/compute/test_auth.py | 2 +- .../api/openstack/compute/test_limits.py | 8 +++---- .../openstack/compute/test_server_metadata.py | 12 +++++----- .../api/openstack/compute/test_servers.py | 2 +- .../api/openstack/compute/test_v3_auth.py | 2 +- nova/tests/api/openstack/test_wsgi.py | 2 +- 
nova/tests/compute/test_compute.py | 18 +++++++-------- nova/tests/console/test_console.py | 2 -- nova/tests/db/test_db_api.py | 20 ++++++++--------- nova/tests/db/test_migration_utils.py | 2 +- nova/tests/glance/stubs.py | 4 ++-- nova/tests/image/fake.py | 2 +- nova/tests/image/test_glance.py | 4 ++-- nova/tests/integrated/test_servers.py | 2 +- nova/tests/network/test_neutronv2.py | 10 ++++----- nova/tests/objects/test_compute_node.py | 4 ++-- nova/tests/objects/test_objects.py | 2 +- nova/tests/scheduler/test_host_filters.py | 14 ++++++------ nova/tests/test_safeutils.py | 10 ++++----- nova/tests/test_utils.py | 2 +- nova/tests/virt/libvirt/fake_imagebackend.py | 2 +- nova/tests/virt/libvirt/test_driver.py | 10 ++++----- nova/tests/virt/xenapi/stubs.py | 2 +- nova/tests/virt/xenapi/test_xenapi.py | 4 ++-- nova/utils.py | 2 +- .../migrate_repo/versions/001_init.py | 4 ---- nova/virt/driver.py | 2 +- nova/virt/firewall.py | 2 +- nova/virt/hyperv/livemigrationutils.py | 1 - nova/virt/hyperv/networkutils.py | 2 +- nova/virt/hyperv/vif.py | 2 +- nova/virt/hyperv/vmops.py | 2 +- nova/virt/hyperv/vmutils.py | 22 +++++++++---------- nova/virt/hyperv/vmutilsv2.py | 6 ++--- nova/virt/hyperv/volumeops.py | 16 +++++++------- nova/virt/hyperv/volumeutils.py | 4 ++-- nova/virt/libvirt/driver.py | 18 +++++++-------- nova/virt/libvirt/imagebackend.py | 4 ++-- nova/virt/libvirt/utils.py | 2 +- nova/virt/libvirt/volume.py | 18 +++++++-------- nova/virt/vmwareapi/driver.py | 2 +- nova/virt/vmwareapi/vm_util.py | 2 +- nova/virt/vmwareapi/vmops.py | 4 ++-- nova/virt/xenapi/agent.py | 2 +- nova/virt/xenapi/fake.py | 6 ++--- nova/virt/xenapi/vm_utils.py | 2 +- nova/virt/xenapi/vmops.py | 4 ++-- nova/virt/xenapi/volumeops.py | 2 +- .../etc/xapi.d/plugins/pluginlib_nova.py | 6 ++--- tox.ini | 4 ++-- 91 files changed, 250 insertions(+), 260 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 036c6bff7d..0c73539f1c 100644 --- a/nova/api/ec2/__init__.py +++ 
b/nova/api/ec2/__init__.py @@ -76,7 +76,7 @@ CONF.import_opt('use_forwarded_for', 'nova.api.auth') -## Fault Wrapper around all EC2 requests ## +# Fault Wrapper around all EC2 requests class FaultWrapper(wsgi.Middleware): """Calls the middleware stack, captures any exceptions into faults.""" diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index aea4c7aafe..c3014aabbf 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -411,7 +411,7 @@ def describe_key_pairs(self, context, key_name=None, **kwargs): if key_name is not None: key_pairs = [x for x in key_pairs if x['name'] in key_name] - #If looking for non existent key pair + # If looking for non existent key pair if key_name is not None and not key_pairs: msg = _('Could not find key pair(s): %s') % ','.join(key_name) raise exception.KeypairNotFound(message=msg) diff --git a/nova/api/openstack/compute/contrib/server_diagnostics.py b/nova/api/openstack/compute/contrib/server_diagnostics.py index 9da8e1e753..215f6f4fac 100644 --- a/nova/api/openstack/compute/contrib/server_diagnostics.py +++ b/nova/api/openstack/compute/contrib/server_diagnostics.py @@ -65,7 +65,7 @@ class Server_diagnostics(extensions.ExtensionDescriptor): def get_resources(self): parent_def = {'member_name': 'server', 'collection_name': 'servers'} - #NOTE(bcwaldon): This should be prefixed with 'os-' + # NOTE(bcwaldon): This should be prefixed with 'os-' ext = extensions.ResourceExtension('diagnostics', ServerDiagnosticsController(), parent=parent_def) diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index 505d651b8b..7be8a97022 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -454,7 +454,7 @@ def create(self, req, body): # Replace with an extension point when the os-networks # extension is ported. 
Currently reworked # to take into account is_neutron - #if (self.ext_mgr.is_loaded('os-networks') + # if (self.ext_mgr.is_loaded('os-networks') # or utils.is_neutron()): # requested_networks = server_dict.get('networks') diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index c63cae40ea..5077b188b3 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -692,9 +692,9 @@ def _get_requested_networks(self, requested_networks): "(%s)") % network_uuid raise exc.HTTPBadRequest(explanation=msg) - #fixed IP address is optional - #if the fixed IP address is not provided then - #it will use one of the available IP address from the network + # fixed IP address is optional + # if the fixed IP address is not provided then + # it will use one of the available IP address from the network address = network.get('fixed_ip', None) if address is not None and not utils.is_valid_ip_address( address): diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 531213e158..9dbf3bc357 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -373,7 +373,7 @@ def to_xml_string(self, node, has_atom=False): self._add_xmlns(node, has_atom) return node.toxml('UTF-8') - #NOTE (ameade): the has_atom should be removed after all of the + # NOTE (ameade): the has_atom should be removed after all of the # xml serializers and view builders have been updated to the current # spec that required all responses include the xmlns:atom, the has_atom # flag is to prevent current tests from breaking @@ -393,7 +393,7 @@ def _to_xml_node(self, doc, metadata, nodename, data): if xmlns: result.setAttribute('xmlns', xmlns) - #TODO(bcwaldon): accomplish this without a type-check + # TODO(bcwaldon): accomplish this without a type-check if isinstance(data, list): collections = metadata.get('list_collections', {}) if nodename in collections: @@ -412,7 +412,7 @@ def _to_xml_node(self, doc, metadata, 
nodename, data): for item in data: node = self._to_xml_node(doc, metadata, singular, item) result.appendChild(node) - #TODO(bcwaldon): accomplish this without a type-check + # TODO(bcwaldon): accomplish this without a type-check elif isinstance(data, dict): collections = metadata.get('dict_collections', {}) if nodename in collections: @@ -937,7 +937,7 @@ def _process_stack(self, request, action, action_args, try: contents = {} if self._should_have_body(request): - #allow empty body with PUT and POST + # allow empty body with PUT and POST if request.content_length == 0: contents = {'body': None} else: diff --git a/nova/api/openstack/xmlutil.py b/nova/api/openstack/xmlutil.py index b401d83525..679b873a16 100644 --- a/nova/api/openstack/xmlutil.py +++ b/nova/api/openstack/xmlutil.py @@ -990,7 +990,7 @@ def safe_minidom_parse_string(xml_string): return minidom.parseString(xml_string, parser=ProtectedExpatParser()) except (sax.SAXParseException, ValueError, expat.ExpatError, LookupError) as e: - #NOTE(Vijaya Erukala): XML input such as + # NOTE(Vijaya Erukala): XML input such as # # raises LookupError: unknown encoding: TF-8 raise exception.MalformedRequestBody(reason=str(e)) diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py index 1fab96b3df..aa5c42e6aa 100644 --- a/nova/api/sizelimit.py +++ b/nova/api/sizelimit.py @@ -24,7 +24,7 @@ from nova import wsgi -#default request size is 112k +# default request size is 112k max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size', default=114688, help='The maximum body size ' diff --git a/nova/cells/state.py b/nova/cells/state.py index 27261a2c98..427f4087d0 100644 --- a/nova/cells/state.py +++ b/nova/cells/state.py @@ -53,7 +53,6 @@ CONF.import_opt('name', 'nova.cells.opts', group='cells') CONF.import_opt('reserve_percent', 'nova.cells.opts', group='cells') CONF.import_opt('mute_child_interval', 'nova.cells.opts', group='cells') -#CONF.import_opt('capabilities', 'nova.cells.opts', group='cells') 
CONF.register_opts(cell_state_manager_opts, group='cells') diff --git a/nova/cmd/baremetal_deploy_helper.py b/nova/cmd/baremetal_deploy_helper.py index 348561af99..b4f546d1d9 100644 --- a/nova/cmd/baremetal_deploy_helper.py +++ b/nova/cmd/baremetal_deploy_helper.py @@ -134,7 +134,7 @@ def mkswap(dev, label='swap1'): def mkfs_ephemeral(dev, label="ephemeral0"): - #TODO(jogo) support non-default mkfs options as well + # TODO(jogo) support non-default mkfs options as well disk.mkfs("default", label, dev) diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py index 6cd599f3b0..125439c452 100644 --- a/nova/cmd/manage.py +++ b/nova/cmd/manage.py @@ -614,10 +614,10 @@ def modify(self, fixed_range, project=None, host=None, admin_context = context.get_admin_context() network = db.network_get_by_cidr(admin_context, fixed_range) net = {} - #User can choose the following actions each for project and host. - #1) Associate (set not None value given by project/host parameter) - #2) Disassociate (set None by disassociate parameter) - #3) Keep unchanged (project/host key is not added to 'net') + # User can choose the following actions each for project and host. + # 1) Associate (set not None value given by project/host parameter) + # 2) Disassociate (set None by disassociate parameter) + # 3) Keep unchanged (project/host key is not added to 'net') if dis_project: net['project_id'] = None if dis_host: diff --git a/nova/compute/api.py b/nova/compute/api.py index 80ef8f350c..7cebe2c7e1 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1224,7 +1224,7 @@ def _populate_instance_for_create(self, instance, image, security_groups) return instance - #NOTE(bcwaldon): No policy check since this is only used by scheduler and + # NOTE(bcwaldon): No policy check since this is only used by scheduler and # the compute api. That should probably be cleaned up, though. 
def create_db_entry_for_new_instance(self, context, instance_type, image, instance, security_group, block_device_mapping, num_instances, @@ -1833,7 +1833,7 @@ def get_all(self, context, search_opts=None, sort_key='created_at', parameter. """ - #TODO(bcwaldon): determine the best argument for target here + # TODO(bcwaldon): determine the best argument for target here target = { 'project_id': context.project_id, 'user_id': context.user_id, @@ -3475,7 +3475,7 @@ def add_host_to_aggregate(self, context, aggregate_id, host_name): aggregate.add_host(context, host_name) self._update_az_cache_for_host(context, host_name, aggregate.metadata) - #NOTE(jogo): Send message to host to support resource pools + # NOTE(jogo): Send message to host to support resource pools self.compute_rpcapi.add_aggregate_host(context, aggregate=aggregate, host_param=host_name, host=host_name) aggregate_payload.update({'name': aggregate['name']}) @@ -3799,7 +3799,7 @@ def add_to_instance(self, context, instance, security_group_name): instance_uuid = instance['uuid'] - #check if the security group is associated with the server + # check if the security group is associated with the server if self.is_associated_with_server(security_group, instance_uuid): raise exception.SecurityGroupExistsForInstance( security_group_id=security_group['id'], @@ -3822,7 +3822,7 @@ def remove_from_instance(self, context, instance, security_group_name): instance_uuid = instance['uuid'] - #check if the security group is associated with the server + # check if the security group is associated with the server if not self.is_associated_with_server(security_group, instance_uuid): raise exception.SecurityGroupNotExistsForInstance( security_group_id=security_group['id'], diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 98f928111b..b5ca1c057d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1088,7 +1088,7 @@ def get_console_topic(self, context): Currently this is just set in the flags for 
each compute host. """ - #TODO(mdragon): perhaps make this variable by console_type? + # TODO(mdragon): perhaps make this variable by console_type? return '%s.%s' % (CONF.console_topic, CONF.console_host) def get_console_pool_info(self, context, console_type): @@ -2583,7 +2583,7 @@ def rebuild_instance(self, context, instance, orig_image_ref, image_ref, # This instance.exists message should contain the original # image_ref, not the new one. Since the DB has been updated # to point to the new one... we have to override it. - #TODO(jaypipes): Move generate_image_url() into the nova.image.api + # TODO(jaypipes): Move generate_image_url() into the nova.image.api orig_image_ref_url = glance.generate_image_url(orig_image_ref) extra_usage_info = {'image_ref_url': orig_image_ref_url} self.conductor_api.notify_usage_exists(context, @@ -5400,9 +5400,9 @@ def _sync_power_states(self, context): 'num_vm_instances': num_vm_instances}) for db_instance in db_instances: - #NOTE(melwitt): This must be synchronized as we query state from - # two separate sources, the driver and the database. - # They are set (in stop_instance) and read, in sync. + # NOTE(melwitt): This must be synchronized as we query state from + # two separate sources, the driver and the database. + # They are set (in stop_instance) and read, in sync. 
@utils.synchronized(db_instance.uuid) def query_driver_power_state_and_sync(): self._query_driver_power_state_and_sync(context, db_instance) diff --git a/nova/compute/utils.py b/nova/compute/utils.py index 8836f5039d..1c593738c6 100644 --- a/nova/compute/utils.py +++ b/nova/compute/utils.py @@ -42,7 +42,7 @@ def exception_to_dict(fault): """Converts exceptions to a dict for use in notifications.""" - #TODO(johngarbutt) move to nova/exception.py to share with wrap_exception + # TODO(johngarbutt) move to nova/exception.py to share with wrap_exception code = 500 if hasattr(fault, "kwargs"): diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py index ba102f4e3d..d0ab4f981b 100644 --- a/nova/conductor/manager.py +++ b/nova/conductor/manager.py @@ -571,7 +571,7 @@ def _live_migrate(self, context, instance, scheduler_hint, exception.InstanceNotRunning, exception.MigrationPreCheckError) as ex: with excutils.save_and_reraise_exception(): - #TODO(johngarbutt) - eventually need instance actions here + # TODO(johngarbutt) - eventually need instance actions here request_spec = {'instance_properties': { 'uuid': instance['uuid'], }, } diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py index f92f94e22f..7e016fd858 100644 --- a/nova/conductor/tasks/live_migrate.py +++ b/nova/conductor/tasks/live_migrate.py @@ -60,8 +60,8 @@ def execute(self): else: self._check_requested_destination() - #TODO(johngarbutt) need to move complexity out of compute manager - #TODO(johngarbutt) disk_over_commit? + # TODO(johngarbutt) need to move complexity out of compute manager + # TODO(johngarbutt) disk_over_commit? 
return self.compute_rpcapi.live_migration(self.context, host=self.source, instance=self.instance, @@ -70,7 +70,7 @@ def execute(self): migrate_data=self.migrate_data) def rollback(self): - #TODO(johngarbutt) need to implement the clean up operation + # TODO(johngarbutt) need to implement the clean up operation # but this will make sense only once we pull in the compute # calls, since this class currently makes no state changes, # except to call the compute method, that has no matching @@ -141,7 +141,7 @@ def _call_livem_checks_on_host(self, destination): destination, self.block_migration, self.disk_over_commit) def _find_destination(self): - #TODO(johngarbutt) this retry loop should be shared + # TODO(johngarbutt) this retry loop should be shared attempted_hosts = [self.source] image = None if self.instance.image_ref: @@ -187,5 +187,5 @@ def execute(context, instance, destination, destination, block_migration, disk_over_commit) - #TODO(johngarbutt) create a superclass that contains a safe_execute call + # TODO(johngarbutt) create a superclass that contains a safe_execute call return task.execute() diff --git a/nova/console/api.py b/nova/console/api.py index f3cb9d3b42..5b1dbb7050 100644 --- a/nova/console/api.py +++ b/nova/console/api.py @@ -46,11 +46,11 @@ def delete_console(self, context, instance_uuid, console_uuid): rpcapi.remove_console(context, console['id']) def create_console(self, context, instance_uuid): - #NOTE(mdragon): If we wanted to return this the console info - # here, as we would need to do a call. - # They can just do an index later to fetch - # console info. I am not sure which is better - # here. + # NOTE(mdragon): If we wanted to return this the console info + # here, as we would need to do a call. + # They can just do an index later to fetch + # console info. I am not sure which is better + # here. 
instance = self._get_instance(context, instance_uuid) topic = self._get_console_topic(context, instance['host']) server = None diff --git a/nova/console/manager.py b/nova/console/manager.py index 2e66320a94..95c07352f7 100644 --- a/nova/console/manager.py +++ b/nova/console/manager.py @@ -110,9 +110,9 @@ def _get_pool_for_instance_host(self, context, instance_host): self.host, console_type) except exception.NotFound: - #NOTE(mdragon): Right now, the only place this info exists is the - # compute worker's flagfile, at least for - # xenserver. Thus we ned to ask. + # NOTE(mdragon): Right now, the only place this info exists is the + # compute worker's flagfile, at least for + # xenserver. Thus we ned to ask. if CONF.stub_compute: pool_info = {'address': '127.0.0.1', 'username': 'test', diff --git a/nova/console/xvp.py b/nova/console/xvp.py index 85e3f43235..48d860def8 100644 --- a/nova/console/xvp.py +++ b/nova/console/xvp.py @@ -69,7 +69,7 @@ def console_type(self): def get_port(self, context): """Get available port for consoles that need one.""" - #TODO(mdragon): implement port selection for non multiplex ports, + # TODO(mdragon): implement port selection for non multiplex ports, # we are not using that, but someone else may want # it. return CONF.console_xvp_multiplex_port @@ -131,7 +131,7 @@ def _xvp_stop(self): try: os.kill(pid, signal.SIGTERM) except OSError: - #if it's already not running, no problem. + # if it's already not running, no problem. 
pass def _xvp_start(self): @@ -196,7 +196,7 @@ def _xvp_encrypt(self, password, is_pool_password=False): if is_pool_password: maxlen = 16 flag = '-x' - #xvp will blow up on passwords that are too long (mdragon) + # xvp will blow up on passwords that are too long (mdragon) password = password[:maxlen] out, err = utils.execute('xvp', flag, process_input=password) if err: diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index f9d62a632d..af8ebd2e81 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1759,7 +1759,7 @@ def _build_instance_get(context, session=None, # Already always joined above continue query = query.options(joinedload(column)) - #NOTE(alaski) Stop lazy loading of columns not needed. + # NOTE(alaski) Stop lazy loading of columns not needed. for col in ['metadata', 'system_metadata']: if col not in columns_to_join: query = query.options(noload(col)) @@ -2612,8 +2612,8 @@ def network_get_all_by_uuids(context, network_uuids, project_only): if not result: raise exception.NoNetworksFound() - #check if the result contains all the networks - #we are looking for + # check if the result contains all the networks + # we are looking for for network_uuid in network_uuids: for network in result: if network['uuid'] == network_uuid: @@ -5181,7 +5181,7 @@ def aggregate_delete(context, aggregate_id): if count == 0: raise exception.AggregateNotFound(aggregate_id=aggregate_id) - #Delete Metadata + # Delete Metadata model_query(context, models.AggregateMetadata, session=session).\ filter_by(aggregate_id=aggregate_id).\ @@ -5578,7 +5578,7 @@ def task_log_end_task(context, task_name, period_beginning, period_ending, period_ending, host, session=session).\ update(values) if rows == 0: - #It's not running! + # It's not running! 
raise exception.TaskNotRunning(task_name=task_name, host=host) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 739c8aaf28..ede89429cc 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -537,9 +537,10 @@ class BlockDeviceMapping(BASE, NovaBase): Index('block_device_mapping_instance_uuid_volume_id_idx', 'instance_uuid', 'volume_id'), Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'), - #TODO(sshturm) Should be dropped. `virtual_name` was dropped - #in 186 migration, - #Duplicates `block_device_mapping_instance_uuid_device_name_idx` index. + # TODO(sshturm) Should be dropped. `virtual_name` was dropped + # in 186 migration, + # Duplicates `block_device_mapping_instance_uuid_device_name_idx` + # index. Index("block_device_mapping_instance_uuid_virtual_name" "_device_name_idx", 'instance_uuid', 'device_name'), ) @@ -569,7 +570,7 @@ class BlockDeviceMapping(BASE, NovaBase): # With EC2 API, # default True for ami specified device. # default False for created with other timing. 
- #TODO(sshturm) add default in db + # TODO(sshturm) add default in db delete_on_termination = Column(Boolean, default=False) snapshot_id = Column(String(36)) @@ -735,7 +736,7 @@ class Migration(BASE, NovaBase): old_instance_type_id = Column(Integer()) new_instance_type_id = Column(Integer()) instance_uuid = Column(String(36), ForeignKey('instances.uuid')) - #TODO(_cerberus_): enum + # TODO(_cerberus_): enum status = Column(String(255)) instance = relationship("Instance", foreign_keys=instance_uuid, @@ -838,12 +839,12 @@ class FixedIp(BASE, NovaBase): instance_uuid = Column(String(36), ForeignKey('instances.uuid')) # associated means that a fixed_ip has its instance_id column set # allocated means that a fixed_ip has its virtual_interface_id column set - #TODO(sshturm) add default in db + # TODO(sshturm) add default in db allocated = Column(Boolean, default=False) # leased means dhcp bridge has leased the ip - #TODO(sshturm) add default in db + # TODO(sshturm) add default in db leased = Column(Boolean, default=False) - #TODO(sshturm) add default in db + # TODO(sshturm) add default in db reserved = Column(Boolean, default=False) host = Column(String(255)) network = relationship(Network, @@ -879,7 +880,7 @@ class FloatingIp(BASE, NovaBase): project_id = Column(String(255)) host = Column(String(255)) # , ForeignKey('hosts.id')) auto_assigned = Column(Boolean, default=False) - #TODO(sshturm) add default in db + # TODO(sshturm) add default in db pool = Column(String(255)) interface = Column(String(255)) fixed_ip = relationship(FixedIp, diff --git a/nova/image/api.py b/nova/image/api.py index c5f983a6f4..3ececf2fc0 100644 --- a/nova/image/api.py +++ b/nova/image/api.py @@ -46,11 +46,11 @@ def _get_session(self, _context): :param context: The `nova.context.Context` object for the request """ - #TODO(jaypipes): Refactor glance.get_remote_image_service and - # glance.get_default_image_service into a single - # method that takes a context and actually respects - # it, 
returning a real session object that keeps - # the context alive... + # TODO(jaypipes): Refactor glance.get_remote_image_service and + # glance.get_default_image_service into a single + # method that takes a context and actually respects + # it, returning a real session object that keeps + # the context alive... return glance.get_default_image_service() def get_all(self, context, **kwargs): diff --git a/nova/image/download/file.py b/nova/image/download/file.py index 93ec551c65..a416835c87 100644 --- a/nova/image/download/file.py +++ b/nova/image/download/file.py @@ -70,7 +70,7 @@ class FileTransfer(xfer_base.TransferBase): desc_required_keys = ['id', 'mountpoint'] - #NOTE(jbresnah) because the group under which these options are added is + # NOTE(jbresnah) because the group under which these options are added is # dyncamically determined these options need to stay out of global space # or they will confuse generate_sample.sh filesystem_opts = [ @@ -143,7 +143,7 @@ def _normalize_destination(self, nova_mount, glance_mount, path): def download(self, context, url_parts, dst_file, metadata, **kwargs): self.filesystems = self._get_options() if not self.filesystems: - #NOTE(jbresnah) when nothing is configured assume legacy behavior + # NOTE(jbresnah) when nothing is configured assume legacy behavior nova_mountpoint = '/' glance_mountpoint = '/' else: diff --git a/nova/image/glance.py b/nova/image/glance.py index 7d20ad277c..b28629a980 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -148,7 +148,7 @@ def _create_glance_client(context, host, port, use_ssl, version=1): params['token'] = context.auth_token params['identity_headers'] = generate_identity_headers(context) if utils.is_valid_ipv6(host): - #if so, it is ipv6 address, need to wrap it with '[]' + # if so, it is ipv6 address, need to wrap it with '[]' host = '[%s]' % host endpoint = '%s://%s:%s' % (scheme, host, port) return glanceclient.Client(str(version), endpoint, **params) @@ -250,7 +250,7 @@ 
class GlanceImageService(object): def __init__(self, client=None): self._client = client or GlanceClientWrapper() - #NOTE(jbresnah) build the table of download handlers at the beginning + # NOTE(jbresnah) build the table of download handlers at the beginning # so that operators can catch errors at load time rather than whenever # a user attempts to use a module. Note this cannot be done in glance # space when this python module is loaded because the download module @@ -366,7 +366,7 @@ def update(self, context, image_id, image_meta, data=None, """Modify the given image with the new data.""" image_meta = _translate_to_glance(image_meta) image_meta['purge_props'] = purge_props - #NOTE(bcwaldon): id is not an editable field, but it is likely to be + # NOTE(bcwaldon): id is not an editable field, but it is likely to be # passed in by calling code. Let's be nice and ignore it. image_meta.pop('id', None) if data: @@ -529,7 +529,7 @@ def _convert_to_string(metadata): def _extract_attributes(image): - #NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform + # NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform # a get(), resulting in a useless request back to glance. 
This list is # therefore sorted, with dependent attributes as the end # 'deleted_at' depends on 'deleted' @@ -552,7 +552,7 @@ def _extract_attributes(image): # image may not have 'name' attr elif attr == 'name': output[attr] = getattr(image, attr, None) - #NOTE(liusheng): queued image may not have these attributes and 'name' + # NOTE(liusheng): queued image may not have these attributes and 'name' elif queued and attr in queued_exclude_attrs: output[attr] = getattr(image, attr, None) else: @@ -624,7 +624,7 @@ def get_remote_image_service(context, image_href): :returns: a tuple of the form (image_service, image_id) """ - #NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a + # NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a # standalone image ID if '/' not in str(image_href): image_service = get_default_image_service() diff --git a/nova/image/s3.py b/nova/image/s3.py index 05a532baee..fa7278e257 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -160,7 +160,7 @@ def update(self, context, image_id, metadata, data=None): return self._translate_uuid_to_id(context, image) def detail(self, context, **kwargs): - #NOTE(bcwaldon): sort asc to make sure we assign lower ids + # NOTE(bcwaldon): sort asc to make sure we assign lower ids # to older images kwargs.setdefault('sort_dir', 'asc') images = self.service.detail(context, **kwargs) @@ -264,7 +264,7 @@ def _translate_dependent_image_id(image_key, image_id): 'properties': properties}) metadata['properties']['image_state'] = 'pending' - #TODO(bcwaldon): right now, this removes user-defined ids. + # TODO(bcwaldon): right now, this removes user-defined ids. # We need to re-enable this. 
metadata.pop('id', None) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 84885be6d5..e88cf25f67 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -980,7 +980,7 @@ def get_dhcp_opts(context, network_ref): vifs = objects.VirtualInterfaceList.get_by_instance_uuid( context, instance_uuid) if vifs: - #offer a default gateway to the first virtual interface + # offer a default gateway to the first virtual interface default_gw_vif[instance_uuid] = vifs[0].id for fixedip in fixedips: diff --git a/nova/network/manager.py b/nova/network/manager.py index ac83165c2b..4e4ae7fc70 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -1372,7 +1372,7 @@ def setup_networks_on_host(self, context, instance_id, host, for vif in vifs: network = objects.Network.get_by_id(context, vif.network_id) if not network.multi_host: - #NOTE (tr3buchet): if using multi_host, host is instance[host] + # NOTE (tr3buchet): if using multi_host, host is instance[host] host = network['host'] if self.host == host or host is None: # at this point i am the correct host, or host doesn't diff --git a/nova/network/nova_ipam_lib.py b/nova/network/nova_ipam_lib.py index 58afe93d23..295af08c4c 100644 --- a/nova/network/nova_ipam_lib.py +++ b/nova/network/nova_ipam_lib.py @@ -50,9 +50,9 @@ def get_subnets_by_net_id(self, context, tenant_id, net_id, _vif_id=None): 'version': 4, 'dns1': n.dns1, 'dns2': n.dns2} - #TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4. - # this is probably bad as there is no way to add v6 - # dns to nova + # TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4. 
+ # this is probably bad as there is no way to add v6 + # dns to nova subnet_v6 = { 'network_id': n.uuid, 'cidr': n.cidr_v6, diff --git a/nova/objects/fields.py b/nova/objects/fields.py index fad6011c5e..27eb12fd96 100644 --- a/nova/objects/fields.py +++ b/nova/objects/fields.py @@ -405,10 +405,10 @@ def coerce(self, obj, attr, value): raise ValueError(_('A dict is required here')) for key, element in value.items(): if not isinstance(key, six.string_types): - #NOTE(guohliu) In order to keep compatibility with python3 - #we need to use six.string_types rather than basestring here, - #since six.string_types is a tuple, so we need to pass the - #real type in. + # NOTE(guohliu) In order to keep compatibility with python3 + # we need to use six.string_types rather than basestring here, + # since six.string_types is a tuple, so we need to pass the + # real type in. raise KeyTypeError(six.string_types[0], key) value[key] = self._element_type.coerce( obj, '%s["%s"]' % (attr, key), element) diff --git a/nova/safe_utils.py b/nova/safe_utils.py index a6d2734733..ce9499bf80 100644 --- a/nova/safe_utils.py +++ b/nova/safe_utils.py @@ -30,7 +30,7 @@ def getcallargs(function, *args, **kwargs): keyed_args.update(kwargs) - #NOTE(alaski) the implicit 'self' or 'cls' argument shows up in + # NOTE(alaski) the implicit 'self' or 'cls' argument shows up in # argnames but not in args or kwargs. Uses 'in' rather than '==' because # some tests use 'self2'. 
if 'self' in argnames[0] or 'cls' == argnames[0]: diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py index 9cea311ebf..2708b843e2 100644 --- a/nova/scheduler/host_manager.py +++ b/nova/scheduler/host_manager.py @@ -149,9 +149,9 @@ def update_service(self, service): self.service = ReadOnlyDict(service) def _update_metrics_from_compute_node(self, compute): - #NOTE(llu): The 'or []' is to avoid json decode failure of None - # returned from compute.get, because DB schema allows - # NULL in the metrics column + # NOTE(llu): The 'or []' is to avoid json decode failure of None + # returned from compute.get, because DB schema allows + # NULL in the metrics column metrics = compute.get('metrics', []) or [] if metrics: metrics = jsonutils.loads(metrics) @@ -189,7 +189,7 @@ def update_from_compute_node(self, compute): self.disk_mb_used = compute['local_gb_used'] * 1024 - #NOTE(jogo) free_ram_mb can be negative + # NOTE(jogo) free_ram_mb can be negative self.free_ram_mb = compute['free_ram_mb'] self.total_usable_ram_mb = all_ram_mb self.total_usable_disk_gb = compute['local_gb'] diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py index bb48b68312..6a8c51dc1d 100644 --- a/nova/tests/api/ec2/test_cinder_cloud.py +++ b/nova/tests/api/ec2/test_cinder_cloud.py @@ -852,7 +852,7 @@ def test_stop_start_with_volume(self): self.assertEqual(vol['status'], "in-use") self.assertEqual(vol['attach_status'], "attached") - #Here we puke... + # Here we puke... 
self.cloud.terminate_instances(self.context, [ec2_instance_id]) admin_ctxt = context.get_admin_context(read_deleted="no") @@ -990,7 +990,7 @@ def test_run_with_snapshot(self): self._assert_volume_attached(vol, instance_uuid, mountpoint) - #Just make sure we found them + # Just make sure we found them self.assertTrue(vol1_id) self.assertTrue(vol2_id) diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py index 4f1a11481d..841def64e9 100644 --- a/nova/tests/api/ec2/test_ec2_validate.py +++ b/nova/tests/api/ec2/test_ec2_validate.py @@ -106,7 +106,7 @@ def tearDown(self): super(EC2ValidateTestCase, self).tearDown() fake.FakeImageService_reset() - #EC2_API tests (InvalidInstanceID.Malformed) + # EC2_API tests (InvalidInstanceID.Malformed) def test_console_output(self): for ec2_id, e in self.ec2_id_exception_map: self.assertRaises(e, @@ -215,7 +215,7 @@ def test_validate_ec2_timestamp_invalid_format(self): def test_validate_ec2_timestamp_advanced_time(self): - #EC2 request with Timestamp in advanced time + # EC2 request with Timestamp in advanced time timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250) params = {'Timestamp': timeutils.strtime(timestamp, "%Y-%m-%dT%H:%M:%SZ")} @@ -252,14 +252,14 @@ def test_validate_ec2_req_not_expired(self): def test_validate_Expires_timestamp_invalid_format(self): - #EC2 request with invalid Expires + # EC2 request with invalid Expires params = {'Expires': '2011-04-22T11:29:49'} expired = ec2utils.is_ec2_timestamp_expired(params) self.assertTrue(expired) def test_validate_ec2_req_timestamp_Expires(self): - #EC2 request with both Timestamp and Expires + # EC2 request with both Timestamp and Expires params = {'Timestamp': '2011-04-22T11:29:49Z', 'Expires': timeutils.isotime()} self.assertRaises(exception.InvalidRequest, diff --git a/nova/tests/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/api/openstack/compute/contrib/test_aggregates.py index 9b84b9dc8b..553ba0b727 
100644 --- a/nova/tests/api/openstack/compute/contrib/test_aggregates.py +++ b/nova/tests/api/openstack/compute/contrib/test_aggregates.py @@ -409,7 +409,7 @@ def stub_add_host_to_aggregate(context, aggregate, host): raise KeyError self.stubs.Set(self.controller.api, "add_host_to_aggregate", stub_add_host_to_aggregate) - #NOTE(mtreinish) The check for a KeyError here is to ensure that + # NOTE(mtreinish) The check for a KeyError here is to ensure that # if add_host_to_aggregate() raises a KeyError it propagates. At # one point the api code would mask the error as a HTTPBadRequest. # This test is to ensure that this doesn't occur again. diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py b/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py index 1136facec5..9494539a4b 100644 --- a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py +++ b/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py @@ -38,7 +38,7 @@ } -#TOD(jogo) dedup these across nova.api.openstack.contrib.test_flavor* +# TODO(jogo) dedup these across nova.api.openstack.contrib.test_flavor* def fake_flavor_get_by_flavor_id(flavorid, ctxt=None): return FAKE_FLAVORS['flavor %s' % flavorid] diff --git a/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py b/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py index 970cb8ff16..0ff6ec3105 100644 --- a/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py +++ b/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py @@ -40,11 +40,11 @@ end3 = datetime.datetime(2012, 7, 7, 6, 0, 0) -#test data +# test data TEST_LOGS1 = [ - #all services done, no errors. + # all services done, no errors. dict(host="plonk", period_beginning=begin1, period_ending=end1, state="DONE", errors=0, task_items=23, message="test1"), dict(host="baz", period_beginning=begin1, period_ending=end1, @@ -57,7 +57,7 @@ TEST_LOGS2 = [ - #some still running... 
+ # some still running... dict(host="plonk", period_beginning=begin2, period_ending=end2, state="DONE", errors=0, task_items=23, message="test5"), dict(host="baz", period_beginning=begin2, period_ending=end2, @@ -70,7 +70,7 @@ TEST_LOGS3 = [ - #some errors.. + # some errors.. dict(host="plonk", period_beginning=begin3, period_ending=end3, state="DONE", errors=0, task_items=23, message="test9"), dict(host="baz", period_beginning=begin3, period_ending=end3, diff --git a/nova/tests/api/openstack/compute/extensions/foxinsocks.py b/nova/tests/api/openstack/compute/extensions/foxinsocks.py index 5785f1037a..7d1e273ea7 100644 --- a/nova/tests/api/openstack/compute/extensions/foxinsocks.py +++ b/nova/tests/api/openstack/compute/extensions/foxinsocks.py @@ -45,7 +45,7 @@ def _fail(self, req, id, body): class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): - #NOTE: This only handles JSON responses. + # NOTE: This only handles JSON responses. # You can use content type header to test for XML. resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing') @@ -53,7 +53,7 @@ def show(self, req, resp_obj, id): class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): - #NOTE: This only handles JSON responses. + # NOTE: This only handles JSON responses. # You can use content type header to test for XML. resp_obj.obj['big_bands'] = 'Pig Bands!' 
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_flavors.py b/nova/tests/api/openstack/compute/plugins/v3/test_flavors.py index 5f9f1a883d..52bdaddaf9 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_flavors.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_flavors.py @@ -484,9 +484,6 @@ def setUp(self): super(FlavorDisabledTest, self).setUp() fakes.stub_out_nw_api(self.stubs) - #def fake_flavor_get_all(*args, **kwargs): - # return FAKE_FLAVORS - # self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list", fake_get_all_flavors_sorted_list) self.stubs.Set(nova.compute.flavors, diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py index 1ab5e86f8a..ce1f343b31 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py @@ -515,19 +515,19 @@ def test_invalid_metadata_items_on_update_item(self): req.method = 'PUT' req.headers["content-type"] = "application/json" - #test for long key + # test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update_all, req, self.uuid, data) - #test for long value + # test for long value data = {"metadata": {"key": "v" * 260}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update_all, req, self.uuid, data) - #test for empty key. + # test for empty key. 
data = {"metadata": {"": "value1"}} req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPBadRequest, diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index f00e4b0bf4..a938038a11 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -2880,7 +2880,7 @@ def test_build_server_detail_with_fault_but_active(self): self.assertNotIn('fault', output['server']) def test_build_server_detail_active_status(self): - #set the power state of the instance to running + # set the power state of the instance to running self.instance['vm_state'] = vm_states.ACTIVE self.instance['progress'] = 100 image_bookmark = "http://localhost:9292/images/5" diff --git a/nova/tests/api/openstack/compute/test_api.py b/nova/tests/api/openstack/compute/test_api.py index fc83c4fd19..15f9fe499a 100644 --- a/nova/tests/api/openstack/compute/test_api.py +++ b/nova/tests/api/openstack/compute/test_api.py @@ -79,7 +79,7 @@ def test_exceptions_are_converted_to_faults_webob_exc(self): def raise_webob_exc(req): raise webob.exc.HTTPNotFound(explanation='Raised a webob.exc') - #api.application = raise_webob_exc + # api.application = raise_webob_exc api = self._wsgi_app(raise_webob_exc) resp = webob.Request.blank('/').get_response(api) self.assertEqual(resp.status_int, 404, resp.body) @@ -90,7 +90,7 @@ def raise_api_fault(req): exc = webob.exc.HTTPNotFound(explanation='Raised a webob.exc') return wsgi.Fault(exc) - #api.application = raise_api_fault + # api.application = raise_api_fault api = self._wsgi_app(raise_api_fault) resp = webob.Request.blank('/').get_response(api) self.assertIn('itemNotFound', resp.body) @@ -101,7 +101,7 @@ def test_exceptions_are_converted_to_faults_exception(self): def fail(req): raise Exception("Threw an exception") - #api.application = fail + # api.application = fail api = self._wsgi_app(fail) resp = 
webob.Request.blank('/').get_response(api) self.assertIn('{"computeFault', resp.body) @@ -112,7 +112,7 @@ def test_exceptions_are_converted_to_faults_exception_xml(self): def fail(req): raise Exception("Threw an exception") - #api.application = fail + # api.application = fail api = self._wsgi_app(fail) resp = webob.Request.blank('/.xml').get_response(api) self.assertIn(' terminate + # check failed to schedule --> terminate params = {'vm_state': vm_states.ERROR} instance = self._create_fake_instance_obj(params=params) self.compute.terminate_instance(self.context, instance, [], []) @@ -4725,7 +4725,7 @@ def throw_up(*args, **kwargs): self.context.elevated(), instance.uuid, 'pre-migrating') - #verify + # verify self.assertRaises(test.TestingException, self.compute.resize_instance, self.context, instance=instance, migration=migration, image={}, @@ -6175,7 +6175,7 @@ def test_instance_build_timeout_mixed_instances(self): instance.update(filters) old_instances.append(fake_instance.fake_db_instance(**instance)) - #not expired + # not expired instances = list(old_instances) # copy the contents of old_instances new_instance = { 'uuid': str(uuid.uuid4()), @@ -10640,7 +10640,7 @@ def test_rebuild_with_instance_in_stopped_state(self): """Confirm evacuate scenario updates vm_state to stopped if instance is in stopped state """ - #Initialize the VM to stopped state + # Initialize the VM to stopped state db.instance_update(self.context, self.inst_ref['uuid'], {"vm_state": vm_states.STOPPED}) self.inst_ref['vm_state'] = vm_states.STOPPED @@ -10650,7 +10650,7 @@ def test_rebuild_with_instance_in_stopped_state(self): self._rebuild() - #Check the vm state is reset to stopped + # Check the vm state is reset to stopped instance = db.instance_get(self.context, self.inst_ref['id']) self.assertEqual(instance['vm_state'], vm_states.STOPPED) diff --git a/nova/tests/console/test_console.py b/nova/tests/console/test_console.py index b107cdf4fe..bd3dbeb6b6 100644 --- 
a/nova/tests/console/test_console.py +++ b/nova/tests/console/test_console.py @@ -47,8 +47,6 @@ def setUp(self): def _create_instance(self): """Create a test instance.""" inst = {} - #inst['host'] = self.host - #inst['name'] = 'instance-1234' inst['image_id'] = 1 inst['reservation_id'] = 'r-fakeres' inst['user_id'] = self.user_id diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py index adaf68fc1f..a0969ec91a 100644 --- a/nova/tests/db/test_db_api.py +++ b/nova/tests/db/test_db_api.py @@ -332,7 +332,7 @@ def test_aggregate_create_with_metadata(self): matchers.DictMatches(_get_fake_aggr_metadata())) def test_aggregate_create_delete_create_with_metadata(self): - #test for bug 1052479 + # test for bug 1052479 ctxt = context.get_admin_context() result = _create_aggregate(context=ctxt) expected_metadata = db.aggregate_metadata_get(ctxt, result['id']) @@ -1635,29 +1635,29 @@ def test_instance_get_all_by_filters_tags(self): instance = self.create_instance_with_args( metadata={'foo': 'bar'}) self.create_instance_with_args() - #For format 'tag-' + # For format 'tag-' result = db.instance_get_all_by_filters( self.ctxt, {'filter': [ {'name': 'tag-key', 'value': 'foo'}, {'name': 'tag-value', 'value': 'bar'}, ]}) self._assertEqualListsOfInstances([instance], result) - #For format 'tag:' + # For format 'tag:' result = db.instance_get_all_by_filters( self.ctxt, {'filter': [ {'name': 'tag:foo', 'value': 'bar'}, ]}) self._assertEqualListsOfInstances([instance], result) - #For non-existent tag + # For non-existent tag result = db.instance_get_all_by_filters( self.ctxt, {'filter': [ {'name': 'tag:foo', 'value': 'barred'}, ]}) self.assertEqual([], result) - #Confirm with deleted tags + # Confirm with deleted tags db.instance_metadata_delete(self.ctxt, instance['uuid'], 'foo') - #For format 'tag-' + # For format 'tag-' result = db.instance_get_all_by_filters( self.ctxt, {'filter': [ {'name': 'tag-key', 'value': 'foo'}, @@ -1668,7 +1668,7 @@ def 
test_instance_get_all_by_filters_tags(self): {'name': 'tag-value', 'value': 'bar'} ]}) self.assertEqual([], result) - #For format 'tag:' + # For format 'tag:' result = db.instance_get_all_by_filters( self.ctxt, {'filter': [ {'name': 'tag:foo', 'value': 'bar'}, @@ -2784,10 +2784,10 @@ def assert_multi_filter_flavor_get(filters=None): real_it = db.flavor_get_all(self.ctxt, filters=filters) self._assertEqualListsOfObjects(expected_it, real_it) - #no filter + # no filter assert_multi_filter_flavor_get() - #test only with one filter + # test only with one filter for filt in mem_filts: assert_multi_filter_flavor_get(filt) for filt in root_filts: @@ -2797,7 +2797,7 @@ def assert_multi_filter_flavor_get(filters=None): for filt in is_public_filts: assert_multi_filter_flavor_get(filt) - #test all filters together + # test all filters together for mem in mem_filts: for root in root_filts: for disabled in disabled_filts: diff --git a/nova/tests/db/test_migration_utils.py b/nova/tests/db/test_migration_utils.py index d3f93710e3..ecdb298db7 100644 --- a/nova/tests/db/test_migration_utils.py +++ b/nova/tests/db/test_migration_utils.py @@ -90,7 +90,7 @@ def test_check_shadow_table(self): Column('c', String(256))) table.create() - #check missing shadow table + # check missing shadow table self.assertRaises(NoSuchTableError, utils.check_shadow_table, engine, table_name) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 8e8e3aa0be..d088c299ed 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -32,12 +32,12 @@ def __init__(self, images=None, version=None, endpoint=None, **params): _images = images or [] map(lambda image: self.create(**image), _images) - #NOTE(bcwaldon): HACK to get client.images.* to work + # NOTE(bcwaldon): HACK to get client.images.* to work self.images = lambda: None for fn in ('list', 'get', 'data', 'create', 'update', 'delete'): setattr(self.images, fn, getattr(self, fn)) - #TODO(bcwaldon): implement filters + # 
TODO(bcwaldon): implement filters def list(self, filters=None, marker=None, limit=30, page_size=20): if marker is None: index = 0 diff --git a/nova/tests/image/fake.py b/nova/tests/image/fake.py index 0e8d3ac114..e810ef4e99 100644 --- a/nova/tests/image/fake.py +++ b/nova/tests/image/fake.py @@ -154,7 +154,7 @@ def __init__(self): self._imagedata = {} super(_FakeImageService, self).__init__() - #TODO(bcwaldon): implement optional kwargs such as limit, sort_dir + # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir def detail(self, context, **kwargs): """Return list of detailed image information.""" return copy.deepcopy(self.images.values()) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 557ab25298..c36eec662d 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -264,7 +264,7 @@ def data(self, image_id): self.flags(allowed_direct_url_schemes=['file'], group='glance') self.flags(group='image_file_url', filesystems=['gluster']) service = self._create_image_service(client) - #NOTE(Jbresnah) The following options must be added after the module + # NOTE(Jbresnah) The following options must be added after the module # has added the specific groups. self.flags(group='image_file_url:gluster', id=fs_id) self.flags(group='image_file_url:gluster', mountpoint=mountpoint) @@ -304,7 +304,7 @@ def _fake_copyfile(source, dest): self.flags(allowed_direct_url_schemes=['file'], group='glance') self.flags(group='image_file_url', filesystems=['gluster']) service = self._create_image_service(client) - #NOTE(Jbresnah) The following options must be added after the module + # NOTE(Jbresnah) The following options must be added after the module # has added the specific groups. 
self.flags(group='image_file_url:gluster', id='someotherid') self.flags(group='image_file_url:gluster', mountpoint=mountpoint) diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 06e8c0401f..110bad3844 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -282,7 +282,7 @@ def _wait_for_deletion(self, server_id): LOG.debug("Found_server=%s" % found_server) # TODO(justinsb): Mock doesn't yet do accurate state changes - #if found_server['status'] != 'deleting': + # if found_server['status'] != 'deleting': # break time.sleep(.1) diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index 5bd1696054..1ff73b9361 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -39,10 +39,10 @@ CONF = cfg.CONF -#NOTE: Neutron client raises Exception which is discouraged by HACKING. -# We set this variable here and use it for assertions below to avoid -# the hacking checks until we can make neutron client throw a custom -# exception class instead. +# NOTE: Neutron client raises Exception which is discouraged by HACKING. +# We set this variable here and use it for assertions below to avoid +# the hacking checks until we can make neutron client throw a custom +# exception class instead. 
NEUTRON_CLIENT_EXCEPTION = Exception @@ -2525,7 +2525,7 @@ def client_mock(*args, **kwargs): client.Client.__init__(**kwargs).WithSideEffects(client_mock) self.mox.ReplayAll() - #clean global + # clean global token_store = neutronv2.AdminTokenStore.get() token_store.admin_auth_token = None if admin_context: diff --git a/nova/tests/objects/test_compute_node.py b/nova/tests/objects/test_compute_node.py index 6b426e608e..a7b89bc22c 100644 --- a/nova/tests/objects/test_compute_node.py +++ b/nova/tests/objects/test_compute_node.py @@ -89,7 +89,7 @@ def test_create(self): compute = compute_node.ComputeNode() compute.service_id = 456 compute.stats = fake_stats - #NOTE (pmurray): host_ip is coerced to an IPAddress + # NOTE (pmurray): host_ip is coerced to an IPAddress compute.host_ip = fake_host_ip compute.create(self.context) self.compare_obj(compute, fake_compute_node, @@ -121,7 +121,7 @@ def test_save(self): compute.id = 123 compute.vcpus_used = 3 compute.stats = fake_stats - #NOTE (pmurray): host_ip is coerced to an IPAddress + # NOTE (pmurray): host_ip is coerced to an IPAddress compute.host_ip = fake_host_ip compute.save(self.context) self.compare_obj(compute, fake_compute_node, diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index 5ff5159968..666d339ed4 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -302,7 +302,7 @@ def compare_obj(self, obj, db_obj, subs=None, allow_missing=None, def json_comparator(self, expected, obj_val): # json-ify an object field for comparison with its db str - #equivalent + # equivalent self.assertEqual(expected, jsonutils.dumps(obj_val)) def str_comparator(self, expected, obj_val): diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py index 634cf66927..eb4de0cb29 100644 --- a/nova/tests/scheduler/test_host_filters.py +++ b/nova/tests/scheduler/test_host_filters.py @@ -464,15 +464,15 @@ def test_type_filter(self): 
service = {'disabled': False} host = fakes.FakeHostState('fake_host', 'fake_node', {'service': service}) - #True since empty + # True since empty self.assertTrue(filt_cls.host_passes(host, filter_properties)) fakes.FakeInstance(context=self.context, params={'host': 'fake_host', 'instance_type_id': 1}) - #True since same type + # True since same type self.assertTrue(filt_cls.host_passes(host, filter_properties)) - #False since different type + # False since different type self.assertFalse(filt_cls.host_passes(host, filter2_properties)) - #False since node not homogeneous + # False since node not homogeneous fakes.FakeInstance(context=self.context, params={'host': 'fake_host', 'instance_type_id': 2}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) @@ -488,13 +488,13 @@ def test_aggregate_type_filter(self): service = {'disabled': False} host = fakes.FakeHostState('fake_host', 'fake_node', {'service': service}) - #True since no aggregates + # True since no aggregates self.assertTrue(filt_cls.host_passes(host, filter_properties)) - #True since type matches aggregate, metadata + # True since type matches aggregate, metadata self._create_aggregate_with_host(name='fake_aggregate', hosts=['fake_host'], metadata={'instance_type': 'fake1'}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) - #False since type matches aggregate, metadata + # False since type matches aggregate, metadata self.assertFalse(filt_cls.host_passes(host, filter2_properties)) def test_ram_filter_fails_on_memory(self): diff --git a/nova/tests/test_safeutils.py b/nova/tests/test_safeutils.py index e42ddea5c5..66d20ca79e 100644 --- a/nova/tests/test_safeutils.py +++ b/nova/tests/test_safeutils.py @@ -24,7 +24,7 @@ def test_all_kwargs(self): args = () kwargs = {'instance': {'uuid': 1}, 'red': 3, 'blue': 4} callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs) - #implicit self counts as an arg + # implicit self counts as an arg self.assertEqual(4, len(callargs)) 
self.assertIn('instance', callargs) self.assertEqual({'uuid': 1}, callargs['instance']) @@ -37,7 +37,7 @@ def test_all_args(self): args = ({'uuid': 1}, 3, 4) kwargs = {} callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs) - #implicit self counts as an arg + # implicit self counts as an arg self.assertEqual(4, len(callargs)) self.assertIn('instance', callargs) self.assertEqual({'uuid': 1}, callargs['instance']) @@ -50,7 +50,7 @@ def test_mixed_args(self): args = ({'uuid': 1}, 3) kwargs = {'blue': 4} callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs) - #implicit self counts as an arg + # implicit self counts as an arg self.assertEqual(4, len(callargs)) self.assertIn('instance', callargs) self.assertEqual({'uuid': 1}, callargs['instance']) @@ -63,7 +63,7 @@ def test_partial_kwargs(self): args = () kwargs = {'instance': {'uuid': 1}, 'red': 3} callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs) - #implicit self counts as an arg + # implicit self counts as an arg self.assertEqual(4, len(callargs)) self.assertIn('instance', callargs) self.assertEqual({'uuid': 1}, callargs['instance']) @@ -76,7 +76,7 @@ def test_partial_args(self): args = ({'uuid': 1}, 3) kwargs = {} callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs) - #implicit self counts as an arg + # implicit self counts as an arg self.assertEqual(4, len(callargs)) self.assertIn('instance', callargs) self.assertEqual({'uuid': 1}, callargs['instance']) diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index 567630537a..77660fe9ed 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -381,7 +381,7 @@ class AuditPeriodTest(test.NoDBTestCase): def setUp(self): super(AuditPeriodTest, self).setUp() - #a fairly random time to test with + # a fairly random time to test with self.test_time = datetime.datetime(second=23, minute=12, hour=8, diff --git a/nova/tests/virt/libvirt/fake_imagebackend.py 
b/nova/tests/virt/libvirt/fake_imagebackend.py index 0946b1a6db..f2a0de969b 100644 --- a/nova/tests/virt/libvirt/fake_imagebackend.py +++ b/nova/tests/virt/libvirt/fake_imagebackend.py @@ -53,7 +53,7 @@ def libvirt_info(self, disk_bus, disk_dev, device_type, return FakeImage(instance, name) def snapshot(self, path, image_type=''): - #NOTE(bfilippov): this is done in favor for + # NOTE(bfilippov): this is done in favor for # snapshot tests in test_libvirt.LibvirtConnTestCase return imagebackend.Backend(True).snapshot(path, image_type) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index c793de3ba2..f8a8f9b81a 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -4476,7 +4476,7 @@ def fake_lookup(instance_name): self.compute._rollback_live_migration(self.context, instance_ref, 'dest', False) - #start test + # start test migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}} @@ -4518,7 +4518,7 @@ def fake_lookup(instance_name): self.compute._rollback_live_migration(self.context, instance_ref, 'dest', False) - #start test + # start test migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '0.0.0.0', 'spice': '0.0.0.0'}}} @@ -4559,7 +4559,7 @@ def fake_lookup(instance_name): self.compute._rollback_live_migration(self.context, instance_ref, 'dest', False) - #start test + # start test migrate_data = {} self.mox.ReplayAll() conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) @@ -4595,7 +4595,7 @@ def fake_lookup(instance_name): self.compute._rollback_live_migration(self.context, instance_ref, 'dest', False) - #start test + # start test migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '1.2.3.4', 'spice': '1.2.3.4'}}} @@ -4650,7 +4650,7 @@ def fake_lookup(instance_name): self.compute._rollback_live_migration(self.context, instance_ref, 'dest', False) - #start 
test + # start test migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '127.0.0.1', 'spice': '127.0.0.1'}}} diff --git a/nova/tests/virt/xenapi/stubs.py b/nova/tests/virt/xenapi/stubs.py index 3ad289d1c6..a2225af388 100644 --- a/nova/tests/virt/xenapi/stubs.py +++ b/nova/tests/virt/xenapi/stubs.py @@ -44,7 +44,7 @@ def fake_fetch_image(context, session, instance, name_label, image, type): stubs.Set(vm_utils, '_fetch_image', fake_fetch_image) def fake_wait_for_vhd_coalesce(*args): - #TODO(sirp): Should we actually fake out the data here + # TODO(sirp): Should we actually fake out the data here return "fakeparent", "fakebase" stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce) diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py index 2bb9dfceb9..ede8be7007 100644 --- a/nova/tests/virt/xenapi/test_xenapi.py +++ b/nova/tests/virt/xenapi/test_xenapi.py @@ -1165,7 +1165,7 @@ def test_spawn_with_resetnetwork_alternative_returncode(self): def fake_resetnetwork(self, method, args): fake_resetnetwork.called = True - #NOTE(johngarbutt): as returned by FreeBSD and Gentoo + # NOTE(johngarbutt): as returned by FreeBSD and Gentoo return jsonutils.dumps({'returncode': '500', 'message': 'success'}) self.stubs.Set(stubs.FakeSessionForVMTests, @@ -2741,7 +2741,7 @@ def test_do_refresh_security_group_rules(self): 'from_port': 200, 'to_port': 299, 'cidr': '192.168.99.0/24'}) - #validate the extra rule + # validate the extra rule self.fw.refresh_security_group_rules(secgroup) regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299' ' -s 192.168.99.0/24') diff --git a/nova/utils.py b/nova/utils.py index 1ca10b8e1b..2f41914a3c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -825,7 +825,7 @@ def mkfs(fs, path, label=None, run_as_root=False): args = ['mkswap'] else: args = ['mkfs', '-t', fs] - #add -F to force no interactive execute on non-block device. 
+ # add -F to force no interactive execute on non-block device. if fs in ('ext3', 'ext4', 'ntfs'): args.extend(['-F']) if label: diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py index 0cf0b637ab..351ca20f64 100644 --- a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py +++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py @@ -41,7 +41,6 @@ def upgrade(migrate_engine): Column('prov_vlan_id', Integer), Column('terminal_port', Integer), mysql_engine='InnoDB', - #mysql_charset='utf8' ) bm_interfaces = Table('bm_interfaces', meta, @@ -56,7 +55,6 @@ def upgrade(migrate_engine): Column('port_no', Integer), Column('vif_uuid', String(length=36), unique=True), mysql_engine='InnoDB', - #mysql_charset='utf8' ) bm_pxe_ips = Table('bm_pxe_ips', meta, @@ -69,7 +67,6 @@ def upgrade(migrate_engine): Column('bm_node_id', Integer), Column('server_address', String(length=255), unique=True), mysql_engine='InnoDB', - #mysql_charset='utf8' ) bm_deployments = Table('bm_deployments', meta, @@ -85,7 +82,6 @@ def upgrade(migrate_engine): Column('root_mb', Integer), Column('swap_mb', Integer), mysql_engine='InnoDB', - #mysql_charset='utf8' ) bm_nodes.create() diff --git a/nova/virt/driver.py b/nova/virt/driver.py index acfb90f26d..e6f7f7264c 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -1145,7 +1145,7 @@ def manage_image_cache(self, context, all_instances): def add_to_aggregate(self, context, aggregate, host, **kwargs): """Add a compute host to an aggregate.""" - #NOTE(jogo) Currently only used for XenAPI-Pool + # NOTE(jogo) Currently only used for XenAPI-Pool raise NotImplementedError() def remove_from_aggregate(self, context, aggregate, host, **kwargs): diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py index 452edbf05a..15e65ed9c7 100644 --- a/nova/virt/firewall.py +++ b/nova/virt/firewall.py @@ -344,7 +344,7 @@ def 
instance_rules(self, instance, network_info): # Set up rules to allow traffic to/from DHCP server self._do_dhcp_rules(ipv4_rules, network_info) - #Allow project network traffic + # Allow project network traffic if CONF.allow_same_net_traffic: self._do_project_network_rules(ipv4_rules, ipv6_rules, network_info) diff --git a/nova/virt/hyperv/livemigrationutils.py b/nova/virt/hyperv/livemigrationutils.py index 579965897e..4ff16fc2ab 100644 --- a/nova/virt/hyperv/livemigrationutils.py +++ b/nova/virt/hyperv/livemigrationutils.py @@ -181,7 +181,6 @@ def _get_vhd_setting_data(self, vm): for sasd in sasds: if (sasd.ResourceType == 31 and sasd.ResourceSubType == "Microsoft:Hyper-V:Virtual Hard Disk"): - #sasd.PoolId = "" new_resource_setting_data.append(sasd.GetText_(1)) return new_resource_setting_data diff --git a/nova/virt/hyperv/networkutils.py b/nova/virt/hyperv/networkutils.py index 27571485cd..07ad489187 100644 --- a/nova/virt/hyperv/networkutils.py +++ b/nova/virt/hyperv/networkutils.py @@ -48,7 +48,7 @@ def get_external_vswitch(self, vswitch_name): def create_vswitch_port(self, vswitch_path, port_name): switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] - #Create a port on the vswitch. + # Create a port on the vswitch. 
(new_port, ret_val) = switch_svc.CreateSwitchPort( Name=str(uuid.uuid4()), FriendlyName=port_name, diff --git a/nova/virt/hyperv/vif.py b/nova/virt/hyperv/vif.py index 3b64010c6e..2fa9fe83ac 100644 --- a/nova/virt/hyperv/vif.py +++ b/nova/virt/hyperv/vif.py @@ -78,5 +78,5 @@ def plug(self, instance, vif): self._vmutils.set_nic_connection(vm_name, vif['id'], vswitch_data) def unplug(self, instance, vif): - #TODO(alepilotti) Not implemented + # TODO(alepilotti) Not implemented pass diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py index 6f12429770..9f72b06b56 100644 --- a/nova/virt/hyperv/vmops.py +++ b/nova/virt/hyperv/vmops.py @@ -363,7 +363,7 @@ def destroy(self, instance, network_info=None, block_device_info=None, try: if self._vmutils.vm_exists(instance_name): - #Stop the VM first. + # Stop the VM first. self.power_off(instance) storage = self._vmutils.get_vm_storage_paths(instance_name) diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py index aa28df72f2..81d2ed98f6 100644 --- a/nova/virt/hyperv/vmutils.py +++ b/nova/virt/hyperv/vmutils.py @@ -111,7 +111,7 @@ def get_vm_summary_info(self, vm_name): wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS, wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS) settings_paths = [v.path_() for v in vmsettings] - #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx + # See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx (ret_val, summary_info) = vs_man_svc.GetSummaryInformation( [constants.VM_SUMMARY_NUM_PROCS, constants.VM_SUMMARY_ENABLED_STATE, @@ -309,10 +309,10 @@ def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr, drive = self._get_new_resource_setting_data(res_sub_type) - #Set the IDE ctrller as parent. + # Set the IDE ctrller as parent. drive.Parent = ctrller_path drive.Address = drive_addr - #Add the cloned disk drive object to the vm. + # Add the cloned disk drive object to the vm. 
new_resources = self._add_virt_resource(drive, vm.path_()) drive_path = new_resources[0] @@ -322,11 +322,11 @@ def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr, res_sub_type = self._IDE_DVD_RES_SUB_TYPE res = self._get_new_resource_setting_data(res_sub_type) - #Set the new drive as the parent. + # Set the new drive as the parent. res.Parent = drive_path res.Connection = [path] - #Add the new vhd object as a virtual hard disk to the vm. + # Add the new vhd object as a virtual hard disk to the vm. self._add_virt_resource(res, vm.path_()) def create_scsi_controller(self, vm_name): @@ -366,17 +366,17 @@ def _get_nic_data_by_name(self, name): def create_nic(self, vm_name, nic_name, mac_address): """Create a (synthetic) nic and attach it to the vm.""" - #Create a new nic + # Create a new nic new_nic_data = self._get_new_setting_data( self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS) - #Configure the nic + # Configure the nic new_nic_data.ElementName = nic_name new_nic_data.Address = mac_address.replace(':', '') new_nic_data.StaticMacAddress = 'True' new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}'] - #Add the new nic to the vm + # Add the new nic to the vm vm = self._lookup_vm_check(vm_name) self._add_virt_resource(new_nic_data, vm.path_()) @@ -386,8 +386,8 @@ def set_vm_state(self, vm_name, req_state): vm = self._lookup_vm_check(vm_name) (job_path, ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state]) - #Invalid state for current operation (32775) typically means that - #the VM is already in the state requested + # Invalid state for current operation (32775) typically means that + # the VM is already in the state requested self.check_ret_val(ret_val, job_path, [0, 32775]) LOG.debug("Successfully changed vm state of %(vm_name)s " "to %(req_state)s", @@ -430,7 +430,7 @@ def destroy_vm(self, vm_name): vm = self._lookup_vm_check(vm_name) vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] - #Remove the 
VM. Does not destroy disks. + # Remove the VM. Does not destroy disks. (job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_()) self.check_ret_val(ret_val, job_path) diff --git a/nova/virt/hyperv/vmutilsv2.py b/nova/virt/hyperv/vmutilsv2.py index ed2c0788cc..61a88291cf 100644 --- a/nova/virt/hyperv/vmutilsv2.py +++ b/nova/virt/hyperv/vmutilsv2.py @@ -104,11 +104,11 @@ def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr, drive = self._get_new_resource_setting_data(res_sub_type) - #Set the IDE ctrller as parent. + # Set the IDE ctrller as parent. drive.Parent = ctrller_path drive.Address = drive_addr drive.AddressOnParent = drive_addr - #Add the cloned disk drive object to the vm. + # Add the cloned disk drive object to the vm. new_resources = self._add_virt_resource(drive, vm.path_()) drive_path = new_resources[0] @@ -157,7 +157,7 @@ def destroy_vm(self, vm_name): vm = self._lookup_vm_check(vm_name) vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] - #Remove the VM. It does not destroy any associated virtual disk. + # Remove the VM. It does not destroy any associated virtual disk. (job_path, ret_val) = vs_man_svc.DestroySystem(vm.path_()) self.check_ret_val(ret_val, job_path) diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py index faaea2d2a0..c72af82fee 100644 --- a/nova/virt/hyperv/volumeops.py +++ b/nova/virt/hyperv/volumeops.py @@ -123,18 +123,18 @@ def attach_volume(self, connection_info, instance_name, ebs_root=False): target_lun = data['target_lun'] target_iqn = data['target_iqn'] - #Getting the mounted disk + # Getting the mounted disk mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn, target_lun) if ebs_root: - #Find the IDE controller for the vm. + # Find the IDE controller for the vm. 
ctrller_path = self._vmutils.get_vm_ide_controller( instance_name, 0) - #Attaching to the first slot + # Attaching to the first slot slot = 0 else: - #Find the SCSI controller for the vm + # Find the SCSI controller for the vm ctrller_path = self._vmutils.get_vm_scsi_controller( instance_name) slot = self._get_free_controller_slot(ctrller_path) @@ -179,7 +179,7 @@ def detach_volume(self, connection_info, instance_name): target_lun = data['target_lun'] target_iqn = data['target_iqn'] - #Getting the mounted disk + # Getting the mounted disk mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn, target_lun) @@ -224,7 +224,7 @@ def _get_mounted_disk_from_lun(self, target_iqn, target_lun, LOG.debug('Device number: %(device_number)s, ' 'target lun: %(target_lun)s', {'device_number': device_number, 'target_lun': target_lun}) - #Finding Mounted disk drive + # Finding Mounted disk drive for i in range(0, CONF.hyperv.volume_attach_retry_count): mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number( device_number) @@ -238,10 +238,10 @@ def _get_mounted_disk_from_lun(self, target_iqn, target_lun, return mounted_disk_path def disconnect_volume(self, physical_drive_path): - #Get the session_id of the ISCSI connection + # Get the session_id of the ISCSI connection session_id = self._volutils.get_session_id_from_mounted_disk( physical_drive_path) - #Logging out the target + # Logging out the target self._volutils.execute_log_out(session_id) def get_target_from_disk_path(self, physical_drive_path): diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py index ccd890daef..05be31af90 100644 --- a/nova/virt/hyperv/volumeutils.py +++ b/nova/virt/hyperv/volumeutils.py @@ -64,7 +64,7 @@ def _login_target_portal(self, target_portal): self.execute('iscsicli.exe', 'RefreshTargetPortal', target_address, target_port) else: - #Adding target portal to iscsi initiator. Sending targets + # Adding target portal to iscsi initiator. 
Sending targets self.execute('iscsicli.exe', 'AddTargetPortal', target_address, target_port, '*', '*', '*', '*', '*', '*', '*', '*', '*', '*', '*', @@ -74,7 +74,7 @@ def login_storage_target(self, target_lun, target_iqn, target_portal): """Ensure that the target is logged in.""" self._login_target_portal(target_portal) - #Listing targets + # Listing targets self.execute('iscsicli.exe', 'ListTargets') retry_count = CONF.hyperv.volume_attach_retry_count diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index cdf56839be..a4da68d1e9 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -1086,7 +1086,7 @@ def cleanup(self, context, instance, network_info, block_device_info=None, if destroy_disks: self._cleanup_lvm(instance) - #NOTE(haomai): destroy volumes if needed + # NOTE(haomai): destroy volumes if needed if CONF.libvirt.images_type == 'rbd': self._cleanup_rbd(instance) @@ -1932,7 +1932,7 @@ def _volume_snapshot_delete(self, context, instance, volume_id, except exception.InstanceNotFound: raise exception.InstanceNotRunning(instance_id=instance.uuid) - ##### Find dev name + # Find dev name my_dev = None xml = virt_dom.XMLDesc(0) @@ -3617,7 +3617,7 @@ def _create_domain_setup_lxc(self, instance): container_dir=container_dir, use_cow=CONF.use_cow_images) try: - #Note(GuanQiang): save container root device name here, used for + # Note(GuanQiang): save container root device name here, used for # detaching the linked image device when deleting # the lxc instance. 
if container_root_device: @@ -4097,7 +4097,7 @@ def _get_device_type(cfgdev): "vendor_id": cfgdev.pci_capability.vendor_id[2:6], } - #requirement by DataBase Model + # requirement by DataBase Model device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device device.update(_get_device_type(cfgdev)) return device @@ -4207,9 +4207,9 @@ def interface_stats(self, instance_name, iface_id): return domain.interfaceStats(iface_id) def get_console_pool_info(self, console_type): - #TODO(mdragon): console proxy should be implemented for libvirt, - # in case someone wants to use it with kvm or - # such. For now return fake data. + # TODO(mdragon): console proxy should be implemented for libvirt, + # in case someone wants to use it with kvm or + # such. For now return fake data. return {'address': '127.0.0.1', 'username': 'fakeuser', 'password': 'fakepassword'} @@ -5038,7 +5038,7 @@ def get_host_cpu_stats(self): def get_host_uptime(self, host): """Returns the result of calling "uptime".""" - #NOTE(dprince): host seems to be ignored for this call and in + # NOTE(dprince): host seems to be ignored for this call and in # other compute drivers as well. Perhaps we should remove it? out, err = utils.execute('env', 'LANG=C', 'uptime') return out @@ -5537,7 +5537,7 @@ def _get_disk_available_least(): disk_info_dict = self.driver._get_local_gb_info() data = {} - #NOTE(dprince): calling capabilities before getVersion works around + # NOTE(dprince): calling capabilities before getVersion works around # an initialization issue with some versions of Libvirt (1.0.5.5). 
# See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116 # See: https://bugs.launchpad.net/nova/+bug/1215593 diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index 4caf9e0001..a79934dfa4 100644 --- a/nova/virt/libvirt/imagebackend.py +++ b/nova/virt/libvirt/imagebackend.py @@ -336,7 +336,7 @@ def copy_raw_image(base, target, size): generating = 'image_id' not in kwargs if generating: if not self.check_image_exists(): - #Generating image in place + # Generating image in place prepare_template(target=self.path, *args, **kwargs) else: if not os.path.exists(base): @@ -463,7 +463,7 @@ def create_lvm_image(base, size): generated = 'ephemeral_size' in kwargs - #Generate images with specified size right on volume + # Generate images with specified size right on volume if generated and size: lvm.create_volume(self.vg, self.lv, size, sparse=self.sparse) diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py index af0cd40f97..d772c8eb57 100644 --- a/nova/virt/libvirt/utils.py +++ b/nova/virt/libvirt/utils.py @@ -531,7 +531,7 @@ def is_mounted(mount_path, source=None): except processutils.ProcessExecutionError as exc: return False except OSError as exc: - #info since it's not required to have this tool. + # info since it's not required to have this tool. 
if exc.errno == errno.ENOENT: LOG.info(_LI("findmnt tool is not installed")) return False diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py index d6aef9d1de..af4dd4922f 100644 --- a/nova/virt/libvirt/volume.py +++ b/nova/virt/libvirt/volume.py @@ -258,9 +258,9 @@ def connect_volume(self, connection_info, disk_info): iscsi_properties = connection_info['data'] if self.use_multipath: - #multipath installed, discovering other targets if available - #multipath should be configured on the nova-compute node, - #in order to fit storage vendor + # multipath installed, discovering other targets if available + # multipath should be configured on the nova-compute node, + # in order to fit storage vendor out = self._run_iscsiadm_bare(['-m', 'discovery', '-t', @@ -312,7 +312,7 @@ def connect_volume(self, connection_info, disk_info): 'tries': tries}) if self.use_multipath: - #we use the multipath device instead of the single path device + # we use the multipath device instead of the single path device self._rescan_multipath() multipath_device = self._get_multipath_device_name(host_device) @@ -465,8 +465,8 @@ def _connect_to_iscsi_portal(self, iscsi_properties): "node.session.auth.password", iscsi_properties['auth_password']) - #duplicate logins crash iscsiadm after load, - #so we scan active sessions to see if the node is logged in. + # duplicate logins crash iscsiadm after load, + # so we scan active sessions to see if the node is logged in. 
out = self._run_iscsiadm_bare(["-m", "session"], run_as_root=True, check_exit_code=[0, 1, 21])[0] or "" @@ -487,8 +487,8 @@ def _connect_to_iscsi_portal(self, iscsi_properties): ("--login",), check_exit_code=[0, 255]) except processutils.ProcessExecutionError as err: - #as this might be one of many paths, - #only set successful logins to startup automatically + # as this might be one of many paths, + # only set successful logins to startup automatically if err.exit_code in [15]: self._iscsiadm_update(iscsi_properties, "node.startup", @@ -730,7 +730,7 @@ def connect_volume(self, connection_info, mount_device): # NOTE(jbr_): If aoedevpath does not exist, do a discover. self._aoe_discover() - #NOTE(jbr_): Device path is not always present immediately + # NOTE(jbr_): Device path is not always present immediately def _wait_for_device_discovery(aoedevpath, mount_device): tries = self.tries if os.path.exists(aoedevpath): diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index d5b7e4cc72..b7ff03bfee 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -136,7 +136,7 @@ def __init__(self, virtapi, read_only=False, scheme="https"): self._host = host.Host(self._session) self._host_state = None - #TODO(hartsocks): back-off into a configuration test module. + # TODO(hartsocks): back-off into a configuration test module. 
if CONF.vmware.use_linked_clone is None: raise error_util.UseLinkedCloneConfigurationFault() diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index a9dacd134e..d0e7864866 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -1048,7 +1048,7 @@ def propset_dict(propset): if propset is None: return {} - #TODO(hartsocks): once support for Python 2.6 is dropped + # TODO(hartsocks): once support for Python 2.6 is dropped # change to {[(prop.name, prop.val) for prop in propset]} return dict([(prop.name, prop.val) for prop in propset]) diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index a726e1a3cb..b03cb4c3fa 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -202,7 +202,7 @@ def spawn(self, context, instance, image_meta, injected_files, datastore_regex=self._datastore_regex) dc_info = self.get_datacenter_ref_and_name(datastore.ref) - #TODO(hartsocks): this pattern is confusing, reimplement as methods + # TODO(hartsocks): this pattern is confusing, reimplement as methods # The use of nested functions in this file makes for a confusing and # hard to maintain file. At some future date, refactor this method to # be a full-fledged method. This will also make unit testing easier. 
@@ -1317,7 +1317,7 @@ def get_instance_diagnostics(self, instance): uptime=uptime) diags.memory_details.maximum = data.get('memorySizeMB', 0) diags.memory_details.used = data.get('guestMemoryUsage', 0) - #TODO(garyk): add in cpu, nic and disk stats + # TODO(garyk): add in cpu, nic and disk stats return diags def _get_vnc_console_connection(self, instance): diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py index 00d9e27389..17480dc1f4 100644 --- a/nova/virt/xenapi/agent.py +++ b/nova/virt/xenapi/agent.py @@ -338,7 +338,7 @@ def inject_file(self, path, contents): def resetnetwork(self): LOG.debug('Resetting network', instance=self.instance) - #NOTE(johngarbutt) old FreeBSD and Gentoo agents return 500 on success + # NOTE(johngarbutt) old FreeBSD and Gentoo agents return 500 on success return self._call_agent('resetnetwork', timeout=CONF.xenserver.agent_resetnetwork_timeout, success_codes=['0', '500']) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index df243f2558..03b8b269eb 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -12,8 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. # -#============================================================================ -# + + # Parts of this file are based upon xmlrpclib.py, the XML-RPC client # interface included in the Python distribution. 
# @@ -630,7 +630,7 @@ def VDI_clone(self, _1, vdi_to_clone_ref): return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref) def host_compute_free_memory(self, _1, ref): - #Always return 12GB available + # Always return 12GB available return 12 * units.Gi def _plugin_agent_version(self, method, args): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index f200937668..d22d1e07ce 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -1124,7 +1124,7 @@ def generate_single_ephemeral(session, instance, vm_ref, userdevice, instance_name_label = instance["name"] name_label = "%s ephemeral" % instance_name_label - #TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here + # TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here label_number = int(userdevice) - 4 if label_number > 0: name_label = "%s (%d)" % (name_label, label_number) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b71e48946d..4d276799ed 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -266,7 +266,7 @@ def null_step_decorator(f): def create_disks_step(undo_mgr, disk_image_type, image_meta, name_label): - #TODO(johngarbutt) clean up if this is not run + # TODO(johngarbutt) clean up if this is not run vdis = vm_utils.import_all_migrated_disks(self._session, instance) @@ -994,7 +994,7 @@ def power_down_and_transfer_leaf_vhds(root_vdi_uuid, instance=instance) try: self._restore_orig_vm_and_cleanup_orphan(instance) - #TODO(johngarbutt) should also cleanup VHDs at destination + # TODO(johngarbutt) should also cleanup VHDs at destination except Exception as rollback_error: LOG.warn(_("_migrate_disk_resizing_up failed to " "rollback: %s"), rollback_error, diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 90d2edab62..26607eaed9 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -37,7 +37,7 @@ def __init__(self, session): def 
attach_volume(self, connection_info, instance_name, mountpoint, hotplug=True): """Attach volume to VM instance.""" - #TODO(johngarbutt) move this into _attach_volume_to_vm + # TODO(johngarbutt) move this into _attach_volume_to_vm dev_number = volume_utils.get_device_number(mountpoint) vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py index cbeea5884f..2fbef0e6c9 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py @@ -31,7 +31,7 @@ _ = translations.ugettext -##### Logging setup +# Logging setup def configure_logging(name): log = logging.getLogger() @@ -43,7 +43,7 @@ def configure_logging(name): log.addHandler(sysh) -##### Exceptions +# Exceptions class PluginError(Exception): """Base Exception class for all plugin errors.""" @@ -59,7 +59,7 @@ def __init__(self, *args): PluginError.__init__(self, *args) -##### Argument validation +# Argument validation def exists(args, key): """Validates that a freeform string argument to a RPC method call is given. diff --git a/tox.ini b/tox.ini index 2bcec9b530..609f52e79d 100644 --- a/tox.ini +++ b/tox.ini @@ -55,10 +55,10 @@ sitepackages = False # H803 skipped on purpose per list discussion. # E125 is deliberately excluded. 
See https://github.com/jcrocholl/pep8/issues/126 # The rest of the ignores are TODOs -# New from hacking 0.9: E129, E131, E265, H407, H405, H904 +# New from hacking 0.9: E129, E131, H407, H405, H904 # E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301 -ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,E265,H405,H803,H904 +ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,H803,H904 exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools [hacking] From 5a21711be4444690e9f5ab442be08d15e725ca8e Mon Sep 17 00:00:00 2001 From: Forest Romain Date: Thu, 24 Jul 2014 15:00:04 +0200 Subject: [PATCH 161/486] Clean nova.compute.resource_tracker:_update_usage_from_instances Use cleaner if else statement. Change-Id: I287eae65873e34cc115768fd5361cd56e6444fc4 --- nova/compute/resource_tracker.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py index d1eb96cf72..4876a5b1e7 100644 --- a/nova/compute/resource_tracker.py +++ b/nova/compute/resource_tracker.py @@ -645,9 +645,7 @@ def _update_usage_from_instances(self, resources, instances): self.ext_resources_handler.reset_resources(resources, self.driver) for instance in instances: - if instance['vm_state'] == vm_states.DELETED: - continue - else: + if instance['vm_state'] != vm_states.DELETED: self._update_usage_from_instance(resources, instance) def _find_orphaned_instances(self): From 6d49299ed2dffb33f0ccc36b792c756056664034 Mon Sep 17 00:00:00 2001 From: jichenjc Date: Mon, 21 Jul 2014 22:57:59 +0800 Subject: [PATCH 162/486] Move logs of restore state to inner logic Currently when we rebuild instance, we will restore instance state if it's STOPPED, but we will log it whenever it's STOPPED or not. This may confusing operator and for ACTIVE/ERROR state, it's useless. Also, adds instance into log to provide more info. 
Change-Id: I0685484ee0b56c250071c8c9322d2ed053449fd3 --- nova/compute/manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f1e744f340..8f21318f72 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -2651,8 +2651,9 @@ def detach_block_devices(context, bdms): instance.launched_at = timeutils.utcnow() instance.save(expected_task_state=[task_states.REBUILD_SPAWNING]) - LOG.info(_("bringing vm to original state: '%s'") % orig_vm_state) if orig_vm_state == vm_states.STOPPED: + LOG.info(_LI("bringing vm to original state: '%s'"), + orig_vm_state, instance=instance) instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.POWERING_OFF instance.progress = 0 From 894fbae8ec4f22a71b3021c1b695dc0e4975aa96 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Wed, 23 Jul 2014 17:47:37 +0100 Subject: [PATCH 163/486] libvirt: remove VIF driver classes deprecated in Icehouse In Icehouse the old VIF driver classes were set as deprecated to be removed in Juno. Now they can be finally deleted. Related-bug: #1302796 Change-Id: I7be735062e6cc6aa54dec1bf43d3cce1ac544988 --- nova/virt/libvirt/vif.py | 34 ---------------------------------- 1 file changed, 34 deletions(-) diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 108dc1aabd..eda546eeec 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -24,7 +24,6 @@ from nova import exception from nova.i18n import _ from nova.i18n import _LE -from nova.i18n import _LW from nova.network import linux_net from nova.network import model as network_model from nova.openstack.common import log as logging @@ -758,36 +757,3 @@ def unplug(self, instance, vif): raise exception.NovaException( _("Unexpected vif_type=%s") % vif_type) func(instance, vif) - -# The following classes were removed in the transition from Havana to -# Icehouse, but may still be referenced in configuration files. 
The -# following stubs allow those configurations to work while logging a -# deprecation warning. - - -class _LibvirtDeprecatedDriver(LibvirtGenericVIFDriver): - def __init__(self, *args, **kwargs): - LOG.warn(_LW('VIF driver \"%s\" is marked as deprecated and will be ' - 'removed in the Juno release.'), - self.__class__.__name__) - super(_LibvirtDeprecatedDriver, self).__init__(*args, **kwargs) - - -class LibvirtBridgeDriver(_LibvirtDeprecatedDriver): - pass - - -class LibvirtOpenVswitchDriver(_LibvirtDeprecatedDriver): - pass - - -class LibvirtHybridOVSBridgeDriver(_LibvirtDeprecatedDriver): - pass - - -class LibvirtOpenVswitchVirtualPortDriver(_LibvirtDeprecatedDriver): - pass - - -class NeutronLinuxBridgeVIFDriver(_LibvirtDeprecatedDriver): - pass From 18917d9e0d6fe250cc3b4f7301d37a6c5f5faffb Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sun, 6 Apr 2014 05:54:23 -0700 Subject: [PATCH 164/486] Scheduler: throw exception if no configured affinity filter If the scheduler hint indicates that the scheduling should perform either anti-affinity or affinity scheduling and the relevant filter is not configured then a NoValidHost exception will be thrown. This is valuable if an existing OpenStack installation is running and these filters are not defined after an upgrade. 
Change-Id: I79bb44ad7481b3ff924687a8d6afdd6c715c0b59 Closes-bug: #1302238 --- nova/scheduler/filter_scheduler.py | 17 +++++- nova/scheduler/utils.py | 5 ++ nova/tests/scheduler/test_filter_scheduler.py | 59 ++++++++++++++++--- nova/tests/scheduler/test_scheduler_utils.py | 6 ++ 4 files changed, 76 insertions(+), 11 deletions(-) diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py index 35220ea64c..cd0e5654f9 100644 --- a/nova/scheduler/filter_scheduler.py +++ b/nova/scheduler/filter_scheduler.py @@ -61,6 +61,10 @@ def __init__(self, *args, **kwargs): self.options = scheduler_options.SchedulerOptions() self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.notifier = rpc.get_notifier('scheduler') + self._supports_affinity = scheduler_utils.validate_filter( + 'ServerGroupAffinityFilter') + self._supports_anti_affinity = scheduler_utils.validate_filter( + 'ServerGroupAntiAffinityFilter') # NOTE(alaski): Remove this method when the scheduler rpc interface is # bumped to 4.x as it is no longer used. 
@@ -202,8 +206,7 @@ def populate_filter_properties(self, request_spec, filter_properties): if pci_requests: filter_properties['pci_requests'] = pci_requests - @staticmethod - def _setup_instance_group(context, filter_properties): + def _setup_instance_group(self, context, filter_properties): update_group_hosts = False scheduler_hints = filter_properties.get('scheduler_hints') or {} group_hint = scheduler_hints.get('group', None) @@ -211,6 +214,16 @@ def _setup_instance_group(context, filter_properties): group = objects.InstanceGroup.get_by_hint(context, group_hint) policies = set(('anti-affinity', 'affinity')) if any((policy in policies) for policy in group.policies): + if ('affinity' in group.policies and + not self._supports_affinity): + msg = _("ServerGroupAffinityFilter not configured") + LOG.error(msg) + raise exception.NoValidHost(reason=msg) + if ('anti-affinity' in group.policies and + not self._supports_anti_affinity): + msg = _("ServerGroupAntiAffinityFilter not configured") + LOG.error(msg) + raise exception.NoValidHost(reason=msg) update_group_hosts = True filter_properties.setdefault('group_hosts', set()) user_hosts = set(filter_properties['group_hosts']) diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py index 2e7f4b67c5..77274fc0a0 100644 --- a/nova/scheduler/utils.py +++ b/nova/scheduler/utils.py @@ -235,3 +235,8 @@ def parse_options(opts, sep='=', converter=str, name=""): {'name': name, 'options': ", ".join(bad)}) return good + + +def validate_filter(filter): + """Validates that the filter is configured in the default filters.""" + return filter in CONF.scheduler_default_filters diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py index 7551a7eec5..aff3354a6b 100644 --- a/nova/tests/scheduler/test_filter_scheduler.py +++ b/nova/tests/scheduler/test_filter_scheduler.py @@ -371,7 +371,7 @@ def test_post_select_populate(self): self.assertEqual({'vcpus': 5}, host_state.limits) - def 
_create_server_group(self): + def _create_server_group(self, policy='anti-affinity'): instance = fake_instance.fake_instance_obj(self.context, params={'host': 'hostA'}) @@ -379,10 +379,11 @@ def _create_server_group(self): group.name = 'pele' group.uuid = str(uuid.uuid4()) group.members = [instance.uuid] - group.policies = ['anti-affinity'] + group.policies = [policy] return group - def _test_group_details_in_filter_properties(self, group, func, hint): + def _group_details_in_filter_properties(self, group, func='get_by_uuid', + hint=None, policy=None): sched = fakes.FakeFilterScheduler() filter_properties = { @@ -397,23 +398,63 @@ def _test_group_details_in_filter_properties(self, group, func, hint): mock.patch.object(objects.InstanceGroup, 'get_hosts', return_value=['hostA']), ) as (get_group, get_hosts): + sched._supports_anti_affinity = True update_group_hosts = sched._setup_instance_group(self.context, filter_properties) self.assertTrue(update_group_hosts) self.assertEqual(set(['hostA', 'hostB']), filter_properties['group_hosts']) - self.assertEqual(['anti-affinity'], - filter_properties['group_policies']) + self.assertEqual([policy], filter_properties['group_policies']) + + def test_group_details_in_filter_properties(self): + for policy in ['affinity', 'anti-affinity']: + group = self._create_server_group(policy) + self._group_details_in_filter_properties(group, func='get_by_uuid', + hint=group.uuid, + policy=policy) + + def _group_filter_with_filter_not_configured(self, policy): + self.flags(scheduler_default_filters=['f1', 'f2']) + sched = fakes.FakeFilterScheduler() + + instance = fake_instance.fake_instance_obj(self.context, + params={'host': 'hostA'}) + + group = objects.InstanceGroup() + group.uuid = str(uuid.uuid4()) + group.members = [instance.uuid] + group.policies = [policy] + + filter_properties = { + 'scheduler_hints': { + 'group': group.uuid, + }, + } + + with contextlib.nested( + mock.patch.object(objects.InstanceGroup, 'get_by_uuid', + 
return_value=group), + mock.patch.object(objects.InstanceGroup, 'get_hosts', + return_value=['hostA']), + ) as (get_group, get_hosts): + self.assertRaises(exception.NoValidHost, + sched._setup_instance_group, self.context, + filter_properties) + + def test_group_filter_with_filter_not_configured(self): + policies = ['anti-affinity', 'affinity'] + for policy in policies: + self._group_filter_with_filter_not_configured(policy) def test_group_uuid_details_in_filter_properties(self): group = self._create_server_group() - self._test_group_details_in_filter_properties(group, 'get_by_uuid', - group.uuid) + self._group_details_in_filter_properties(group, 'get_by_uuid', + group.uuid, 'anti-affinity') def test_group_name_details_in_filter_properties(self): group = self._create_server_group() - self._test_group_details_in_filter_properties(group, 'get_by_name', - group.name) + self._group_details_in_filter_properties(group, 'get_by_name', + group.name, 'anti-affinity') def test_schedule_host_pool(self): """Make sure the scheduler_host_subset_size property works properly.""" diff --git a/nova/tests/scheduler/test_scheduler_utils.py b/nova/tests/scheduler/test_scheduler_utils.py index e7a391b033..4613a419f2 100644 --- a/nova/tests/scheduler/test_scheduler_utils.py +++ b/nova/tests/scheduler/test_scheduler_utils.py @@ -215,3 +215,9 @@ def test_parse_options(self): '=', float, [('bar', -2.1)]) + + def test_validate_filters_configured(self): + self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2') + self.assertTrue(scheduler_utils.validate_filter('FakeFilter1')) + self.assertTrue(scheduler_utils.validate_filter('FakeFilter2')) + self.assertFalse(scheduler_utils.validate_filter('FakeFilter3')) From 48de2895b9a550a0944b31212349275605a4061d Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 24 Jul 2014 07:35:27 -0700 Subject: [PATCH 165/486] Fix FloatingIP.save() passing FixedIP object to sqlalchemy This prevents the FloatingIP.save() method from passing the calculated 
FixedIP object to the sqlalchemy floating_ip_update() function, which would expect it to be an SA object. It also aborts any attempt to save the object with a modified fixed_ip_id linkage, as associate/disassociate should be used for that. This also fixes a bug where FloatingIP expects the result of floating_ip_update() to be a FloatingIp SA object. Change-Id: I065caedf4d81c8583a3b390934a1d403cf2e87bd Closes-bug: #1334164 --- nova/db/sqlalchemy/api.py | 1 + nova/objects/floating_ip.py | 8 ++++++++ nova/tests/db/test_db_api.py | 4 +++- nova/tests/objects/test_floating_ip.py | 21 ++++++++++++++++++++- 4 files changed, 32 insertions(+), 2 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index f9d62a632d..7603a55cde 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1050,6 +1050,7 @@ def floating_ip_update(context, address, values): float_ip_ref.save(session=session) except db_exc.DBDuplicateEntry: raise exception.FloatingIpExists(address=values['address']) + return float_ip_ref def _dnsdomain_get(context, session, fqdomain): diff --git a/nova/objects/floating_ip.py b/nova/objects/floating_ip.py index d74ba424e7..d057f40a0b 100644 --- a/nova/objects/floating_ip.py +++ b/nova/objects/floating_ip.py @@ -136,6 +136,14 @@ def save(self, context): if 'address' in updates: raise exception.ObjectActionError(action='save', reason='address is not mutable') + if 'fixed_ip_id' in updates: + reason = 'fixed_ip_id is not mutable' + raise exception.ObjectActionError(action='save', reason=reason) + + # NOTE(danms): Make sure we don't pass the calculated fixed_ip + # relationship to the DB update method + updates.pop('fixed_ip', None) + db_floatingip = db.floating_ip_update(context, str(self.address), updates) self._from_db_object(context, self, db_floatingip) diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py index adaf68fc1f..083428c436 100644 --- a/nova/tests/db/test_db_api.py +++ 
b/nova/tests/db/test_db_api.py @@ -4092,7 +4092,9 @@ def test_floating_ip_update(self): 'interface': 'some_interface', 'pool': 'some_pool' } - db.floating_ip_update(self.ctxt, float_ip['address'], values) + floating_ref = db.floating_ip_update(self.ctxt, float_ip['address'], + values) + self.assertIsNot(floating_ref, None) updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id']) self._assertEqualObjects(updated_float_ip, values, ignored_keys=['id', 'address', 'updated_at', diff --git a/nova/tests/objects/test_floating_ip.py b/nova/tests/objects/test_floating_ip.py index a48756ef03..0f3d6fd593 100644 --- a/nova/tests/objects/test_floating_ip.py +++ b/nova/tests/objects/test_floating_ip.py @@ -130,13 +130,32 @@ def test_save(self, update): floatingip = floating_ip.FloatingIP(context=self.context, id=123, address='1.2.3.4', host='foo') - self.assertRaises(exception.ObjectActionError, floatingip.save) floatingip.obj_reset_changes(['address', 'id']) floatingip.save() self.assertEqual(set(), floatingip.obj_what_changed()) update.assert_called_with(self.context, '1.2.3.4', {'host': 'foo'}) + def test_save_errors(self): + floatingip = floating_ip.FloatingIP(context=self.context, + id=123, host='foo') + floatingip.obj_reset_changes() + floating_ip.address = '1.2.3.4' + self.assertRaises(exception.ObjectActionError, floatingip.save) + + floatingip.obj_reset_changes() + floatingip.fixed_ip_id = 1 + self.assertRaises(exception.ObjectActionError, floatingip.save) + + @mock.patch('nova.db.floating_ip_update') + def test_save_no_fixedip(self, update): + update.return_value = fake_floating_ip + floatingip = floating_ip.FloatingIP(context=self.context, + id=123) + floatingip.fixed_ip = objects.FixedIP(context=self.context, + id=456) + self.assertNotIn('fixed_ip', update.calls[1]) + @mock.patch('nova.db.floating_ip_get_all') def test_get_all(self, get): get.return_value = [fake_floating_ip] From 038857d5455aca07b521a681e1315decf0a6d8f6 Mon Sep 17 00:00:00 2001 From: Phil Day 
Date: Thu, 24 Jul 2014 17:08:58 +0000 Subject: [PATCH 166/486] Fix the i18n for some warnings in compute utils Change-Id: I82a5602bdbecb931c046bb693f16917f38a075fb --- nova/compute/utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/compute/utils.py b/nova/compute/utils.py index 8836f5039d..81922bcda1 100644 --- a/nova/compute/utils.py +++ b/nova/compute/utils.py @@ -25,7 +25,7 @@ from nova.compute import power_state from nova.compute import task_states from nova import exception -from nova.i18n import _ +from nova.i18n import _LW from nova.network import model as network_model from nova import notifications from nova import objects @@ -201,7 +201,7 @@ def get_image_metadata(context, image_api, image_id_or_uri, instance): except (exception.ImageNotAuthorized, exception.ImageNotFound, exception.Invalid) as e: - LOG.warning(_("Can't access image %(image_id)s: %(error)s"), + LOG.warning(_LW("Can't access image %(image_id)s: %(error)s"), {"image_id": image_id_or_uri, "error": e}, instance=instance) image_system_meta = {} @@ -325,7 +325,7 @@ def notify_about_host_update(context, event_suffix, host_payload): """ host_identifier = host_payload.get('host_name') if not host_identifier: - LOG.warn(_("No host name specified for the notification of " + LOG.warn(_LW("No host name specified for the notification of " "HostAPI.%s and it will be ignored"), event_suffix) return @@ -453,7 +453,7 @@ def periodic_task_spacing_warn(config_option_name): def wrapper(f): if (hasattr(f, "_periodic_spacing") and (f._periodic_spacing == 0 or f._periodic_spacing is None)): - LOG.warning(_("Value of 0 or None specified for %s." + LOG.warning(_LW("Value of 0 or None specified for %s." " This behaviour will change in meaning in the K release, to" " mean 'call at the default rate' rather than 'do not call'." 
" To keep the 'do not call' behaviour, use a negative value."), From a78bf3ab6cd921334abd5a15bba52aad3c2af746 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Thu, 24 Jul 2014 12:13:50 -0500 Subject: [PATCH 167/486] Remove unused cell_scheduler_method This key isn't used anywhere in the code (it's use was removed a while ago). Change-Id: Icf215b5dd0c7b93b336b6bb030f3a9282a0b05bd --- nova/tests/cells/test_cells_filters.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/tests/cells/test_cells_filters.py b/nova/tests/cells/test_cells_filters.py index 851a871287..01ad3580ff 100644 --- a/nova/tests/cells/test_cells_filters.py +++ b/nova/tests/cells/test_cells_filters.py @@ -163,8 +163,7 @@ def _fake_build_instances(ctxt, cell, sched_kwargs): 'routing_path': current_cell, 'scheduler': self.scheduler, 'context': self.context, - 'host_sched_kwargs': 'meow', - 'cell_scheduler_method': 'build_instances'} + 'host_sched_kwargs': 'meow'} # None is returned to bypass further scheduling. self.assertIsNone(self._filter_cells(cells, filter_props)) # The filter should have re-scheduled to the child cell itself. From 3c88fce604959a68f48d71274e0d93b74da17e34 Mon Sep 17 00:00:00 2001 From: Yunhong Jiang Date: Mon, 21 Apr 2014 09:15:56 -0700 Subject: [PATCH 168/486] Use hypervisor hostname for compute trust level In XenAPI, service hostname and compute node hostname is different because the Nova compute service may run in a separated VM and is different with the hostname of the compute node. The remote attestation service use the compute node's hostname because it's the compute node that will run the servers. 
Closes-Bug: #1223452 Change-Id: I9a7ce74d595531196804615a8947e253b0bd3f1a --- nova/scheduler/filters/trusted_filter.py | 9 ++---- nova/tests/scheduler/test_host_filters.py | 39 +++++++++++++++++++---- 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py index bd0f41b26c..d15290a2a7 100644 --- a/nova/scheduler/filters/trusted_filter.py +++ b/nova/scheduler/filters/trusted_filter.py @@ -50,7 +50,6 @@ from nova import context from nova import db -from nova.i18n import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils @@ -203,11 +202,7 @@ def __init__(self): # host in the first round that scheduler invokes us. computes = db.compute_node_get_all(admin) for compute in computes: - service = compute['service'] - if not service: - LOG.warn(_("No service for compute ID %s") % compute['id']) - continue - host = service['host'] + host = compute['hypervisor_hostname'] self._init_cache_entry(host) def _cache_valid(self, host): @@ -284,7 +279,7 @@ def host_passes(self, host_state, filter_properties): instance_type = filter_properties.get('instance_type', {}) extra = instance_type.get('extra_specs', {}) trust = extra.get('trust:trusted_host') - host = host_state.host + host = host_state.nodename if trust: return self.compute_attestation.is_trusted(host, trust) return True diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py index eb4de0cb29..41db7f2217 100644 --- a/nova/tests/scheduler/test_host_filters.py +++ b/nova/tests/scheduler/test_host_filters.py @@ -17,6 +17,7 @@ import httplib +import mock from oslo.config import cfg import stubout @@ -243,6 +244,7 @@ class HostFiltersTestCase(test.NoDBTestCase): def fake_oat_request(self, *args, **kwargs): """Stubs out the response from OAT service.""" self.oat_attested = True + self.oat_hosts = args[2] return 
httplib.OK, self.oat_data def setUp(self): @@ -1292,7 +1294,7 @@ def test_trusted_filter_default_passes(self): self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_trusted_filter_trusted_and_trusted_passes(self): - self.oat_data = {"hosts": [{"host_name": "host1", + self.oat_data = {"hosts": [{"host_name": "node1", "trust_lvl": "trusted", "vtime": timeutils.isotime()}]} self._stub_service_is_up(True) @@ -1305,7 +1307,7 @@ def test_trusted_filter_trusted_and_trusted_passes(self): self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_trusted_filter_trusted_and_untrusted_fails(self): - self.oat_data = {"hosts": [{"host_name": "host1", + self.oat_data = {"hosts": [{"host_name": "node1", "trust_lvl": "untrusted", "vtime": timeutils.isotime()}]} self._stub_service_is_up(True) @@ -1318,7 +1320,7 @@ def test_trusted_filter_trusted_and_untrusted_fails(self): self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_trusted_filter_untrusted_and_trusted_fails(self): - self.oat_data = {"hosts": [{"host_name": "host1", + self.oat_data = {"hosts": [{"host_name": "node", "trust_lvl": "trusted", "vtime": timeutils.isotime()}]} self._stub_service_is_up(True) @@ -1331,7 +1333,7 @@ def test_trusted_filter_untrusted_and_trusted_fails(self): self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_trusted_filter_untrusted_and_untrusted_passes(self): - self.oat_data = {"hosts": [{"host_name": "host1", + self.oat_data = {"hosts": [{"host_name": "node1", "trust_lvl": "untrusted", "vtime": timeutils.isotime()}]} self._stub_service_is_up(True) @@ -1344,8 +1346,8 @@ def test_trusted_filter_untrusted_and_untrusted_passes(self): self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_trusted_filter_update_cache(self): - self.oat_data = {"hosts": [{"host_name": - "host1", "trust_lvl": "untrusted", + self.oat_data = {"hosts": [{"host_name": "node1", + "trust_lvl": "untrusted", "vtime": timeutils.isotime()}]} 
filt_cls = self.class_map['TrustedFilter']() @@ -1372,7 +1374,7 @@ def test_trusted_filter_update_cache(self): timeutils.clear_time_override() def test_trusted_filter_update_cache_timezone(self): - self.oat_data = {"hosts": [{"host_name": "host1", + self.oat_data = {"hosts": [{"host_name": "node1", "trust_lvl": "untrusted", "vtime": "2012-09-09T05:10:40-04:00"}]} @@ -1401,6 +1403,29 @@ def test_trusted_filter_update_cache_timezone(self): timeutils.clear_time_override() + @mock.patch('nova.db.compute_node_get_all') + def test_trusted_filter_combine_hosts(self, mockdb): + self.oat_data = {"hosts": [{"host_name": "node1", + "trust_lvl": "untrusted", + "vtime": "2012-09-09T05:10:40-04:00"}]} + fake_compute_nodes = [ + {'hypervisor_hostname': 'node1', + 'service': {'host': 'host1'}, + }, + {'hypervisor_hostname': 'node2', + 'service': {'host': 'host2'}, + }, ] + mockdb.return_value = fake_compute_nodes + filt_cls = self.class_map['TrustedFilter']() + extra_specs = {'trust:trusted_host': 'trusted'} + filter_properties = {'context': self.context.elevated(), + 'instance_type': {'memory_mb': 1024, + 'extra_specs': extra_specs}} + host = fakes.FakeHostState('host1', 'node1', {}) + + filt_cls.host_passes(host, filter_properties) # Fill the caches + self.assertEqual(set(self.oat_hosts), set(['node1', 'node2'])) + def test_core_filter_passes(self): filt_cls = self.class_map['CoreFilter']() filter_properties = {'instance_type': {'vcpus': 1}} From aa300fcbfd88f984dce53d853d255270b73b5dbd Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 22 Jul 2014 15:35:34 -0700 Subject: [PATCH 169/486] VMware: remove local variable Make use of the instance.root_gb instead of reading the value into a local variable. 
TrivialFix Change-Id: I2ae4a9d2f5383ff05f47d00550c1c917fedc5db9 --- nova/virt/vmwareapi/vmops.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 4315d20899..2bb2520ee6 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -239,8 +239,7 @@ def _get_image_properties(root_size): return (vmdk_file_size_in_kb, os_type, adapter_type, disk_type, vif_model, image_linked_clone) - root_gb = instance.root_gb - root_gb_in_kb = root_gb * units.Mi + root_gb_in_kb = instance.root_gb * units.Mi (vmdk_file_size_in_kb, os_type, adapter_type, disk_type, vif_model, image_linked_clone) = _get_image_properties(root_gb_in_kb) @@ -459,7 +458,7 @@ def _get_image_properties(root_size): root_vmdk_path, dc_info.ref) else: upload_folder = '%s/%s' % (self._base_folder, upload_name) - if root_gb: + if instance.root_gb: root_vmdk_name = "%s.%s.vmdk" % (upload_name, instance.root_gb) else: From 099aad2c3f8887fb9c7c1e81cf4239a104227f48 Mon Sep 17 00:00:00 2001 From: Vladik Romanovsky Date: Fri, 13 Jun 2014 10:11:08 -0400 Subject: [PATCH 170/486] Method to filter non-root block device mappings Adding a generator that would provide a non-root block device mappings, when it's optional variable exclude_root_mapping is set to true. Otherwise, all mappings will be returned. The method will be used to handle LXC volumes, as it's root FS should be handled differently. Change-Id: I879916021c3b61f19dd69ff11838dbbac19f72d1 Related-Bug: #1269990 --- nova/block_device.py | 8 ++++++++ nova/tests/test_block_device.py | 11 +++++++++++ 2 files changed, 19 insertions(+) diff --git a/nova/block_device.py b/nova/block_device.py index b0e048d6c5..3cb123bd68 100644 --- a/nova/block_device.py +++ b/nova/block_device.py @@ -422,6 +422,14 @@ def get_root_bdm(bdms): return None +def get_bdms_to_connect(bdms, exclude_root_mapping=False): + """Will return non-root mappings, when exclude_root_mapping is true. 
+ Otherwise all mappings will be returned. + """ + return (bdm for bdm in bdms if bdm.get('boot_index', -1) != 0 or + not exclude_root_mapping) + + def mappings_prepend_dev(mappings): """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type.""" for m in mappings: diff --git a/nova/tests/test_block_device.py b/nova/tests/test_block_device.py index 6fc8604e93..20c249350a 100644 --- a/nova/tests/test_block_device.py +++ b/nova/tests/test_block_device.py @@ -230,6 +230,17 @@ def test_validate_and_default_volume_size(self): block_device.validate_and_default_volume_size, bdm) + def test_get_bdms_to_connect(self): + root_bdm = {'device_name': 'vda', 'boot_index': 0} + bdms = [root_bdm, + {'device_name': 'vdb', 'boot_index': 1}, + {'device_name': 'vdc', 'boot_index': -1}, + {'device_name': 'vde', 'boot_index': None}, + {'device_name': 'vdd'}] + self.assertNotIn(root_bdm, block_device.get_bdms_to_connect(bdms, + exclude_root_mapping=True)) + self.assertIn(root_bdm, block_device.get_bdms_to_connect(bdms)) + class TestBlockDeviceDict(test.NoDBTestCase): def setUp(self): From 54afa0683fb4e7bb259ffd3c810f0e767114b221 Mon Sep 17 00:00:00 2001 From: Vladik Romanovsky Date: Thu, 26 Jun 2014 17:46:43 -0400 Subject: [PATCH 171/486] libvirt: removing lxc specific disk mapping Currently, LXC specific disk mapping is being created, preventing the attached volumes from being added to the LXC disk mapping. Removing this part, to allow volumes to be included in mapping. 
Related-Bug: #1269990 Change-Id: I97bcff558b6d8876a10adb36c19ab3f4c972a708 --- nova/tests/virt/libvirt/test_blockinfo.py | 1 + nova/virt/libvirt/blockinfo.py | 17 ----------------- 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/nova/tests/virt/libvirt/test_blockinfo.py b/nova/tests/virt/libvirt/test_blockinfo.py index 62f8589362..9fec83ee4a 100644 --- a/nova/tests/virt/libvirt/test_blockinfo.py +++ b/nova/tests/virt/libvirt/test_blockinfo.py @@ -205,6 +205,7 @@ def test_get_disk_mapping_lxc(self): # A simple disk mapping setup, but for lxc user_context = context.RequestContext(self.user_id, self.project_id) + self.test_instance['ephemeral_gb'] = 0 instance_ref = db.instance_create(user_context, self.test_instance) mapping = blockinfo.get_disk_mapping("lxc", instance_ref, diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py index 27800eceed..d83bee8ecb 100644 --- a/nova/virt/libvirt/blockinfo.py +++ b/nova/virt/libvirt/blockinfo.py @@ -501,23 +501,6 @@ def get_disk_mapping(virt_type, instance, driver.block_device_info_get_mapping(block_device_info)) if get_device_name(bdm)] - if virt_type == "lxc": - # NOTE(zul): This information is not used by the libvirt driver - # however we need to populate mapping so the image can be - # created when the instance is started. This can - # be removed when we convert LXC to use block devices. 
- root_disk_bus = disk_bus - root_device_type = 'disk' - - root_info = get_next_disk_info(mapping, - root_disk_bus, - root_device_type, - boot_index=1) - mapping['root'] = root_info - mapping['disk'] = root_info - - return mapping - if rescue: rescue_info = get_next_disk_info(mapping, disk_bus, boot_index=1) From 2e6b2404155156ca336dadeacc8874645ca07bfc Mon Sep 17 00:00:00 2001 From: Thang Pham Date: Mon, 30 Jun 2014 23:17:35 -0400 Subject: [PATCH 172/486] API: Enable support for tenant option in nova absolute-limits When querying for the absolute limits of a specific tenant, the tenant option is ignored. There are no attempts to extract the tenant from the request. Instead, nova uses context.project_id as the project_id in QUOTAS.get_project_quotas. The following patch extracts the tenant_id from the request (if any) and passes that to QUOTAS.get_project_quotas to obtain the proper quota. Change-Id: If5f91de020ed8a40fa04fc001c7c4c92681f4ad1 Closes-Bug: #1334278 --- nova/api/openstack/compute/limits.py | 3 +- .../api/openstack/compute/test_limits.py | 37 ++++++++++++++++--- 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/compute/limits.py b/nova/api/openstack/compute/limits.py index 655934723b..0b52af405d 100644 --- a/nova/api/openstack/compute/limits.py +++ b/nova/api/openstack/compute/limits.py @@ -91,7 +91,8 @@ class LimitsController(object): def index(self, req): """Return all global and rate limit information.""" context = req.environ['nova.context'] - quotas = QUOTAS.get_project_quotas(context, context.project_id, + project_id = req.params.get('tenant_id', context.project_id) + quotas = QUOTAS.get_project_quotas(context, project_id, usages=False) abs_limits = dict((k, v['limit']) for k, v in quotas.items()) rate_limits = req.environ.get("nova.limits", []) diff --git a/nova/tests/api/openstack/compute/test_limits.py b/nova/tests/api/openstack/compute/test_limits.py index 18ca528779..7d4f53574b 100644 --- 
a/nova/tests/api/openstack/compute/test_limits.py +++ b/nova/tests/api/openstack/compute/test_limits.py @@ -22,6 +22,7 @@ from xml.dom import minidom from lxml import etree +import mock import webob from nova.api.openstack.compute import limits @@ -81,9 +82,13 @@ def setUp(self): self.controller = limits.create_resource() self.ctrler = limits.LimitsController() - def _get_index_request(self, accept_header="application/json"): + def _get_index_request(self, accept_header="application/json", + tenant_id=None): """Helper to set routing arguments.""" request = webob.Request.blank("/") + if tenant_id: + request = webob.Request.blank("/?tenant_id=%s" % tenant_id) + request.accept = accept_header request.environ["wsgiorg.routing_args"] = (None, { "action": "index", @@ -118,8 +123,18 @@ def test_empty_index_json(self): self.assertEqual(expected, body) def test_index_json(self): + self._test_index_json() + + def test_index_json_by_tenant(self): + self._test_index_json('faketenant') + + def _test_index_json(self, tenant_id=None): # Test getting limit details in JSON. 
- request = self._get_index_request() + request = self._get_index_request(tenant_id=tenant_id) + context = request.environ["nova.context"] + if tenant_id is None: + tenant_id = context.project_id + request = self._populate_limits(request) self.absolute_limits = { 'ram': 512, @@ -130,7 +145,6 @@ def test_index_json(self): 'security_groups': 10, 'security_group_rules': 20, } - response = request.get_response(self.controller) expected = { "limits": { "rate": [ @@ -180,8 +194,21 @@ def test_index_json(self): }, }, } - body = jsonutils.loads(response.body) - self.assertEqual(expected, body) + + def _get_project_quotas(context, project_id, usages=True): + return dict((k, dict(limit=v)) + for k, v in self.absolute_limits.items()) + + with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \ + get_project_quotas: + get_project_quotas.side_effect = _get_project_quotas + + response = request.get_response(self.controller) + + body = jsonutils.loads(response.body) + self.assertEqual(expected, body) + get_project_quotas.assert_called_once_with(context, tenant_id, + usages=False) def _populate_limits_diff_regex(self, request): """Put limit info into a request.""" From 842b2abfe76dede55b3b61ebaad5a90c356c5ace Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Mon, 25 Nov 2013 14:00:25 +0000 Subject: [PATCH 173/486] Increase min required libvirt to 0.9.11 Increase the min required libvirt version to 0.9.11 since we require that for libvirt-python from PyPI to build successfully. 
Kill off the legacy CPU model configuration and legacy OpenVSwitch setup code paths only required by libvirt < 0.9.11 Closes-Bug: #1254727 DocImpact Change-Id: Ibe8d2117e1246e4097d1bedeadcd6d99618f8400 --- nova/tests/virt/libvirt/fakelibvirt.py | 2 +- nova/tests/virt/libvirt/test_driver.py | 119 ++----------------------- nova/tests/virt/libvirt/test_vif.py | 22 +---- nova/virt/libvirt/driver.py | 45 +--------- nova/virt/libvirt/vif.py | 27 +----- 5 files changed, 16 insertions(+), 199 deletions(-) diff --git a/nova/tests/virt/libvirt/fakelibvirt.py b/nova/tests/virt/libvirt/fakelibvirt.py index 378e795848..f35cc3d43a 100644 --- a/nova/tests/virt/libvirt/fakelibvirt.py +++ b/nova/tests/virt/libvirt/fakelibvirt.py @@ -597,7 +597,7 @@ def delete(self, flags): class Connection(object): - def __init__(self, uri=None, readonly=False, version=9007): + def __init__(self, uri=None, readonly=False, version=9011): if not uri or uri == '': if allow_default_uri_connection: uri = 'qemu:///session' diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 2e918f670a..889f851ad4 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -2419,13 +2419,7 @@ def test_get_guest_cpu_config_default_lxc(self): {}, disk_info) self.assertIsNone(conf.cpu) - def test_get_guest_cpu_config_host_passthrough_new(self): - def get_lib_version_stub(): - return (0 * 1000 * 1000) + (9 * 1000) + 11 - - self.stubs.Set(self.conn, - "getLibVersion", - get_lib_version_stub) + def test_get_guest_cpu_config_host_passthrough(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = db.instance_create(self.context, self.test_instance) @@ -2443,13 +2437,7 @@ def get_lib_version_stub(): self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) - def test_get_guest_cpu_config_host_model_new(self): - def get_lib_version_stub(): - return (0 * 1000 * 1000) + (9 * 1000) + 11 - - 
self.stubs.Set(self.conn, - "getLibVersion", - get_lib_version_stub) + def test_get_guest_cpu_config_host_model(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = db.instance_create(self.context, self.test_instance) @@ -2467,13 +2455,7 @@ def get_lib_version_stub(): self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) - def test_get_guest_cpu_config_custom_new(self): - def get_lib_version_stub(): - return (0 * 1000 * 1000) + (9 * 1000) + 11 - - self.stubs.Set(self.conn, - "getLibVersion", - get_lib_version_stub) + def test_get_guest_cpu_config_custom(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = db.instance_create(self.context, self.test_instance) @@ -2493,97 +2475,6 @@ def get_lib_version_stub(): self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) - def test_get_guest_cpu_config_host_passthrough_old(self): - def get_lib_version_stub(): - return (0 * 1000 * 1000) + (9 * 1000) + 7 - - self.stubs.Set(self.conn, - "getLibVersion", - get_lib_version_stub) - conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) - instance_ref = db.instance_create(self.context, self.test_instance) - - self.flags(cpu_mode="host-passthrough", group='libvirt') - disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, - instance_ref) - self.assertRaises(exception.NovaException, - conn._get_guest_config, - instance_ref, - _fake_network_info(self.stubs, 1), - {}, - disk_info) - - def test_get_guest_cpu_config_host_model_old(self): - def get_lib_version_stub(): - return (0 * 1000 * 1000) + (9 * 1000) + 7 - - # Ensure we have a predictable host CPU - def get_host_capabilities_stub(self): - cpu = vconfig.LibvirtConfigGuestCPU() - cpu.model = "Opteron_G4" - cpu.vendor = "AMD" - - cpu.add_feature(vconfig.LibvirtConfigGuestCPUFeature("tm2")) - cpu.add_feature(vconfig.LibvirtConfigGuestCPUFeature("ht")) - - caps = vconfig.LibvirtConfigCaps() - caps.host = 
vconfig.LibvirtConfigCapsHost() - caps.host.cpu = cpu - return caps - - self.stubs.Set(self.conn, - "getLibVersion", - get_lib_version_stub) - self.stubs.Set(libvirt_driver.LibvirtDriver, - "_get_host_capabilities", - get_host_capabilities_stub) - conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) - instance_ref = db.instance_create(self.context, self.test_instance) - - self.flags(cpu_mode="host-model", group='libvirt') - disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, - instance_ref) - conf = conn._get_guest_config(instance_ref, - _fake_network_info(self.stubs, 1), - {}, disk_info) - self.assertIsInstance(conf.cpu, - vconfig.LibvirtConfigGuestCPU) - self.assertIsNone(conf.cpu.mode) - self.assertEqual(conf.cpu.model, "Opteron_G4") - self.assertEqual(conf.cpu.vendor, "AMD") - self.assertEqual(len(conf.cpu.features), 2) - self.assertEqual(conf.cpu.features.pop().name, "tm2") - self.assertEqual(conf.cpu.features.pop().name, "ht") - self.assertEqual(conf.cpu.sockets, 1) - self.assertEqual(conf.cpu.cores, 1) - self.assertEqual(conf.cpu.threads, 1) - - def test_get_guest_cpu_config_custom_old(self): - def get_lib_version_stub(): - return (0 * 1000 * 1000) + (9 * 1000) + 7 - - self.stubs.Set(self.conn, - "getLibVersion", - get_lib_version_stub) - conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) - instance_ref = db.instance_create(self.context, self.test_instance) - - self.flags(cpu_mode="custom", - cpu_model="Penryn", - group='libvirt') - disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, - instance_ref) - conf = conn._get_guest_config(instance_ref, - _fake_network_info(self.stubs, 1), - {}, disk_info) - self.assertIsInstance(conf.cpu, - vconfig.LibvirtConfigGuestCPU) - self.assertIsNone(conf.cpu.mode) - self.assertEqual(conf.cpu.model, "Penryn") - self.assertEqual(conf.cpu.sockets, 1) - self.assertEqual(conf.cpu.cores, 1) - self.assertEqual(conf.cpu.threads, 1) - def test_get_guest_cpu_topology(self): fake_flavor = 
objects.flavor.Flavor.get_by_id( self.context, @@ -5062,7 +4953,7 @@ def fake_none(*args, **kwargs): return def fake_getLibVersion(): - return 9007 + return 9011 def fake_getCapabilities(): return """ @@ -7768,7 +7659,7 @@ def fake_none(*args, **kwargs): return def fake_getLibVersion(): - return 9007 + return 9011 def fake_getCapabilities(): return """ diff --git a/nova/tests/virt/libvirt/test_vif.py b/nova/tests/virt/libvirt/test_vif.py index 62011791b5..f120c3aba0 100644 --- a/nova/tests/virt/libvirt/test_vif.py +++ b/nova/tests/virt/libvirt/test_vif.py @@ -454,9 +454,8 @@ def test_model_qemu_no_firewall(self): self.vif_8021qbg, self.vif_iovisor, self.vif_mlnx, + self.vif_ovs, ) - self._test_model_qemu(self.vif_ovs, - libvirt_version=vif.LIBVIRT_OVS_VPORT_VERSION) def test_model_qemu_iptables(self): self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver") @@ -507,23 +506,8 @@ def _check_ivs_ethernet_driver(self, d, vif, dev_prefix): script = node.find("script").get("path") self.assertEqual(script, "") - def _check_ovs_ethernet_driver(self, d, vif, dev_prefix): - self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver") - xml = self._get_instance_xml(d, vif) - node = self._get_node(xml) - self._assertTypeAndMacEquals(node, "ethernet", "target", "dev", - self.vif_ovs, prefix=dev_prefix) - script = node.find("script").get("path") - self.assertEqual(script, "") - - def test_ovs_ethernet_driver(self): - d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010)) - self._check_ovs_ethernet_driver(d, - self.vif_ovs, - "tap") - def test_unplug_ivs_ethernet(self): - d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010)) + d = vif.LibvirtGenericVIFDriver(self._get_conn()) with mock.patch.object(linux_net, 'delete_ivs_vif_port') as delete: delete.side_effect = processutils.ProcessExecutionError d.unplug_ivs_ethernet(None, self.vif_ovs) @@ -810,7 +794,7 @@ def test_direct_plug_with_port_filter_cap_no_nova_firewall(self): br_want = 
self.vif_midonet['devname'] xml = self._get_instance_xml(d, self.vif_ovs_filter_cap) node = self._get_node(xml) - self._assertTypeAndMacEquals(node, "ethernet", "target", "dev", + self._assertTypeAndMacEquals(node, "bridge", "target", "dev", self.vif_ovs_filter_cap, br_want) def _check_neutron_hybrid_driver(self, d, vif, br_want): diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 7759b940fb..4810260fb1 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -289,10 +289,9 @@ def repr_method(self): VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED, } -MIN_LIBVIRT_VERSION = (0, 9, 6) +MIN_LIBVIRT_VERSION = (0, 9, 11) # When the above version matches/exceeds this version # delete it & corresponding code using it -MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10) MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1) # Live snapshot requirements REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU" @@ -2948,29 +2947,6 @@ def _get_host_uuid(self): caps = self._get_host_capabilities() return caps.host.uuid - def _get_host_cpu_for_guest(self): - """Returns an instance of config.LibvirtConfigGuestCPU - representing the host's CPU model & topology with - policy for configuring a guest to match - """ - - caps = self._get_host_capabilities() - hostcpu = caps.host.cpu - guestcpu = vconfig.LibvirtConfigGuestCPU() - - guestcpu.model = hostcpu.model - guestcpu.vendor = hostcpu.vendor - guestcpu.arch = hostcpu.arch - - guestcpu.match = "exact" - - for hostfeat in hostcpu.features: - guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name) - guestfeat.policy = "require" - guestcpu.add_feature(guestfeat) - - return guestcpu - def _get_guest_cpu_model_config(self): mode = CONF.libvirt.cpu_mode model = CONF.libvirt.cpu_model @@ -3004,22 +2980,9 @@ def _get_guest_cpu_model_config(self): LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen", {'mode': mode, 'model': (model or "")}) - # TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is - # updated to be 
at least this new, we can kill off the elif - # blocks here - if self._has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION): - cpu = vconfig.LibvirtConfigGuestCPU() - cpu.mode = mode - cpu.model = model - elif mode == "custom": - cpu = vconfig.LibvirtConfigGuestCPU() - cpu.model = model - elif mode == "host-model": - cpu = self._get_host_cpu_for_guest() - elif mode == "host-passthrough": - msg = _("Passthrough of the host CPU was requested but " - "this libvirt version does not support this feature") - raise exception.NovaException(msg) + cpu = vconfig.LibvirtConfigGuestCPU() + cpu.mode = mode + cpu.model = model return cpu diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 108dc1aabd..e51f30da7e 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -46,9 +46,6 @@ CONF.import_opt('virt_type', 'nova.virt.libvirt.driver', group='libvirt') CONF.import_opt('use_ipv6', 'nova.netconf') -# Since libvirt 0.9.11, -# supports OpenVSwitch natively. -LIBVIRT_OVS_VPORT_VERSION = 9011 DEV_PREFIX_ETH = 'eth' @@ -86,20 +83,10 @@ class LibvirtBaseVIFDriver(object): def __init__(self, get_connection): self.get_connection = get_connection - self.libvirt_version = None def _normalize_vif_type(self, vif_type): return vif_type.replace('2.1q', '2q') - def has_libvirt_version(self, want): - if self.libvirt_version is None: - conn = self.get_connection() - self.libvirt_version = conn.getLibVersion() - - if self.libvirt_version >= want: - return True - return False - def get_vif_devname(self, vif): if 'devname' in vif: return vif['devname'] @@ -232,14 +219,10 @@ def get_config_ovs(self, instance, vif, image_meta, inst_type): return self.get_config_ovs_hybrid(instance, vif, image_meta, inst_type) - elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION): + else: return self.get_config_ovs_bridge(instance, vif, image_meta, inst_type) - else: - return self.get_config_ovs_ethernet(instance, vif, - image_meta, - inst_type) def get_config_ivs_hybrid(self, instance, 
vif, image_meta, inst_type): @@ -442,10 +425,8 @@ def plug_ovs_hybrid(self, instance, vif): def plug_ovs(self, instance, vif): if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled(): self.plug_ovs_hybrid(instance, vif) - elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION): - self.plug_ovs_bridge(instance, vif) else: - self.plug_ovs_ethernet(instance, vif) + self.plug_ovs_bridge(instance, vif) def plug_ivs_ethernet(self, instance, vif): super(LibvirtGenericVIFDriver, @@ -633,10 +614,8 @@ def unplug_ovs_hybrid(self, instance, vif): def unplug_ovs(self, instance, vif): if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled(): self.unplug_ovs_hybrid(instance, vif) - elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION): - self.unplug_ovs_bridge(instance, vif) else: - self.unplug_ovs_ethernet(instance, vif) + self.unplug_ovs_bridge(instance, vif) def unplug_ivs_ethernet(self, instance, vif): """Unplug the VIF by deleting the port from the bridge.""" From 8eb74693691a2468d5a0da69abc12333d9a05400 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Fri, 27 Jun 2014 13:06:02 +0100 Subject: [PATCH 174/486] libvirt: fill in metadata when launching instances When generating the guest XML config, fill in the nova specific metadata whose schema was previously defined. 
Blueprint: libvirt-driver-domain-metadata Change-Id: I3e1f0eb5bb0e71732a18609ff11f8ee6deb1a8da --- nova/tests/virt/libvirt/test_driver.py | 81 +++++++++++++++++++++++--- nova/virt/libvirt/driver.py | 42 ++++++++++++- 2 files changed, 114 insertions(+), 9 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 2e918f670a..eece26ea9d 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -22,6 +22,7 @@ import re import shutil import tempfile +import time import uuid from xml.dom import minidom @@ -1033,20 +1034,46 @@ def test_lxc_get_host_capabilities_failed(self): self.assertEqual(vconfig.LibvirtConfigCaps, type(caps)) self.assertNotIn('aes', [x.name for x in caps.host.cpu.features]) - def test_get_guest_config(self): + @mock.patch.object(time, "time") + def test_get_guest_config(self, time_mock): + time_mock.return_value = 1234567.89 + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) - instance_ref = db.instance_create(self.context, self.test_instance) + + test_instance = copy.deepcopy(self.test_instance) + test_instance["display_name"] = "purple tomatoes" + + ctxt = context.RequestContext(project_id=123, + project_name="aubergine", + user_id=456, + user_name="pie") + + flavor = objects.Flavor.get_by_id( + ctxt, test_instance["instance_type_id"]) + flavor.memory_mb = 6 + flavor.vcpus = 28 + flavor.root_gb = 496 + flavor.ephemeral_gb = 8128 + flavor.swap = 33550336 + instance_ref = db.instance_create(ctxt, test_instance) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref) - cfg = conn._get_guest_config(instance_ref, - _fake_network_info(self.stubs, 1), - {}, disk_info) + + with mock.patch.object(objects.Flavor, + "get_by_id") as flavor_mock: + flavor_mock.return_value = flavor + + cfg = conn._get_guest_config(instance_ref, + _fake_network_info(self.stubs, 1), + {}, disk_info, + context=ctxt) + self.assertEqual(cfg.uuid, 
instance_ref["uuid"]) self.assertEqual(cfg.acpi, True) self.assertEqual(cfg.apic, True) - self.assertEqual(cfg.memory, 2 * units.Mi) - self.assertEqual(cfg.vcpus, 1) + self.assertEqual(cfg.memory, 6 * units.Ki) + self.assertEqual(cfg.vcpus, 28) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_boot_dev, ["hd"]) self.assertIsNone(cfg.os_root) @@ -1068,6 +1095,46 @@ def test_get_guest_config(self): self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestVideo) + self.assertEqual(len(cfg.metadata), 1) + self.assertIsInstance(cfg.metadata[0], + vconfig.LibvirtConfigGuestMetaNovaInstance) + self.assertEqual(version.version_string_with_package(), + cfg.metadata[0].package) + self.assertEqual("purple tomatoes", + cfg.metadata[0].name) + self.assertEqual(1234567.89, + cfg.metadata[0].creationTime) + self.assertEqual("image", + cfg.metadata[0].roottype) + self.assertEqual(str(instance_ref["image_ref"]), + cfg.metadata[0].rootid) + + self.assertIsInstance(cfg.metadata[0].owner, + vconfig.LibvirtConfigGuestMetaNovaOwner) + self.assertEqual(456, + cfg.metadata[0].owner.userid) + self.assertEqual("pie", + cfg.metadata[0].owner.username) + self.assertEqual(123, + cfg.metadata[0].owner.projectid) + self.assertEqual("aubergine", + cfg.metadata[0].owner.projectname) + + self.assertIsInstance(cfg.metadata[0].flavor, + vconfig.LibvirtConfigGuestMetaNovaFlavor) + self.assertEqual("m1.small", + cfg.metadata[0].flavor.name) + self.assertEqual(6, + cfg.metadata[0].flavor.memory) + self.assertEqual(28, + cfg.metadata[0].flavor.vcpus) + self.assertEqual(496, + cfg.metadata[0].flavor.disk) + self.assertEqual(8128, + cfg.metadata[0].flavor.ephemeral) + self.assertEqual(33550336, + cfg.metadata[0].flavor.swap) + def test_get_guest_config_clock(self): self.flags(virt_type='kvm', group='libvirt') conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 7759b940fb..fb03f6aefe 100644 --- 
a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -3174,8 +3174,41 @@ def _get_guest_pci_device(self, pci_device): return dev + def _get_guest_config_meta(self, context, instance, flavor): + """Get metadata config for guest.""" + + meta = vconfig.LibvirtConfigGuestMetaNovaInstance() + meta.package = version.version_string_with_package() + meta.name = instance["display_name"] + meta.creationTime = time.time() + + if instance["image_ref"] not in ("", None): + meta.roottype = "image" + meta.rootid = instance["image_ref"] + + if context is not None: + ometa = vconfig.LibvirtConfigGuestMetaNovaOwner() + ometa.userid = context.user_id + ometa.username = context.user_name + ometa.projectid = context.project_id + ometa.projectname = context.project_name + meta.owner = ometa + + fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor() + fmeta.name = flavor.name + fmeta.memory = flavor.memory_mb + fmeta.vcpus = flavor.vcpus + fmeta.ephemeral = flavor.ephemeral_gb + fmeta.disk = flavor.root_gb + fmeta.swap = flavor.swap + + meta.flavor = fmeta + + return meta + def _get_guest_config(self, instance, network_info, image_meta, - disk_info, rescue=None, block_device_info=None): + disk_info, rescue=None, block_device_info=None, + context=None): """Get config data for parameters. :param rescue: optional dictionary that should contain the key @@ -3201,6 +3234,10 @@ def _get_guest_config(self, instance, network_info, image_meta, guest.vcpus = flavor.vcpus guest.cpuset = hardware.get_vcpu_pin_set() + guest.metadata.append(self._get_guest_config_meta(context, + instance, + flavor)) + cputuning = ['shares', 'period', 'quota'] for name in cputuning: key = "quota:cpu_" + name @@ -3539,7 +3576,8 @@ def _get_guest_xml(self, context, instance, network_info, disk_info, # need to sanitize the password in the message. 
LOG.debug(logging.mask_password(msg), instance=instance) conf = self._get_guest_config(instance, network_info, image_meta, - disk_info, rescue, block_device_info) + disk_info, rescue, block_device_info, + context) xml = conf.to_xml() if write_to_disk: From f0883800660ab546f5667b973f339c4df4c5c458 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Mon, 21 Jul 2014 15:09:08 +0100 Subject: [PATCH 175/486] libvirt: add tests for _live_snapshot and _swap_volume methods The _live_snapshot and _swap_volume methods do a non-trivial sequence of libvirt API calls which should be validated in the unit tests Closes-bug: #1346327 Change-Id: I14bfa08f0dbb4eaa7c4f069f70c257ba7605989b --- nova/tests/virt/libvirt/fake_libvirt_utils.py | 4 ++ nova/tests/virt/libvirt/fakelibvirt.py | 7 ++ nova/tests/virt/libvirt/test_driver.py | 67 +++++++++++++++++++ 3 files changed, 78 insertions(+) diff --git a/nova/tests/virt/libvirt/fake_libvirt_utils.py b/nova/tests/virt/libvirt/fake_libvirt_utils.py index 1585e60d92..e4f67461ee 100644 --- a/nova/tests/virt/libvirt/fake_libvirt_utils.py +++ b/nova/tests/virt/libvirt/fake_libvirt_utils.py @@ -90,6 +90,10 @@ def create_cow_image(backing_file, path): pass +def get_disk_size(path): + return 0 + + def get_disk_backing_file(path): return disk_backing_files.get(path, None) diff --git a/nova/tests/virt/libvirt/fakelibvirt.py b/nova/tests/virt/libvirt/fakelibvirt.py index 378e795848..6ba0a75c1b 100644 --- a/nova/tests/virt/libvirt/fakelibvirt.py +++ b/nova/tests/virt/libvirt/fakelibvirt.py @@ -73,6 +73,13 @@ def _reset(): VIR_DOMAIN_CRASHED = 6 VIR_DOMAIN_XML_SECURE = 1 +VIR_DOMAIN_XML_INACTIVE = 2 + +VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1 +VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2 +VIR_DOMAIN_BLOCK_REBASE_COPY = 8 + +VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2 VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0 diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 2e918f670a..817c1ed9fb 100644 --- 
a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -8127,6 +8127,73 @@ def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug): conn.cleanup, 'ctxt', fake_inst, 'netinfo') unplug.assert_called_once_with(fake_inst, 'netinfo', True) + def test_swap_volume(self): + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) + + mock_dom = mock.MagicMock() + + with mock.patch.object(drvr._conn, 'defineXML', + create=True) as mock_define: + xmldoc = "" + srcfile = "/first/path" + dstfile = "/second/path" + + mock_dom.XMLDesc.return_value = xmldoc + mock_dom.isPersistent.return_value = True + + drvr._swap_volume(mock_dom, srcfile, dstfile) + + mock_dom.XMLDesc.assert_called_once_with(0) + mock_dom.blockRebase.assert_called_once_with( + srcfile, dstfile, 0, + libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | + libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT) + + mock_define.assert_called_once_with(xmldoc) + + def test_live_snapshot(self): + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) + + mock_dom = mock.MagicMock() + + with contextlib.nested( + mock.patch.object(drvr._conn, 'defineXML', create=True), + mock.patch.object(fake_libvirt_utils, 'get_disk_size'), + mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'), + mock.patch.object(fake_libvirt_utils, 'create_cow_image'), + mock.patch.object(fake_libvirt_utils, 'chown'), + mock.patch.object(fake_libvirt_utils, 'extract_snapshot'), + ) as (mock_define, mock_size, mock_backing, mock_create_cow, + mock_chown, mock_snapshot): + + xmldoc = "" + srcfile = "/first/path" + dstfile = "/second/path" + bckfile = "/other/path" + dltfile = dstfile + ".delta" + + mock_dom.XMLDesc.return_value = xmldoc + mock_dom.isPersistent.return_value = True + mock_size.return_value = 1004009 + mock_backing.return_value = bckfile + + drvr._live_snapshot(mock_dom, srcfile, dstfile, "qcow2") + + mock_dom.XMLDesc.assert_called_once_with(0) + mock_dom.blockRebase.assert_called_once_with( + srcfile, 
dltfile, 0, + libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | + libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT | + libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW) + + mock_size.assert_called_once_with(srcfile) + mock_backing.assert_called_once_with(srcfile, basename=False) + mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009) + mock_chown.assert_called_once_with(dltfile, os.getuid()) + mock_snapshot.assert_called_once_with(dltfile, "qcow2", + dstfile, "qcow2") + mock_define.assert_called_once_with(xmldoc) + class HostStateTestCase(test.TestCase): From 4f040295814c7c4bb694411ad6baf2d1c0db62a4 Mon Sep 17 00:00:00 2001 From: Drew Thorstensen Date: Tue, 8 Jul 2014 10:14:36 -0500 Subject: [PATCH 176/486] Pass errors from detach methods back to api proc The initial implementation of the detach_interface method was built as cast invocations. The respective attach method was built as a call invocation. What this led to was, if the user attempted to attach an incorrect port, that error would be passed back up to the API properly. However, if the user attempted to detach an invalid interface, the response from the API would be an HTTP 202 (because the cast assumes success). The root error may be logged in the respective nova compute log, but it did not make its way back up to the user. This change updates the decorators to the manager methods to update the instance's fault when the attach or detach encounters an issue. 
Change-Id: I111b1c8a492f12587f86ab83825060029605390d Closes-Bug: #1339098 --- nova/compute/manager.py | 6 ++++ nova/tests/compute/test_compute_mgr.py | 42 ++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c9e94ef345..e829511c3d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -4493,6 +4493,9 @@ def remove_volume_connection(self, context, volume_id, instance): pass @object_compat + @wrap_exception() + @reverts_task_state + @wrap_instance_fault def attach_interface(self, context, instance, network_id, port_id, requested_ip): """Use hotplug to add an network adapter to an instance.""" @@ -4510,6 +4513,9 @@ def attach_interface(self, context, instance, network_id, port_id, return network_info[0] @object_compat + @wrap_exception() + @reverts_task_state + @wrap_instance_fault def detach_interface(self, context, instance, port_id): """Detach an network adapter from an instance.""" network_info = instance.info_cache.network_info diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 1fcfec5d53..717c34f37e 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -877,6 +877,48 @@ def get_by_filters(self, *args, **kwargs): self.assertFalse(c.cleaned) self.assertEqual('1', c.system_metadata['clean_attempts']) + def test_attach_interface_failure(self): + # Test that the fault methods are invoked when an attach fails + db_instance = fake_instance.fake_db_instance() + f_instance = objects.Instance._from_db_object(self.context, + objects.Instance(), + db_instance) + e = exception.InterfaceAttachFailed(instance=f_instance) + + @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') + @mock.patch.object(self.compute.network_api, + 'allocate_port_for_instance', + side_effect=e) + def do_test(meth, add_fault): + self.assertRaises(exception.InterfaceAttachFailed, + 
self.compute.attach_interface, + self.context, f_instance, 'net_id', 'port_id', + None) + add_fault.assert_has_calls( + mock.call(self.context, f_instance, e, + mock.ANY)) + + do_test() + + def test_detach_interface_failure(self): + # Test that the fault methods are invoked when a detach fails + + # Build test data that will cause a PortNotFound exception + f_instance = mock.MagicMock() + f_instance.info_cache = mock.MagicMock() + f_instance.info_cache.network_info = [] + + @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') + @mock.patch.object(self.compute, '_set_instance_error_state') + def do_test(meth, add_fault): + self.assertRaises(exception.PortNotFound, + self.compute.detach_interface, + self.context, f_instance, 'port_id') + add_fault.assert_has_calls( + mock.call(self.context, f_instance, mock.ANY, mock.ANY)) + + do_test() + def test_swap_volume_volume_api_usage(self): # This test ensures that volume_id arguments are passed to volume_api # and that volume states are OK From 5e4a5f0d8c62ca6e94ae6db16e9fbe0428805158 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Fri, 25 Jul 2014 04:19:10 -0700 Subject: [PATCH 177/486] Do not pass instances without host to compute API Even if the server external events extension filters out events whose related instance does not have a host, the corresponding instance is still sent to the compute API module. As this might result in KeyError, instance without host should be filtered out before calling the compute API module. 
Change-Id: If5229ec3059076dbc9f4abb6625504e8864c265e Closes-Bug: #1348584 --- .../compute/contrib/server_external_events.py | 10 ++++++---- .../compute/contrib/test_server_external_events.py | 4 ++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/compute/contrib/server_external_events.py b/nova/api/openstack/compute/contrib/server_external_events.py index 9ecddde261..9c3cd4f27f 100644 --- a/nova/api/openstack/compute/contrib/server_external_events.py +++ b/nova/api/openstack/compute/contrib/server_external_events.py @@ -71,7 +71,8 @@ def create(self, req, body): authorize(context, action='create') response_events = [] - accepted = [] + accepted_events = [] + accepted_instances = set() instances = {} result = 200 @@ -120,7 +121,8 @@ def create(self, req, body): # it will not be possible to dispatch the event if instance: if instance.host: - accepted.append(event) + accepted_events.append(event) + accepted_instances.add(instance) LOG.audit(_('Creating event %(name)s:%(tag)s for instance ' '%(instance_uuid)s'), dict(event.iteritems())) @@ -139,9 +141,9 @@ def create(self, req, body): response_events.append(_event) - if accepted: + if accepted_events: self.compute_api.external_instance_event( - context, instances.values(), accepted) + context, accepted_instances, accepted_events) else: msg = _('No instances found for any event') raise webob.exc.HTTPNotFound(explanation=msg) diff --git a/nova/tests/api/openstack/compute/contrib/test_server_external_events.py b/nova/tests/api/openstack/compute/contrib/test_server_external_events.py index 160f6f59a3..c2852fdde4 100644 --- a/nova/tests/api/openstack/compute/contrib/test_server_external_events.py +++ b/nova/tests/api/openstack/compute/contrib/test_server_external_events.py @@ -115,9 +115,9 @@ def test_create_event_instance_has_no_host(self): body = self.default_body body['events'][0]['server_uuid'] = fake_instance_uuids[-1] req = self._create_req(body) + # the instance without host should 
not be passed to the compute layer result, code = self._assert_call(req, body, - [fake_instance_uuids[1], - fake_instance_uuids[-1]], + [fake_instance_uuids[1]], ['network-changed']) self.assertEqual(422, result['events'][0]['code']) self.assertEqual('failed', result['events'][0]['status']) From 8b6ea606d9dd883857c13ae43baf1e80aa0e8c58 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Wed, 2 Jul 2014 18:06:30 +0100 Subject: [PATCH 178/486] virt: force TCG with libguestfs unless KVM is enabled in libvirt If the libvirt driver has not been configured to use KVM, then the libguestfs module should be forced to use TCG. This is particularly important when running Nova inside a VM, which might claim to have VMX/SVM support when it is in fact broken. This will avoid libguestfs hanging in such scenarios. Resolves-bug: #1286256 Change-Id: I9316dcedd65244c60d468b270311f032b45b051f --- nova/tests/fakeguestfs.py | 4 +++ nova/tests/virt/test_virt_disk_vfs_guestfs.py | 25 ++++++++++++++++++- nova/virt/disk/vfs/guestfs.py | 24 +++++++++++++++++- nova/virt/libvirt/driver.py | 7 ++++++ 4 files changed, 58 insertions(+), 2 deletions(-) diff --git a/nova/tests/fakeguestfs.py b/nova/tests/fakeguestfs.py index c8424282d5..7a3b33039c 100644 --- a/nova/tests/fakeguestfs.py +++ b/nova/tests/fakeguestfs.py @@ -27,6 +27,7 @@ def __init__(self, **kwargs): self.files = {} self.auginit = False self.root_mounted = False + self.backend_settings = None def launch(self): self.running = True @@ -36,6 +37,9 @@ def shutdown(self): self.mounts = [] self.drives = [] + def set_backend_settings(self, settings): + self.backend_settings = settings + def close(self): self.closed = True diff --git a/nova/tests/virt/test_virt_disk_vfs_guestfs.py b/nova/tests/virt/test_virt_disk_vfs_guestfs.py index 353319b988..692576cdc0 100644 --- a/nova/tests/virt/test_virt_disk_vfs_guestfs.py +++ b/nova/tests/virt/test_virt_disk_vfs_guestfs.py @@ -27,12 +27,23 @@ def setUp(self): sys.modules['guestfs'] = 
fakeguestfs vfsimpl.guestfs = fakeguestfs - def test_appliance_setup_inspect(self): + def _do_test_appliance_setup_inspect(self, forcetcg): + if forcetcg: + vfsimpl.force_tcg() + else: + vfsimpl.force_tcg(False) + vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2", partition=-1) vfs.setup() + if forcetcg: + self.assertEqual(vfs.handle.backend_settings, "force_tcg") + vfsimpl.force_tcg(False) + else: + self.assertIsNone(vfs.handle.backend_settings) + self.assertEqual(vfs.handle.running, True) self.assertEqual(len(vfs.handle.mounts), 3) self.assertEqual(vfs.handle.mounts[0][1], @@ -52,6 +63,12 @@ def test_appliance_setup_inspect(self): self.assertEqual(handle.closed, True) self.assertEqual(len(handle.mounts), 0) + def test_appliance_setup_inspect_auto(self): + self._do_test_appliance_setup_inspect(False) + + def test_appliance_setup_inspect_tcg(self): + self._do_test_appliance_setup_inspect(True) + def test_appliance_setup_inspect_no_root_raises(self): vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2", @@ -59,6 +76,8 @@ def test_appliance_setup_inspect_no_root_raises(self): # call setup to init the handle so we can stub it vfs.setup() + self.assertIsNone(vfs.handle.backend_settings) + def fake_inspect_os(): return [] @@ -72,6 +91,8 @@ def test_appliance_setup_inspect_multi_boots_raises(self): # call setup to init the handle so we can stub it vfs.setup() + self.assertIsNone(vfs.handle.backend_settings) + def fake_inspect_os(): return ['fake1', 'fake2'] @@ -84,6 +105,7 @@ def test_appliance_setup_static_nopart(self): partition=None) vfs.setup() + self.assertIsNone(vfs.handle.backend_settings) self.assertEqual(vfs.handle.running, True) self.assertEqual(len(vfs.handle.mounts), 1) self.assertEqual(vfs.handle.mounts[0][1], "/dev/sda") @@ -103,6 +125,7 @@ def test_appliance_setup_static_part(self): partition=2) vfs.setup() + self.assertIsNone(vfs.handle.backend_settings) self.assertEqual(vfs.handle.running, True) 
self.assertEqual(len(vfs.handle.mounts), 1) self.assertEqual(vfs.handle.mounts[0][1], "/dev/sda2") diff --git a/nova/virt/disk/vfs/guestfs.py b/nova/virt/disk/vfs/guestfs.py index a65f649ab3..fabfde5ba4 100644 --- a/nova/virt/disk/vfs/guestfs.py +++ b/nova/virt/disk/vfs/guestfs.py @@ -15,7 +15,7 @@ from eventlet import tpool from nova import exception -from nova.i18n import _ +from nova.i18n import _, _LI from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.virt.disk.vfs import api as vfs @@ -24,6 +24,18 @@ LOG = logging.getLogger(__name__) guestfs = None +forceTCG = False + + +def force_tcg(force=True): + """Prevent libguestfs trying to use KVM acceleration + + It is a good idea to call this if it is known that + KVM is not desired, even if technically available. + """ + + global forceTCG + forceTCG = force class VFSGuestFS(vfs.VFS): @@ -116,6 +128,16 @@ def setup(self): else: raise + try: + if forceTCG: + self.handle.set_backend_settings("force_tcg") + except AttributeError as ex: + # set_backend_settings method doesn't exist in older + # libguestfs versions, so nothing we can do but ignore + LOG.info(_LI("Unable to force TCG mode, libguestfs too old?"), + ex) + pass + try: self.handle.add_drive_opts(self.imgfile, format=self.imgfmt) self.handle.launch() diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 7759b940fb..16c4e695cd 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -81,6 +81,7 @@ from nova.virt import block_device as driver_block_device from nova.virt import configdrive from nova.virt.disk import api as disk +from nova.virt.disk.vfs import guestfs from nova.virt import driver from nova.virt import event as virtevent from nova.virt import firewall @@ -631,6 +632,12 @@ def init_host(self, host): libvirt.virEventRegisterDefaultImpl() self._do_quality_warnings() + # Stop libguestfs using KVM unless we're also configured + # to use this. 
This solves problem where people need to + # stop Nova use of KVM because nested-virt is broken + if CONF.libvirt.virt_type != "kvm": + guestfs.force_tcg() + if not self._has_min_version(MIN_LIBVIRT_VERSION): major = MIN_LIBVIRT_VERSION[0] minor = MIN_LIBVIRT_VERSION[1] From 568429466d01a8f7ec7ce4e2448f27b32491e7d9 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Tue, 8 Jul 2014 11:45:24 +0100 Subject: [PATCH 179/486] virt: move disk tests into a sub-directory The test_virt_disk.py, test_virt_disk_vfs_guestfs.py, and test_virt_disk_vfs_localfs.py test cases were in the top level nova/tests/virt directory. Current policy is to use a directory location that matches the source being tested, so they should be in either nova/tests/virt/disk or nova/tests/virt/disk/vfs respectively. The test_loop.py and test_nbd.py tests should be in nova/tests/virt/disk/mount too. Finally fakeguestfs.py should also be kept in the nova/tests/virt/disk/vfs directory Change-Id: Iebee539d202d5122939ae478fd482f19b19f604d --- nova/tests/virt/disk/mount/__init__.py | 0 nova/tests/virt/disk/{ => mount}/test_loop.py | 0 nova/tests/virt/disk/{ => mount}/test_nbd.py | 0 nova/tests/virt/{test_virt_disk.py => disk/test_inject.py} | 2 +- nova/tests/virt/disk/vfs/__init__.py | 0 nova/tests/{ => virt/disk/vfs}/fakeguestfs.py | 0 .../{test_virt_disk_vfs_guestfs.py => disk/vfs/test_guestfs.py} | 2 +- .../{test_virt_disk_vfs_localfs.py => disk/vfs/test_localfs.py} | 0 8 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 nova/tests/virt/disk/mount/__init__.py rename nova/tests/virt/disk/{ => mount}/test_loop.py (100%) rename nova/tests/virt/disk/{ => mount}/test_nbd.py (100%) rename nova/tests/virt/{test_virt_disk.py => disk/test_inject.py} (99%) create mode 100644 nova/tests/virt/disk/vfs/__init__.py rename nova/tests/{ => virt/disk/vfs}/fakeguestfs.py (100%) rename nova/tests/virt/{test_virt_disk_vfs_guestfs.py => disk/vfs/test_guestfs.py} (99%) rename 
nova/tests/virt/{test_virt_disk_vfs_localfs.py => disk/vfs/test_localfs.py} (100%) diff --git a/nova/tests/virt/disk/mount/__init__.py b/nova/tests/virt/disk/mount/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nova/tests/virt/disk/test_loop.py b/nova/tests/virt/disk/mount/test_loop.py similarity index 100% rename from nova/tests/virt/disk/test_loop.py rename to nova/tests/virt/disk/mount/test_loop.py diff --git a/nova/tests/virt/disk/test_nbd.py b/nova/tests/virt/disk/mount/test_nbd.py similarity index 100% rename from nova/tests/virt/disk/test_nbd.py rename to nova/tests/virt/disk/mount/test_nbd.py diff --git a/nova/tests/virt/test_virt_disk.py b/nova/tests/virt/disk/test_inject.py similarity index 99% rename from nova/tests/virt/test_virt_disk.py rename to nova/tests/virt/disk/test_inject.py index fe886293fb..9685c2e6f2 100644 --- a/nova/tests/virt/test_virt_disk.py +++ b/nova/tests/virt/disk/test_inject.py @@ -17,7 +17,7 @@ from nova import exception from nova import test -from nova.tests import fakeguestfs +from nova.tests.virt.disk.vfs import fakeguestfs from nova.virt.disk import api as diskapi from nova.virt.disk.vfs import guestfs as vfsguestfs diff --git a/nova/tests/virt/disk/vfs/__init__.py b/nova/tests/virt/disk/vfs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nova/tests/fakeguestfs.py b/nova/tests/virt/disk/vfs/fakeguestfs.py similarity index 100% rename from nova/tests/fakeguestfs.py rename to nova/tests/virt/disk/vfs/fakeguestfs.py diff --git a/nova/tests/virt/test_virt_disk_vfs_guestfs.py b/nova/tests/virt/disk/vfs/test_guestfs.py similarity index 99% rename from nova/tests/virt/test_virt_disk_vfs_guestfs.py rename to nova/tests/virt/disk/vfs/test_guestfs.py index 692576cdc0..9f3e8a8cb7 100644 --- a/nova/tests/virt/test_virt_disk_vfs_guestfs.py +++ b/nova/tests/virt/disk/vfs/test_guestfs.py @@ -16,7 +16,7 @@ from nova import exception from nova import test -from nova.tests import fakeguestfs 
+from nova.tests.virt.disk.vfs import fakeguestfs from nova.virt.disk.vfs import guestfs as vfsimpl diff --git a/nova/tests/virt/test_virt_disk_vfs_localfs.py b/nova/tests/virt/disk/vfs/test_localfs.py similarity index 100% rename from nova/tests/virt/test_virt_disk_vfs_localfs.py rename to nova/tests/virt/disk/vfs/test_localfs.py From 0d4ff34c891e1bd1366f18411a0be514e56161b9 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Tue, 8 Jul 2014 11:55:05 +0100 Subject: [PATCH 180/486] virt: switch order of args to assertEqual in guestfs test Policy is to have the expected data first in an assertEqual call, so update the guestfs test suite to match. Change-Id: Ic0cbe354dde87881ebaec78bb2cb03a32edd36c5 --- nova/tests/virt/disk/vfs/test_guestfs.py | 87 ++++++++++++------------ 1 file changed, 44 insertions(+), 43 deletions(-) diff --git a/nova/tests/virt/disk/vfs/test_guestfs.py b/nova/tests/virt/disk/vfs/test_guestfs.py index 9f3e8a8cb7..20b15488e5 100644 --- a/nova/tests/virt/disk/vfs/test_guestfs.py +++ b/nova/tests/virt/disk/vfs/test_guestfs.py @@ -39,29 +39,30 @@ def _do_test_appliance_setup_inspect(self, forcetcg): vfs.setup() if forcetcg: - self.assertEqual(vfs.handle.backend_settings, "force_tcg") + self.assertEqual("force_tcg", vfs.handle.backend_settings) vfsimpl.force_tcg(False) else: self.assertIsNone(vfs.handle.backend_settings) - self.assertEqual(vfs.handle.running, True) - self.assertEqual(len(vfs.handle.mounts), 3) - self.assertEqual(vfs.handle.mounts[0][1], - "/dev/mapper/guestvgf-lv_root") - self.assertEqual(vfs.handle.mounts[1][1], "/dev/vda1") - self.assertEqual(vfs.handle.mounts[2][1], - "/dev/mapper/guestvgf-lv_home") - self.assertEqual(vfs.handle.mounts[0][2], "/") - self.assertEqual(vfs.handle.mounts[1][2], "/boot") - self.assertEqual(vfs.handle.mounts[2][2], "/home") + self.assertTrue(vfs.handle.running) + self.assertEqual(3, len(vfs.handle.mounts)) + self.assertEqual("/dev/mapper/guestvgf-lv_root", + vfs.handle.mounts[0][1]) + 
self.assertEqual("/dev/vda1", + vfs.handle.mounts[1][1]) + self.assertEqual("/dev/mapper/guestvgf-lv_home", + vfs.handle.mounts[2][1]) + self.assertEqual("/", vfs.handle.mounts[0][2]) + self.assertEqual("/boot", vfs.handle.mounts[1][2]) + self.assertEqual("/home", vfs.handle.mounts[2][2]) handle = vfs.handle vfs.teardown() self.assertIsNone(vfs.handle) - self.assertEqual(handle.running, False) - self.assertEqual(handle.closed, True) - self.assertEqual(len(handle.mounts), 0) + self.assertFalse(handle.running) + self.assertTrue(handle.closed) + self.assertEqual(0, len(handle.mounts)) def test_appliance_setup_inspect_auto(self): self._do_test_appliance_setup_inspect(False) @@ -106,18 +107,18 @@ def test_appliance_setup_static_nopart(self): vfs.setup() self.assertIsNone(vfs.handle.backend_settings) - self.assertEqual(vfs.handle.running, True) - self.assertEqual(len(vfs.handle.mounts), 1) - self.assertEqual(vfs.handle.mounts[0][1], "/dev/sda") - self.assertEqual(vfs.handle.mounts[0][2], "/") + self.assertTrue(vfs.handle.running) + self.assertEqual(1, len(vfs.handle.mounts)) + self.assertEqual("/dev/sda", vfs.handle.mounts[0][1]) + self.assertEqual("/", vfs.handle.mounts[0][2]) handle = vfs.handle vfs.teardown() self.assertIsNone(vfs.handle) - self.assertEqual(handle.running, False) - self.assertEqual(handle.closed, True) - self.assertEqual(len(handle.mounts), 0) + self.assertFalse(handle.running) + self.assertTrue(handle.closed) + self.assertEqual(0, len(handle.mounts)) def test_appliance_setup_static_part(self): vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", @@ -126,18 +127,18 @@ def test_appliance_setup_static_part(self): vfs.setup() self.assertIsNone(vfs.handle.backend_settings) - self.assertEqual(vfs.handle.running, True) - self.assertEqual(len(vfs.handle.mounts), 1) - self.assertEqual(vfs.handle.mounts[0][1], "/dev/sda2") - self.assertEqual(vfs.handle.mounts[0][2], "/") + self.assertTrue(vfs.handle.running) + self.assertEqual(1, len(vfs.handle.mounts)) + 
self.assertEqual("/dev/sda2", vfs.handle.mounts[0][1]) + self.assertEqual("/", vfs.handle.mounts[0][2]) handle = vfs.handle vfs.teardown() self.assertIsNone(vfs.handle) - self.assertEqual(handle.running, False) - self.assertEqual(handle.closed, True) - self.assertEqual(len(handle.mounts), 0) + self.assertFalse(handle.running) + self.assertTrue(handle.closed) + self.assertEqual(0, len(handle.mounts)) def test_makepath(self): vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2") @@ -158,8 +159,8 @@ def test_append_file(self): vfs.append_file("/some/file", " Goodbye") self.assertIn("/some/file", vfs.handle.files) - self.assertEqual(vfs.handle.files["/some/file"]["content"], - "Hello World Goodbye") + self.assertEqual("Hello World Goodbye", + vfs.handle.files["/some/file"]["content"]) vfs.teardown() @@ -169,15 +170,15 @@ def test_replace_file(self): vfs.replace_file("/some/file", "Goodbye") self.assertIn("/some/file", vfs.handle.files) - self.assertEqual(vfs.handle.files["/some/file"]["content"], - "Goodbye") + self.assertEqual("Goodbye", + vfs.handle.files["/some/file"]["content"]) vfs.teardown() def test_read_file(self): vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2") vfs.setup() - self.assertEqual(vfs.read_file("/some/file"), "Hello World") + self.assertEqual("Hello World", vfs.read_file("/some/file")) vfs.teardown() @@ -196,10 +197,10 @@ def test_set_permissions(self): vfs.setup() vfs.read_file("/some/file") - self.assertEqual(vfs.handle.files["/some/file"]["mode"], 0o700) + self.assertEqual(0o700, vfs.handle.files["/some/file"]["mode"]) vfs.set_permissions("/some/file", 0o7777) - self.assertEqual(vfs.handle.files["/some/file"]["mode"], 0o7777) + self.assertEqual(0o7777, vfs.handle.files["/some/file"]["mode"]) vfs.teardown() @@ -208,20 +209,20 @@ def test_set_ownership(self): vfs.setup() vfs.read_file("/some/file") - self.assertEqual(vfs.handle.files["/some/file"]["uid"], 100) - self.assertEqual(vfs.handle.files["/some/file"]["gid"], 
100) + self.assertEqual(100, vfs.handle.files["/some/file"]["uid"]) + self.assertEqual(100, vfs.handle.files["/some/file"]["gid"]) vfs.set_ownership("/some/file", "fred", None) - self.assertEqual(vfs.handle.files["/some/file"]["uid"], 105) - self.assertEqual(vfs.handle.files["/some/file"]["gid"], 100) + self.assertEqual(105, vfs.handle.files["/some/file"]["uid"]) + self.assertEqual(100, vfs.handle.files["/some/file"]["gid"]) vfs.set_ownership("/some/file", None, "users") - self.assertEqual(vfs.handle.files["/some/file"]["uid"], 105) - self.assertEqual(vfs.handle.files["/some/file"]["gid"], 500) + self.assertEqual(105, vfs.handle.files["/some/file"]["uid"]) + self.assertEqual(500, vfs.handle.files["/some/file"]["gid"]) vfs.set_ownership("/some/file", "joe", "admins") - self.assertEqual(vfs.handle.files["/some/file"]["uid"], 110) - self.assertEqual(vfs.handle.files["/some/file"]["gid"], 600) + self.assertEqual(110, vfs.handle.files["/some/file"]["uid"]) + self.assertEqual(600, vfs.handle.files["/some/file"]["gid"]) vfs.teardown() From e780b10189cf70130d935652eda08a740fe6e417 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Mon, 14 Jul 2014 14:25:14 +0100 Subject: [PATCH 181/486] Add standard constants for CPU architectures Introduce a nova/compute/arch.py module to serve a similar role to nova/compute/vmmode.py, by providing constants for known CPU architectures and validation/canonicalization APIs Related-bug: #1348624 Change-Id: Idd4907b5488dd0832c15783d87d57d77ae654519 --- nova/compute/arch.py | 155 ++++++++++++++++++++++++++++++++ nova/exception.py | 4 + nova/tests/compute/test_arch.py | 56 ++++++++++++ 3 files changed, 215 insertions(+) create mode 100644 nova/compute/arch.py create mode 100644 nova/tests/compute/test_arch.py diff --git a/nova/compute/arch.py b/nova/compute/arch.py new file mode 100644 index 0000000000..12e9f05151 --- /dev/null +++ b/nova/compute/arch.py @@ -0,0 +1,155 @@ +# Copyright 2014 Red Hat, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Constants and helper APIs for dealing with CPU architectures + +The constants provide the standard names for all known processor +architectures. Many have multiple variants to deal with big-endian +vs little-endian modes, as well as 32 vs 64 bit word sizes. These +names are chosen to be identical to the architecture names expected +by libvirt, so if ever adding new ones, ensure it matches libvirt's +expectation. +""" + +import os + +from nova import exception + +ALPHA = "alpha" +ARMV6 = "armv6" +ARMV7 = "armv7l" +ARMV7B = "armv7b" + +AARCH64 = "aarch64" +CRIS = "cris" +I686 = "i686" +IA64 = "ia64" +LM32 = "lm32" + +M68K = "m68k" +MICROBLAZE = "microblaze" +MICROBLAZEEL = "microblazeel" +MIPS = "mips" +MIPSEL = "mipsel" + +MIPS64 = "mips64" +MIPS64EL = "mips64el" +OPENRISC = "openrisc" +PARISC = "parisc" +PARISC64 = "parisc64" + +PPC = "ppc" +PPCLE = "ppcle" +PPC64 = "ppc64" +PPC64LE = "ppc64le" +PPCEMB = "ppcemb" + +S390 = "s390" +S390X = "s390x" +SH4 = "sh4" +SH4EB = "sh4eb" +SPARC = "sparc" + +SPARC64 = "sparc64" +UNICORE32 = "unicore32" +X86_64 = "x86_64" +XTENSA = "xtensa" +XTENSAEB = "xtensaeb" + + +ALL = [ + ALPHA, + ARMV6, + ARMV7, + ARMV7B, + + AARCH64, + CRIS, + I686, + IA64, + LM32, + + M68K, + MICROBLAZE, + MICROBLAZEEL, + MIPS, + MIPSEL, + + MIPS64, + MIPS64EL, + OPENRISC, + PARISC, + PARISC64, + + PPC, + PPCLE, + PPC64, + PPC64LE, + PPCEMB, + + S390, + S390X, + SH4, + SH4EB, + SPARC, + + SPARC64, + 
UNICORE32, + X86_64, + XTENSA, + XTENSAEB, +] + + +def from_host(): + """Get the architecture of the host OS + + :returns: the canonicalized host architecture + """ + + return canonicalize(os.uname()[4]) + + +def is_valid(name): + """Check if a string is a valid architecture + + :param name: architecture name to validate + + :returns: True if @name is valid + """ + + return name in ALL + + +def canonicalize(name): + """Canonicalize the architecture name + + :param name: architecture name to canonicalize + + :returns: a canonical architecture name + """ + + if name is None: + return None + + newname = name.lower() + + if newname in ("i386", "i486", "i586"): + newname = I686 + + if not is_valid(newname): + raise exception.InvalidArchitectureName(arch=name) + + return newname diff --git a/nova/exception.py b/nova/exception.py index 8d027dce6d..c63590dcf4 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -1618,3 +1618,7 @@ class ImageVCPUTopologyRangeExceeded(Invalid): class ImageVCPULimitsRangeImpossible(Invalid): msg_fmt = _("Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d " "are impossible to satisfy for vcpus count %(vcpus)d") + + +class InvalidArchitectureName(Invalid): + msg_fmt = _("Architecture name '%(arch)s' is not recognised") diff --git a/nova/tests/compute/test_arch.py b/nova/tests/compute/test_arch.py new file mode 100644 index 0000000000..80fa274ed8 --- /dev/null +++ b/nova/tests/compute/test_arch.py @@ -0,0 +1,56 @@ +# Copyright (C) 2014 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +import mock + +from nova.compute import arch +from nova import exception +from nova import test + + +class ArchTest(test.NoDBTestCase): + + @mock.patch.object(os, "uname") + def test_host(self, mock_uname): + os.uname.return_value = ( + 'Linux', + 'localhost.localdomain', + '3.14.8-200.fc20.x86_64', + '#1 SMP Mon Jun 16 21:57:53 UTC 2014', + 'i686' + ) + + self.assertEqual(arch.I686, arch.from_host()) + + def test_valid_string(self): + self.assertTrue(arch.is_valid("x86_64")) + + def test_valid_constant(self): + self.assertTrue(arch.is_valid(arch.X86_64)) + + def test_valid_bogus(self): + self.assertFalse(arch.is_valid("x86_64wibble")) + + def test_canonicalize_i386(self): + self.assertEqual(arch.I686, arch.canonicalize("i386")) + + def test_canonicalize_case(self): + self.assertEqual(arch.X86_64, arch.canonicalize("X86_64")) + + def test_canonicalize_bogus(self): + self.assertRaises(exception.InvalidArchitectureName, + arch.canonicalize, + "x86_64wibble") From 7bbf7ca4938ee01b86faee1e8eceb1f6700e67e4 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 25 Jul 2014 08:56:13 -0700 Subject: [PATCH 182/486] Stub out rpc notifications in ec2 cloud unit tests The ec2 cloud unit tests are running through the compute manager and if something fails and is wrapped with the wrap_exception decorator, we can get big ugly stack traces in the log. We should be using the fake_notifier in these tests like we do in the nova.tests.compute.test_compute setup. This also changes the test_ec2_validate test to use local conductor like the other cloud tests that are going through conductor. 
Change-Id: I59bed0903aeef4086872c10d93a06f34b072dce3 Partial-Bug: #1348661 --- nova/tests/api/ec2/test_cinder_cloud.py | 7 +++++++ nova/tests/api/ec2/test_cloud.py | 7 +++++++ nova/tests/api/ec2/test_ec2_validate.py | 10 ++++++++++ 3 files changed, 24 insertions(+) diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py index 6a8c51dc1d..2c83e71b9c 100644 --- a/nova/tests/api/ec2/test_cinder_cloud.py +++ b/nova/tests/api/ec2/test_cinder_cloud.py @@ -34,6 +34,7 @@ from nova import test from nova.tests import cast_as_call from nova.tests import fake_network +from nova.tests import fake_notifier from nova.tests import fake_utils from nova.tests.image import fake from nova.tests import matchers @@ -126,6 +127,12 @@ def dumb(*args, **kwargs): # Short-circuit the conductor service self.flags(use_local=True, group='conductor') + # Stub out the notification service so we use the no-op serializer + # and avoid lazy-load traces with the wrap_exception decorator in + # the compute service. + fake_notifier.stub_notifier(self.stubs) + self.addCleanup(fake_notifier.reset) + # set up services self.conductor = self.start_service('conductor', manager=CONF.conductor.manager) diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 90f95e0ded..0b05f1145a 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -60,6 +60,7 @@ from nova.tests import cast_as_call from nova.tests import fake_block_device from nova.tests import fake_network +from nova.tests import fake_notifier from nova.tests import fake_utils from nova.tests.image import fake from nova.tests import matchers @@ -174,6 +175,12 @@ def dumb(*args, **kwargs): # Short-circuit the conductor service self.flags(use_local=True, group='conductor') + # Stub out the notification service so we use the no-op serializer + # and avoid lazy-load traces with the wrap_exception decorator in + # the compute service. 
+ fake_notifier.stub_notifier(self.stubs) + self.addCleanup(fake_notifier.reset) + # set up services self.conductor = self.start_service('conductor', manager=CONF.conductor.manager) diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py index 841def64e9..2af1b4fade 100644 --- a/nova/tests/api/ec2/test_ec2_validate.py +++ b/nova/tests/api/ec2/test_ec2_validate.py @@ -28,6 +28,7 @@ from nova import test from nova.tests import cast_as_call from nova.tests import fake_network +from nova.tests import fake_notifier from nova.tests.image import fake CONF = cfg.CONF @@ -48,6 +49,15 @@ def dumb(*args, **kwargs): # set up our cloud self.cloud = cloud.CloudController() + # Short-circuit the conductor service + self.flags(use_local=True, group='conductor') + + # Stub out the notification service so we use the no-op serializer + # and avoid lazy-load traces with the wrap_exception decorator in + # the compute service. + fake_notifier.stub_notifier(self.stubs) + self.addCleanup(fake_notifier.reset) + # set up services self.conductor = self.start_service('conductor', manager=CONF.conductor.manager) From 6cadc3d1135ea7dfd032ee7d271be788fd987316 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 25 Jul 2014 09:29:29 -0700 Subject: [PATCH 183/486] Make several ec2 API tests inherit from NoDBTestCase There are several ec2 API test classes which aren't using the database so we should have them inherit from the NoDBTestCase base class. 
Change-Id: I6c2fcca72c93d6528a40813652c0bb6d6783ee09 --- nova/tests/api/ec2/test_api.py | 4 ++-- nova/tests/api/ec2/test_cloud.py | 2 +- nova/tests/api/ec2/test_ec2_validate.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/tests/api/ec2/test_api.py b/nova/tests/api/ec2/test_api.py index 924497a802..664532fee5 100644 --- a/nova/tests/api/ec2/test_api.py +++ b/nova/tests/api/ec2/test_api.py @@ -96,7 +96,7 @@ def close(self): pass -class XmlConversionTestCase(test.TestCase): +class XmlConversionTestCase(test.NoDBTestCase): """Unit test api xml conversion.""" def test_number_conversion(self): conv = ec2utils._try_convert @@ -127,7 +127,7 @@ def test_number_conversion(self): self.assertEqual(conv(''), '') -class Ec2utilsTestCase(test.TestCase): +class Ec2utilsTestCase(test.NoDBTestCase): def test_ec2_id_to_id(self): self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30) self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29) diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 0b05f1145a..0fb9b018c1 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -3098,7 +3098,7 @@ def test_detach_volume_unattched_error(self, mock_ec2_vol_id_to_uuid): mock_ec2_vol_id_to_uuid.assert_called_once_with(ec2_volume_id) -class CloudTestCaseNeutronProxy(test.TestCase): +class CloudTestCaseNeutronProxy(test.NoDBTestCase): def setUp(self): super(CloudTestCaseNeutronProxy, self).setUp() cfg.CONF.set_override('security_group_api', 'neutron') diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py index 2af1b4fade..61861e6b57 100644 --- a/nova/tests/api/ec2/test_ec2_validate.py +++ b/nova/tests/api/ec2/test_ec2_validate.py @@ -181,7 +181,7 @@ def test_detach_volume(self): volume_id=ec2_id) -class EC2TimestampValidationTestCase(test.TestCase): +class EC2TimestampValidationTestCase(test.NoDBTestCase): """Test case for EC2 request timestamp validation.""" def 
test_validate_ec2_timestamp_valid(self): From 9f1ba4d75eb1a1866f23a0520b6d219356e574fa Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 24 Jul 2014 21:25:51 -0400 Subject: [PATCH 184/486] Fix doc build errors in models.py The docs build has errors like the following: nova/nova/db/sqlalchemy/models.py:docstring of nova.db.sqlalchemy.models.relationship:517: ERROR: Unknown interpreted text role "paramref". To fix this remove the import for relationship and refer to it instead using orm.relationship. Change-Id: I2e0a08438174f0fe0d7ea732670971e65811a700 --- nova/db/sqlalchemy/models.py | 62 ++++++++++++++++++------------------ 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index ede89429cc..20198ed835 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -22,8 +22,8 @@ from sqlalchemy import Column, Index, Integer, BigInteger, Enum, String, schema from sqlalchemy.dialects.mysql import MEDIUMTEXT from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import orm from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float -from sqlalchemy.orm import relationship, backref, object_mapper from oslo.config import cfg from nova.db.sqlalchemy import types @@ -79,8 +79,8 @@ class ComputeNode(BASE, NovaBase): __table_args__ = () id = Column(Integer, primary_key=True) service_id = Column(Integer, ForeignKey('services.id'), nullable=False) - service = relationship(Service, - backref=backref('compute_node'), + service = orm.relationship(Service, + backref=orm.backref('compute_node'), foreign_keys=service_id, primaryjoin='and_(' 'ComputeNode.service_id == Service.id,' @@ -180,7 +180,7 @@ def name(self): info = {} # NOTE(russellb): Don't use self.iteritems() here, as it will # result in infinite recursion on the name property. 
- for column in iter(object_mapper(self).columns): + for column in iter(orm.object_mapper(self).columns): key = column.name # prevent recursion if someone specifies %(name)s # %(name)s will not be valid. @@ -304,8 +304,8 @@ class InstanceInfoCache(BASE, NovaBase): instance_uuid = Column(String(36), ForeignKey('instances.uuid'), nullable=False) - instance = relationship(Instance, - backref=backref('info_cache', uselist=False), + instance = orm.relationship(Instance, + backref=orm.backref('info_cache', uselist=False), foreign_keys=instance_uuid, primaryjoin=instance_uuid == Instance.uuid) @@ -491,7 +491,7 @@ class Reservation(BASE, NovaBase): delta = Column(Integer, nullable=False) expire = Column(DateTime) - usage = relationship( + usage = orm.relationship( "QuotaUsage", foreign_keys=usage_id, primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,' @@ -547,8 +547,8 @@ class BlockDeviceMapping(BASE, NovaBase): id = Column(Integer, primary_key=True, autoincrement=True) instance_uuid = Column(String(36), ForeignKey('instances.uuid')) - instance = relationship(Instance, - backref=backref('block_device_mapping'), + instance = orm.relationship(Instance, + backref=orm.backref('block_device_mapping'), foreign_keys=instance_uuid, primaryjoin='and_(BlockDeviceMapping.' 
'instance_uuid==' @@ -599,8 +599,8 @@ class IscsiTarget(BASE, NovaBase): target_num = Column(Integer) host = Column(String(255)) volume_id = Column(String(36), ForeignKey('volumes.id')) - volume = relationship(Volume, - backref=backref('iscsi_target', uselist=False), + volume = orm.relationship(Volume, + backref=orm.backref('iscsi_target', uselist=False), foreign_keys=volume_id, primaryjoin='and_(IscsiTarget.volume_id==Volume.id,' 'IscsiTarget.deleted==0)') @@ -631,7 +631,7 @@ class SecurityGroup(BASE, NovaBase): user_id = Column(String(255)) project_id = Column(String(255)) - instances = relationship(Instance, + instances = orm.relationship(Instance, secondary="security_group_instance_association", primaryjoin='and_(' 'SecurityGroup.id == ' @@ -654,7 +654,7 @@ class SecurityGroupIngressRule(BASE, NovaBase): id = Column(Integer, primary_key=True) parent_group_id = Column(Integer, ForeignKey('security_groups.id')) - parent_group = relationship("SecurityGroup", backref="rules", + parent_group = orm.relationship("SecurityGroup", backref="rules", foreign_keys=parent_group_id, primaryjoin='and_(' 'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,' @@ -668,7 +668,7 @@ class SecurityGroupIngressRule(BASE, NovaBase): # Note: This is not the parent SecurityGroup. It's SecurityGroup we're # granting access for. 
group_id = Column(Integer, ForeignKey('security_groups.id')) - grantee_group = relationship("SecurityGroup", + grantee_group = orm.relationship("SecurityGroup", foreign_keys=group_id, primaryjoin='and_(' 'SecurityGroupIngressRule.group_id == SecurityGroup.id,' @@ -739,7 +739,7 @@ class Migration(BASE, NovaBase): # TODO(_cerberus_): enum status = Column(String(255)) - instance = relationship("Instance", foreign_keys=instance_uuid, + instance = orm.relationship("Instance", foreign_keys=instance_uuid, primaryjoin='and_(Migration.instance_uuid == ' 'Instance.uuid, Instance.deleted == ' '0)') @@ -847,14 +847,14 @@ class FixedIp(BASE, NovaBase): # TODO(sshturm) add default in db reserved = Column(Boolean, default=False) host = Column(String(255)) - network = relationship(Network, - backref=backref('fixed_ips'), + network = orm.relationship(Network, + backref=orm.backref('fixed_ips'), foreign_keys=network_id, primaryjoin='and_(' 'FixedIp.network_id == Network.id,' 'FixedIp.deleted == 0,' 'Network.deleted == 0)') - instance = relationship(Instance, + instance = orm.relationship(Instance, foreign_keys=instance_uuid, primaryjoin='and_(' 'FixedIp.instance_uuid == Instance.uuid,' @@ -883,8 +883,8 @@ class FloatingIp(BASE, NovaBase): # TODO(sshturm) add default in db pool = Column(String(255)) interface = Column(String(255)) - fixed_ip = relationship(FixedIp, - backref=backref('floating_ips'), + fixed_ip = orm.relationship(FixedIp, + backref=orm.backref('floating_ips'), foreign_keys=fixed_ip_id, primaryjoin='and_(' 'FloatingIp.fixed_ip_id == FixedIp.id,' @@ -936,7 +936,7 @@ class Console(BASE, NovaBase): password = Column(String(255)) port = Column(Integer) pool_id = Column(Integer, ForeignKey('console_pools.id')) - pool = relationship(ConsolePool, backref=backref('consoles')) + pool = orm.relationship(ConsolePool, backref=orm.backref('consoles')) class InstanceMetadata(BASE, NovaBase): @@ -949,7 +949,7 @@ class InstanceMetadata(BASE, NovaBase): key = Column(String(255)) value 
= Column(String(255)) instance_uuid = Column(String(36), ForeignKey('instances.uuid')) - instance = relationship(Instance, backref="metadata", + instance = orm.relationship(Instance, backref="metadata", foreign_keys=instance_uuid, primaryjoin='and_(' 'InstanceMetadata.instance_uuid == ' @@ -970,7 +970,7 @@ class InstanceSystemMetadata(BASE, NovaBase): primary_join = ('and_(InstanceSystemMetadata.instance_uuid == ' 'Instance.uuid, InstanceSystemMetadata.deleted == 0)') - instance = relationship(Instance, backref="system_metadata", + instance = orm.relationship(Instance, backref="system_metadata", foreign_keys=instance_uuid, primaryjoin=primary_join) @@ -988,7 +988,7 @@ class InstanceTypeProjects(BASE, NovaBase): nullable=False) project_id = Column(String(255)) - instance_type = relationship(InstanceTypes, backref="projects", + instance_type = orm.relationship(InstanceTypes, backref="projects", foreign_keys=instance_type_id, primaryjoin='and_(' 'InstanceTypeProjects.instance_type_id == InstanceTypes.id,' @@ -1012,7 +1012,7 @@ class InstanceTypeExtraSpecs(BASE, NovaBase): value = Column(String(255)) instance_type_id = Column(Integer, ForeignKey('instance_types.id'), nullable=False) - instance_type = relationship(InstanceTypes, backref="extra_specs", + instance_type = orm.relationship(InstanceTypes, backref="extra_specs", foreign_keys=instance_type_id, primaryjoin='and_(' 'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,' @@ -1075,13 +1075,13 @@ class Aggregate(BASE, NovaBase): __table_args__ = () id = Column(Integer, primary_key=True, autoincrement=True) name = Column(String(255)) - _hosts = relationship(AggregateHost, + _hosts = orm.relationship(AggregateHost, primaryjoin='and_(' 'Aggregate.id == AggregateHost.aggregate_id,' 'AggregateHost.deleted == 0,' 'Aggregate.deleted == 0)') - _metadata = relationship(AggregateMetadata, + _metadata = orm.relationship(AggregateMetadata, primaryjoin='and_(' 'Aggregate.id == AggregateMetadata.aggregate_id,' 
'AggregateMetadata.deleted == 0,' @@ -1335,15 +1335,15 @@ class InstanceGroup(BASE, NovaBase): project_id = Column(String(255)) uuid = Column(String(36), nullable=False) name = Column(String(255)) - _policies = relationship(InstanceGroupPolicy, primaryjoin='and_(' + _policies = orm.relationship(InstanceGroupPolicy, primaryjoin='and_(' 'InstanceGroup.id == InstanceGroupPolicy.group_id,' 'InstanceGroupPolicy.deleted == 0,' 'InstanceGroup.deleted == 0)') - _metadata = relationship(InstanceGroupMetadata, primaryjoin='and_(' + _metadata = orm.relationship(InstanceGroupMetadata, primaryjoin='and_(' 'InstanceGroup.id == InstanceGroupMetadata.group_id,' 'InstanceGroupMetadata.deleted == 0,' 'InstanceGroup.deleted == 0)') - _members = relationship(InstanceGroupMember, primaryjoin='and_(' + _members = orm.relationship(InstanceGroupMember, primaryjoin='and_(' 'InstanceGroup.id == InstanceGroupMember.group_id,' 'InstanceGroupMember.deleted == 0,' 'InstanceGroup.deleted == 0)') @@ -1396,7 +1396,7 @@ class PciDevice(BASE, NovaBase): extra_info = Column(Text) instance_uuid = Column(String(36)) - instance = relationship(Instance, backref="pci_devices", + instance = orm.relationship(Instance, backref="pci_devices", foreign_keys=instance_uuid, primaryjoin='and_(' 'PciDevice.instance_uuid == Instance.uuid,' From a9232fdbb12a5b719316e41e5c845e25b582c782 Mon Sep 17 00:00:00 2001 From: Nikola Dipanov Date: Thu, 3 Jul 2014 14:27:27 +0200 Subject: [PATCH 185/486] libvirt: driver tests use non-mocked BDMs Ia99e04ad80a2823a6852debf0919e72b2f93bd26 introduces tests that completely mock out the nova.virt.DriverBlockDevice classes. While this is generally OK - it is more desirable to test the code with only the furthest bits from the object/method under test mocked. It was not practical to do it at the time due to __setattr__ behaviour which is now changed - so let's fix this too. 
Change-Id: I186e9c07aafb18b3d9010c5794bb7964f1ff0056 --- nova/tests/virt/libvirt/test_driver.py | 126 ++++++++++++++----------- nova/tests/virt/test_virt_drivers.py | 30 +++--- 2 files changed, 88 insertions(+), 68 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 2e918f670a..d0a0d36341 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -67,6 +67,7 @@ from nova.tests.virt import test_driver from nova import utils from nova import version +from nova.virt import block_device as driver_block_device from nova.virt import configdrive from nova.virt.disk import api as disk from nova.virt import driver @@ -146,14 +147,6 @@ """} -def mocked_bdm(id, bdm_info): - bdm_mock = mock.MagicMock() - bdm_mock.__getitem__ = lambda s, k: bdm_info[k] - bdm_mock.get = lambda *k, **kw: bdm_info.get(*k, **kw) - bdm_mock.id = id - return bdm_mock - - def _concurrency(signal, wait, done, target, is_block_dev=False): signal.send() wait.wait() @@ -1216,25 +1209,33 @@ def test_get_guest_config_with_block_device(self): instance_ref = db.instance_create(self.context, self.test_instance) conn_info = {'driver_volume_type': 'fake'} - info = {'block_device_mapping': [ - mocked_bdm(1, {'connection_info': conn_info, - 'mount_device': '/dev/vdc'}), - mocked_bdm(2, {'connection_info': conn_info, - 'mount_device': '/dev/vdd'}), - ]} + info = {'block_device_mapping': driver_block_device.convert_volumes([ + fake_block_device.FakeDbBlockDeviceDict( + {'id': 1, + 'source_type': 'volume', 'destination_type': 'volume', + 'device_name': '/dev/vdc'}), + fake_block_device.FakeDbBlockDeviceDict( + {'id': 2, + 'source_type': 'volume', 'destination_type': 'volume', + 'device_name': '/dev/vdd'}), + ])} + info['block_device_mapping'][0]['connection_info'] = conn_info + info['block_device_mapping'][1]['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, info) 
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info, + with mock.patch.object( + driver_block_device.DriverVolumeBlockDevice, 'save'): + cfg = conn._get_guest_config(instance_ref, [], {}, disk_info, None, info) - self.assertIsInstance(cfg.devices[2], - vconfig.LibvirtConfigGuestDisk) - self.assertEqual(cfg.devices[2].target_dev, 'vdc') - self.assertIsInstance(cfg.devices[3], - vconfig.LibvirtConfigGuestDisk) - self.assertEqual(cfg.devices[3].target_dev, 'vdd') - self.assertTrue(info['block_device_mapping'][0].save.called) - self.assertTrue(info['block_device_mapping'][1].save.called) + self.assertIsInstance(cfg.devices[2], + vconfig.LibvirtConfigGuestDisk) + self.assertEqual(cfg.devices[2].target_dev, 'vdc') + self.assertIsInstance(cfg.devices[3], + vconfig.LibvirtConfigGuestDisk) + self.assertEqual(cfg.devices[3].target_dev, 'vdd') + self.assertTrue(info['block_device_mapping'][0].save.called) + self.assertTrue(info['block_device_mapping'][1].save.called) def test_get_guest_config_with_configdrive(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) @@ -1274,30 +1275,37 @@ def test_get_guest_config_with_virtio_scsi_bus_bdm(self): image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}} instance_ref = db.instance_create(self.context, self.test_instance) conn_info = {'driver_volume_type': 'fake'} - bd_info = {'block_device_mapping': [ - mocked_bdm(1, {'connection_info': conn_info, - 'mount_device': '/dev/sdc', - 'disk_bus': 'scsi'}), - mocked_bdm(2, {'connection_info': conn_info, - 'mount_device': '/dev/sdd', - 'disk_bus': 'scsi'}), - ]} + bd_info = { + 'block_device_mapping': driver_block_device.convert_volumes([ + fake_block_device.FakeDbBlockDeviceDict( + {'id': 1, + 'source_type': 'volume', 'destination_type': 'volume', + 'device_name': '/dev/sdc', 'disk_bus': 'scsi'}), + fake_block_device.FakeDbBlockDeviceDict( + {'id': 2, + 'source_type': 'volume', 'destination_type': 'volume', + 'device_name': '/dev/sdd', 'disk_bus': 'scsi'}), 
+ ])} + bd_info['block_device_mapping'][0]['connection_info'] = conn_info + bd_info['block_device_mapping'][1]['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, bd_info, image_meta) - cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info, - [], bd_info) - self.assertIsInstance(cfg.devices[2], - vconfig.LibvirtConfigGuestDisk) - self.assertEqual(cfg.devices[2].target_dev, 'sdc') - self.assertEqual(cfg.devices[2].target_bus, 'scsi') - self.assertIsInstance(cfg.devices[3], - vconfig.LibvirtConfigGuestDisk) - self.assertEqual(cfg.devices[3].target_dev, 'sdd') - self.assertEqual(cfg.devices[3].target_bus, 'scsi') - self.assertIsInstance(cfg.devices[4], - vconfig.LibvirtConfigGuestController) - self.assertEqual(cfg.devices[4].model, 'virtio-scsi') + with mock.patch.object( + driver_block_device.DriverVolumeBlockDevice, 'save'): + cfg = conn._get_guest_config(instance_ref, [], image_meta, + disk_info, [], bd_info) + self.assertIsInstance(cfg.devices[2], + vconfig.LibvirtConfigGuestDisk) + self.assertEqual(cfg.devices[2].target_dev, 'sdc') + self.assertEqual(cfg.devices[2].target_bus, 'scsi') + self.assertIsInstance(cfg.devices[3], + vconfig.LibvirtConfigGuestDisk) + self.assertEqual(cfg.devices[3].target_dev, 'sdd') + self.assertEqual(cfg.devices[3].target_bus, 'scsi') + self.assertIsInstance(cfg.devices[4], + vconfig.LibvirtConfigGuestController) + self.assertEqual(cfg.devices[4].model, 'virtio-scsi') def test_get_guest_config_with_vnc(self): self.flags(vnc_enabled=True) @@ -7840,22 +7848,28 @@ def fake_baselineCPU(cpu, flag): self.stubs.Set(conn, '_lookup_by_name', fake_lookup_name) - block_device_info = {'block_device_mapping': [ - mocked_bdm(1, {'guest_format': None, + block_device_info = {'block_device_mapping': + driver_block_device.convert_volumes([ + fake_block_device.FakeDbBlockDeviceDict( + {'id': 1, 'guest_format': None, 'boot_index': 0, - 'mount_device': '/dev/vda', - 'connection_info': - 
{'driver_volume_type': 'iscsi'}, + 'source_type': 'volume', + 'destination_type': 'volume', + 'device_name': '/dev/vda', 'disk_bus': 'virtio', 'device_type': 'disk', 'delete_on_termination': False}), - ]} - conn.post_live_migration_at_destination(self.context, instance, - network_info, True, - block_device_info=block_device_info) - self.assertIn('fake', self.resultXML) - self.assertTrue( - block_device_info['block_device_mapping'][0].save.called) + ])} + block_device_info['block_device_mapping'][0]['connection_info'] = ( + {'driver_volume_type': 'iscsi'}) + with mock.patch.object( + driver_block_device.DriverVolumeBlockDevice, 'save'): + conn.post_live_migration_at_destination( + self.context, instance, network_info, True, + block_device_info=block_device_info) + self.assertTrue('fake' in self.resultXML) + self.assertTrue( + block_device_info['block_device_mapping'][0].save.called) def test_create_propagates_exceptions(self): self.flags(virt_type='lxc', group='libvirt') diff --git a/nova/tests/virt/test_virt_drivers.py b/nova/tests/virt/test_virt_drivers.py index 4d9af914c9..be248556b6 100644 --- a/nova/tests/virt/test_virt_drivers.py +++ b/nova/tests/virt/test_virt_drivers.py @@ -28,10 +28,11 @@ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import test +from nova.tests import fake_block_device from nova.tests.image import fake as fake_image from nova.tests import utils as test_utils from nova.tests.virt.libvirt import fake_libvirt_utils -from nova.tests.virt.libvirt import test_driver +from nova.virt import block_device as driver_block_device from nova.virt import event as virtevent from nova.virt import fake from nova.virt import libvirt @@ -456,24 +457,29 @@ def test_attach_detach_different_power_states(self): 'root_device_name': None, 'swap': None, 'ephemerals': [], - 'block_device_mapping': [ - test_driver.mocked_bdm(1, { - 'instance_uuid': instance_ref['uuid'], - 'connection_info': 
{'driver_volume_type': 'fake'}, - 'mount_device': '/dev/sda', + 'block_device_mapping': driver_block_device.convert_volumes([ + fake_block_device.FakeDbBlockDeviceDict( + {'id': 1, 'instance_uuid': instance_ref['uuid'], + 'device_name': '/dev/sda', + 'source_type': 'volume', + 'destination_type': 'volume', 'delete_on_termination': False, - 'virtual_name': None, 'snapshot_id': None, 'volume_id': 'abcdedf', 'volume_size': None, 'no_device': None }), - ] + ]) } - self.connection.power_on(self.ctxt, instance_ref, network_info, bdm) - self.connection.detach_volume(connection_info, - instance_ref, - '/dev/sda') + bdm['block_device_mapping'][0]['connection_info'] = ( + {'driver_volume_type': 'fake'}) + with mock.patch.object( + driver_block_device.DriverVolumeBlockDevice, 'save'): + self.connection.power_on( + self.ctxt, instance_ref, network_info, bdm) + self.connection.detach_volume(connection_info, + instance_ref, + '/dev/sda') @catch_notimplementederror def test_get_info(self): From 8f505b85268adc226ec0a83826c2d13edcbe3d7c Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Tue, 8 Jul 2014 09:07:43 -0700 Subject: [PATCH 186/486] Stop depending on sitepackages libvirt-python We can install libvirt-python bindings from pypi now because the libvirt devs are supporting pypi packages and the version of libvirt we are testing nova master against is new enough to link against that pypi package. Making this switch will better isolate nova test environments as all dependencies can be installed without any site packages leaking into virtualenvs. It will also make the dependency on libvirt-python a bit more explicit. 
Change-Id: I2890702869f05a02ad5e2ecb419db06433231b36 --- test-requirements.txt | 1 + tox.ini | 7 ------- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/test-requirements.txt b/test-requirements.txt index 282a4dda2b..9c67b37cb1 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,6 +3,7 @@ coverage>=3.6 discover feedparser fixtures>=0.3.14 +libvirt-python>=1.2.5 mock>=1.0 mox>=0.5.3 MySQL-python diff --git a/tox.ini b/tox.ini index 609f52e79d..dcf7a95350 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,6 @@ envlist = py26,py27,py33,pep8 skipsdist = True [testenv] -sitepackages = True usedevelop = True install_command = pip install -U --force-reinstall {opts} {packages} setenv = VIRTUAL_ENV={envdir} @@ -14,16 +13,13 @@ commands = python -m nova.openstack.common.lockutils python setup.py test --slowest --testr-args='{posargs}' [tox:jenkins] -sitepackages = True downloadcache = ~/cache/pip [testenv:pep8] -sitepackages = False commands = flake8 {posargs} [testenv:genconfig] -sitepackages = False commands = bash tools/config/generate_sample.sh -b . -p nova -o etc/nova @@ -48,9 +44,6 @@ commands = {posargs} [testenv:docs] commands = python setup.py build_sphinx -[testenv:py27local] -sitepackages = False - [flake8] # H803 skipped on purpose per list discussion. # E125 is deliberately excluded. See https://github.com/jcrocholl/pep8/issues/126 From d3d2afeeb7e5092e77ac5d3381a61821a4bc5790 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Fri, 25 Jul 2014 12:45:06 -0700 Subject: [PATCH 187/486] Make devref point to official devstack vagrant repo. Now that we have an official devstack vagrant repo, lets point to that instead of my own half maintained repo. 
Change-Id: I944265d18f6e796608576ea71a7983aed0ece542 --- doc/source/devref/development.environment.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst index 255ece68a7..e95e2f9cbb 100644 --- a/doc/source/devref/development.environment.rst +++ b/doc/source/devref/development.environment.rst @@ -40,7 +40,7 @@ environments with venv are also available with the source code. The easiest way to build a fully functional development environment is with DevStack. Create a machine (such as a VM or Vagrant box) running a distribution supported by DevStack and install DevStack there. For -example, there is a Vagrant script for DevStack at https://github.com/jogo/DevstackUp. +example, there is a Vagrant script for DevStack at http://git.openstack.org/cgit/openstack-dev/devstack-vagrant/. .. note:: From 4f3e3cd1883ea5aca237bcc88b64128c743a6862 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Fri, 25 Jul 2014 12:52:45 -0700 Subject: [PATCH 188/486] Set python hash seed to 0 in tox.ini New tox (>=1.7.0) sets a random python hash seed by default. This is generally good for testing because it will help keep projects working regardless of the hash seed, but nova unittests don't currently pass with a random hash seed so set it to the python default seed. This change will allow us to use new tox again and remove the restriction on tox<=1.6.1 to run unittests. Redundant setenvs for specific test envs are removed to prevent them from overriding the PYTHONHASHSEED value. Note this change will need to be backported to the stable branches to keep unittests there working with new tox as well. 
Change-Id: Ib54364877a251db48c54dfdc43c503281ea1f04a --- tox.ini | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index 609f52e79d..be09f75ba9 100644 --- a/tox.ini +++ b/tox.ini @@ -7,7 +7,10 @@ skipsdist = True sitepackages = True usedevelop = True install_command = pip install -U --force-reinstall {opts} {packages} +# Note the hash seed is set to 0 until nova can be tested with a +# random hash seed successfully. setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = @@ -28,13 +31,11 @@ commands = bash tools/config/generate_sample.sh -b . -p nova -o etc/nova [testenv:pylint] -setenv = VIRTUAL_ENV={envdir} commands = bash tools/lintstack.sh [testenv:cover] # Also do not run test_coverage_ext tests while gathering coverage as those # tests conflict with coverage. -setenv = VIRTUAL_ENV={envdir} commands = coverage erase python -m nova.openstack.common.lockutils python setup.py testr --coverage \ From c4fc94e66c7b4955de7ee38b8dd68fd55a4890fa Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 14 May 2014 14:54:29 -0700 Subject: [PATCH 189/486] Allow dhcp_server to be set from new field Now that dhcp_server and gateway can be different, update dnsmasq to hand out the gateway if we have a single ip address and they don't match. 
Partially-implements blueprint better-support-for-multiple-networks Change-Id: Id040ce1fb0f889418815568ff6c80402b69bf09e --- nova/network/linux_net.py | 83 +++++++++++++++++----------- nova/network/manager.py | 4 +- nova/network/nova_ipam_lib.py | 2 + nova/tests/network/test_linux_net.py | 31 +++++++++-- 4 files changed, 80 insertions(+), 40 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index e88cf25f67..dd0c66f2f1 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -967,30 +967,44 @@ def _remove_dhcp_mangle_rule(dev): def get_dhcp_opts(context, network_ref): """Get network's hosts config in dhcp-opts format.""" + gateway = network_ref['gateway'] + # NOTE(vish): if we are in multi-host mode and we are not sharing + # addresses, then we actually need to hand out the + # dhcp server address as the gateway. + if network_ref['multi_host'] and not (network_ref['share_address'] or + CONF.share_dhcp_address): + gateway = network_ref['dhcp_server'] hosts = [] - host = None - if network_ref['multi_host']: - host = CONF.host - fixedips = objects.FixedIPList.get_by_network(context, network_ref, - host=host) - if fixedips: - instance_set = set([fixedip.instance_uuid for fixedip in fixedips]) - default_gw_vif = {} - for instance_uuid in instance_set: - vifs = objects.VirtualInterfaceList.get_by_instance_uuid( - context, instance_uuid) - if vifs: - # offer a default gateway to the first virtual interface - default_gw_vif[instance_uuid] = vifs[0].id - - for fixedip in fixedips: - if fixedip.allocated: - instance_uuid = fixedip.instance_uuid - if instance_uuid in default_gw_vif: - # we don't want default gateway for this fixed ip - if (default_gw_vif[instance_uuid] != - fixedip.virtual_interface_id): - hosts.append(_host_dhcp_opts(fixedip)) + if CONF.use_single_default_gateway: + # NOTE(vish): this will have serious performance implications if we + # are not in multi_host mode. 
+ host = None + if network_ref['multi_host']: + host = CONF.host + fixedips = objects.FixedIPList.get_by_network(context, network_ref, + host=host) + if fixedips: + instance_set = set([fixedip.instance_uuid for fixedip in fixedips]) + default_gw_vif = {} + for instance_uuid in instance_set: + vifs = objects.VirtualInterfaceList.get_by_instance_uuid( + context, instance_uuid) + if vifs: + # offer a default gateway to the first virtual interface + default_gw_vif[instance_uuid] = vifs[0].id + + for fixedip in fixedips: + if fixedip.allocated: + instance_uuid = fixedip.instance_uuid + if instance_uuid in default_gw_vif: + # we don't want default gateway for this fixed ip + if (default_gw_vif[instance_uuid] != + fixedip.virtual_interface_id): + hosts.append(_host_dhcp_opts(fixedip)) + else: + hosts.append(_host_dhcp_opts(fixedip, gateway)) + else: + hosts.append(_host_dhcp_opts(None, gateway)) return '\n'.join(hosts) @@ -1043,12 +1057,9 @@ def restart_dhcp(context, dev, network_ref): """ conffile = _dhcp_file(dev, 'conf') - if CONF.use_single_default_gateway: - # NOTE(vish): this will have serious performance implications if we - # are not in multi_host mode. 
- optsfile = _dhcp_file(dev, 'opts') - write_to_file(optsfile, get_dhcp_opts(context, network_ref)) - os.chmod(optsfile, 0o644) + optsfile = _dhcp_file(dev, 'opts') + write_to_file(optsfile, get_dhcp_opts(context, network_ref)) + os.chmod(optsfile, 0o644) _add_dhcp_mangle_rule(dev) @@ -1081,6 +1092,7 @@ def restart_dhcp(context, dev, network_ref): '--bind-interfaces', '--conf-file=%s' % CONF.dnsmasq_config_file, '--pid-file=%s' % _dhcp_file(dev, 'pid'), + '--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts'), '--listen-address=%s' % network_ref['dhcp_server'], '--except-interface=lo', '--dhcp-range=set:%s,%s,static,%s,%ss' % @@ -1112,8 +1124,6 @@ def restart_dhcp(context, dev, network_ref): cmd.append('--no-resolv') for dns_server in dns_servers: cmd.append('--server=%s' % dns_server) - if CONF.use_single_default_gateway: - cmd += ['--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts')] _execute(*cmd, run_as_root=True) @@ -1197,9 +1207,16 @@ def _host_dns(fixedip): CONF.dhcp_domain) -def _host_dhcp_opts(fixedip): +def _host_dhcp_opts(fixedip=None, gateway=None): """Return an empty gateway option.""" - return '%s,%s' % (_host_dhcp_network(fixedip), 3) + values = [] + if fixedip: + values.append(_host_dhcp_network(fixedip)) + # NOTE(vish): 3 is the dhcp option for gateway. + values.append('3') + if gateway: + values.append('%s' % gateway) + return ','.join(values) def _execute(*cmd, **kwargs): diff --git a/nova/network/manager.py b/nova/network/manager.py index 4e4ae7fc70..d54730565a 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -313,8 +313,10 @@ def _uses_shared_ip(network): @utils.synchronized('get_dhcp') def _get_dhcp_ip(self, context, network_ref, host=None): """Get the proper dhcp address to listen on.""" + # NOTE(vish): If we are sharing the dhcp_address then we can just + # return the dhcp_server from the database. 
if self._uses_shared_ip(network_ref): - return network_ref['gateway'] + return network_ref.get('dhcp_server') or network_ref['gateway'] if not host: host = self.host diff --git a/nova/network/nova_ipam_lib.py b/nova/network/nova_ipam_lib.py index 295af08c4c..f49904fca5 100644 --- a/nova/network/nova_ipam_lib.py +++ b/nova/network/nova_ipam_lib.py @@ -45,6 +45,7 @@ def get_subnets_by_net_id(self, context, tenant_id, net_id, _vif_id=None): 'network_id': n.uuid, 'cidr': n.cidr, 'gateway': n.gateway, + 'dhcp_server': getattr(n, 'dhcp_server'), 'broadcast': n.broadcast, 'netmask': n.netmask, 'version': 4, @@ -57,6 +58,7 @@ def get_subnets_by_net_id(self, context, tenant_id, net_id, _vif_id=None): 'network_id': n.uuid, 'cidr': n.cidr_v6, 'gateway': n.gateway_v6, + 'dhcp_server': None, 'broadcast': None, 'netmask': n.netmask_v6, 'version': 6, diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py index 76be0dd755..0b95652f5a 100644 --- a/nova/tests/network/test_linux_net.py +++ b/nova/tests/network/test_linux_net.py @@ -88,7 +88,11 @@ 'vlan': None, 'host': None, 'project_id': 'fake_project', - 'vpn_public_address': '192.168.0.2'}, + 'vpn_public_address': '192.168.0.2', + 'mtu': None, + 'dhcp_server': '192.168.0.1', + 'enable_dhcp': True, + 'share_address': False}, {'id': 1, 'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb", 'label': 'test1', @@ -110,7 +114,11 @@ 'vlan': None, 'host': None, 'project_id': 'fake_project', - 'vpn_public_address': '192.168.1.2'}] + 'vpn_public_address': '192.168.1.2', + 'mtu': None, + 'dhcp_server': '192.168.1.1', + 'enable_dhcp': True, + 'share_address': False}] fixed_ips = [{'id': 0, @@ -405,14 +413,22 @@ def test_get_dns_hosts_for_nw01(self): self.assertEqual(actual_hosts, expected) def test_get_dhcp_opts_for_nw00(self): - expected_opts = 'NW-3,3\nNW-4,3' + self.flags(use_single_default_gateway=True) + expected_opts = 'NW-0,3,192.168.0.1\nNW-3,3\nNW-4,3' + actual_opts = self.driver.get_dhcp_opts(self.context, 
networks[0]) + + self.assertEqual(actual_opts, expected_opts) + + def test_get_dhcp_opts_for_nw00_no_single_default_gateway(self): + self.flags(use_single_default_gateway=False) + expected_opts = '3,192.168.0.1' actual_opts = self.driver.get_dhcp_opts(self.context, networks[0]) self.assertEqual(actual_opts, expected_opts) def test_get_dhcp_opts_for_nw01(self): - self.flags(host='fake_instance01') - expected_opts = "NW-5,3" + self.flags(use_single_default_gateway=True, host='fake_instance01') + expected_opts = "NW-2,3,192.168.1.1\nNW-5,3" actual_opts = self.driver.get_dhcp_opts(self.context, networks[1]) self.assertEqual(actual_opts, expected_opts) @@ -571,12 +587,14 @@ def test_ensure(bridge, interface, network, gateway): def _test_dnsmasq_execute(self, extra_expected=None): network_ref = {'id': 'fake', 'label': 'fake', + 'gateway': '10.0.0.1', 'multi_host': False, 'cidr': '10.0.0.0/24', 'netmask': '255.255.255.0', 'dns1': '8.8.4.4', 'dhcp_start': '1.0.0.2', - 'dhcp_server': '10.0.0.1'} + 'dhcp_server': '10.0.0.1', + 'share_address': False} def fake_execute(*args, **kwargs): executes.append(args) @@ -607,6 +625,7 @@ def fake_add_dhcp_mangle_rule(*args, **kwargs): '--bind-interfaces', '--conf-file=%s' % CONF.dnsmasq_config_file, '--pid-file=%s' % linux_net._dhcp_file(dev, 'pid'), + '--dhcp-optsfile=%s' % linux_net._dhcp_file(dev, 'opts'), '--listen-address=%s' % network_ref['dhcp_server'], '--except-interface=lo', "--dhcp-range=set:%s,%s,static,%s,%ss" % (network_ref['label'], From 6ee9fa429c97aee1507a0a4d21d885845fa0b85e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 14 May 2014 14:58:32 -0700 Subject: [PATCH 190/486] Only use dhcp if enable_dhcp is set on the network Also removes an unused call to init_host if there is no network to setup. The signature of this call was wrong so clearly it was not being used. 
Partially-implements blueprint better-support-for-multiple-networks Change-Id: I33c86b1500f9a69081b4c6ecbe79151f10f69cea --- nova/network/l3.py | 5 ++--- nova/network/manager.py | 35 +++++++++++++++++++++-------------- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/nova/network/l3.py b/nova/network/l3.py index 00f62ed971..b7901dc11f 100644 --- a/nova/network/l3.py +++ b/nova/network/l3.py @@ -85,9 +85,8 @@ def initialize(self, **kwargs): networks = kwargs.get('networks', None) if not fixed_range and networks is not None: for network in networks: - self.initialize_network(network['cidr']) - else: - linux_net.init_host() + if network['enable_dhcp']: + self.initialize_network(network['cidr']) linux_net.ensure_metadata_ip() linux_net.metadata_forward() self.initialized = True diff --git a/nova/network/manager.py b/nova/network/manager.py index d54730565a..e47d2adc25 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -1400,6 +1400,11 @@ def rpc_setup_network_on_host(self, context, network_id, teardown): network = objects.Network.get_by_id(context, network_id) call_func(context, network) + def _initialize_network(self, network): + if network.enable_dhcp: + self.l3driver.initialize_network(network.cidr) + self.l3driver.initialize_gateway(network) + def _setup_network_on_host(self, context, network): """Sets up network on this host.""" raise NotImplementedError() @@ -1730,12 +1735,12 @@ def init_host(self): def _setup_network_on_host(self, context, network): """Sets up network on this host.""" - network['dhcp_server'] = self._get_dhcp_ip(context, network) + network.dhcp_server = self._get_dhcp_ip(context, network) - self.l3driver.initialize_network(network.get('cidr')) - self.l3driver.initialize_gateway(network) + self._initialize_network(network) - if not CONF.fake_network: + # NOTE(vish): if dhcp server is not set then don't dhcp + if not CONF.fake_network and network.enable_dhcp: dev = self.driver.get_dev(network) # NOTE(dprince): 
dhcp DB queries require elevated context elevated = context.elevated() @@ -1747,7 +1752,8 @@ def _setup_network_on_host(self, context, network): network.save() def _teardown_network_on_host(self, context, network): - if not CONF.fake_network: + # NOTE(vish): if dhcp server is not set then don't dhcp + if not CONF.fake_network and network.enable_dhcp: network['dhcp_server'] = self._get_dhcp_ip(context, network) dev = self.driver.get_dev(network) # NOTE(dprince): dhcp DB queries require elevated context @@ -1964,8 +1970,7 @@ def _setup_network_on_host(self, context, network): address = network.vpn_public_address network.dhcp_server = self._get_dhcp_ip(context, network) - self.l3driver.initialize_network(network.get('cidr')) - self.l3driver.initialize_gateway(network) + self._initialize_network(network) # NOTE(vish): only ensure this forward if the address hasn't been set # manually. @@ -1977,8 +1982,9 @@ def _setup_network_on_host(self, context, network): if not CONF.fake_network: dev = self.driver.get_dev(network) # NOTE(dprince): dhcp DB queries require elevated context - elevated = context.elevated() - self.driver.update_dhcp(elevated, dev, network) + if network.enable_dhcp: + elevated = context.elevated() + self.driver.update_dhcp(elevated, dev, network) if CONF.use_ipv6: self.driver.update_ra(context, dev, network) gateway = utils.get_my_linklocal(dev) @@ -1990,9 +1996,6 @@ def _teardown_network_on_host(self, context, network): if not CONF.fake_network: network['dhcp_server'] = self._get_dhcp_ip(context, network) dev = self.driver.get_dev(network) - # NOTE(dprince): dhcp DB queries require elevated context - elevated = context.elevated() - self.driver.update_dhcp(elevated, dev, network) # NOTE(ethuleau): For multi hosted networks, if the network is no # more used on this host and if VPN forwarding rule aren't handed @@ -2003,7 +2006,8 @@ def _teardown_network_on_host(self, context, network): not objects.Network.in_use_on_host(context, network['id'], self.host)): 
LOG.debug("Remove unused gateway %s", network['bridge']) - self.driver.kill_dhcp(dev) + if network.enable_dhcp: + self.driver.kill_dhcp(dev) self.l3driver.remove_gateway(network) if not self._uses_shared_ip(network): fip = objects.FixedIP.get_by_address(context, @@ -2011,7 +2015,10 @@ def _teardown_network_on_host(self, context, network): fip.allocated = False fip.host = None fip.save() - else: + # NOTE(vish): if dhcp server is not set then don't dhcp + elif network.enable_dhcp: + # NOTE(dprince): dhcp DB queries require elevated context + elevated = context.elevated() self.driver.update_dhcp(elevated, dev, network) def _get_network_dict(self, network): From 59ac254bf15bb059cca12a82c9d819c371ea5c6f Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 18 May 2014 10:51:34 -0700 Subject: [PATCH 191/486] Properly handle snatting for external gateways Previously, snatting for netowrks with external gateways had to be handled by manually setting: * public_interface = '' * force_snat_range = snat_range Whereas networks with a gateway managed by nova needed: * public_interface = eth0 * force_snat_range = None or ['0.0.0.0/0'] In order to support both simultaneously, we calcualate whether the gateway is external, and use the force_snat_range and skip interface if it is an external gateway. Otherwise, we use the setting for public interface and 0.0.0.0/0. This allows external and internal gateways to co-exist. 
Partially-implements blueprint better-support-for-multiple-networks Change-Id: Id5ec66946c3a82841a6dd2d019404e822cdafe64 --- nova/network/l3.py | 8 ++++--- nova/network/linux_net.py | 26 +++++++++++++++-------- nova/network/manager.py | 4 +++- nova/tests/network/test_linux_net.py | 31 +++++++++++++++++++++------- nova/tests/network/test_manager.py | 3 +++ 5 files changed, 52 insertions(+), 20 deletions(-) diff --git a/nova/network/l3.py b/nova/network/l3.py index b7901dc11f..ea247d0ad8 100644 --- a/nova/network/l3.py +++ b/nova/network/l3.py @@ -86,7 +86,9 @@ def initialize(self, **kwargs): if not fixed_range and networks is not None: for network in networks: if network['enable_dhcp']: - self.initialize_network(network['cidr']) + is_ext = (network['dhcp_server'] is not None and + network['dhcp_server'] != network['gateway']) + self.initialize_network(network['cidr'], is_ext) linux_net.ensure_metadata_ip() linux_net.metadata_forward() self.initialized = True @@ -94,8 +96,8 @@ def initialize(self, **kwargs): def is_initialized(self): return self.initialized - def initialize_network(self, cidr): - linux_net.init_host(cidr) + def initialize_network(self, cidr, is_external): + linux_net.init_host(cidr, is_external) def initialize_gateway(self, network_ref): mac_address = utils.generate_mac_address() diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index dd0c66f2f1..c929280a00 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -666,29 +666,37 @@ def metadata_accept(): iptables_manager.apply() -def add_snat_rule(ip_range): +def add_snat_rule(ip_range, is_external=False): if CONF.routing_source_ip: - for dest_range in CONF.force_snat_range or ['0.0.0.0/0']: + if is_external: + if CONF.force_snat_range: + snat_range = CONF.force_snat_range + else: + snat_range = [] + else: + snat_range = ['0.0.0.0/0'] + for dest_range in snat_range: rule = ('-s %s -d %s -j SNAT --to-source %s' % (ip_range, dest_range, CONF.routing_source_ip)) - if 
CONF.public_interface: + if not is_external and CONF.public_interface: rule += ' -o %s' % CONF.public_interface iptables_manager.ipv4['nat'].add_rule('snat', rule) iptables_manager.apply() -def init_host(ip_range): +def init_host(ip_range, is_external=False): """Basic networking setup goes here.""" # NOTE(devcamcar): Cloud public SNAT entries and the default # SNAT rule for outbound traffic. - add_snat_rule(ip_range) + add_snat_rule(ip_range, is_external) rules = [] - for snat_range in CONF.force_snat_range: - rules.append('PREROUTING -p ipv4 --ip-src %s --ip-dst %s ' - '-j redirect --redirect-target ACCEPT' % - (ip_range, snat_range)) + if is_external: + for snat_range in CONF.force_snat_range: + rules.append('PREROUTING -p ipv4 --ip-src %s --ip-dst %s ' + '-j redirect --redirect-target ACCEPT' % + (ip_range, snat_range)) if rules: ensure_ebtables_rules(rules, 'nat') diff --git a/nova/network/manager.py b/nova/network/manager.py index e47d2adc25..dc8206765a 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -1402,7 +1402,9 @@ def rpc_setup_network_on_host(self, context, network_id, teardown): def _initialize_network(self, network): if network.enable_dhcp: - self.l3driver.initialize_network(network.cidr) + is_ext = (network.dhcp_server is not None and + network.dhcp_server != network.gateway) + self.l3driver.initialize_network(network.cidr, is_ext) self.l3driver.initialize_gateway(network) def _setup_network_on_host(self, context, network): diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py index 0b95652f5a..cfd5725540 100644 --- a/nova/tests/network/test_linux_net.py +++ b/nova/tests/network/test_linux_net.py @@ -298,27 +298,44 @@ def get_instance(_context, instance_id): self.stubs.Set(db, 'instance_get', get_instance) self.stubs.Set(db, 'network_get_associated_fixed_ips', get_associated) - def _test_add_snat_rule(self, expected): + def _test_add_snat_rule(self, expected, is_external): + def 
verify_add_rule(chain, rule): self.assertEqual(chain, 'snat') self.assertEqual(rule, expected) + self.called = True self.stubs.Set(linux_net.iptables_manager.ipv4['nat'], 'add_rule', verify_add_rule) - linux_net.add_snat_rule('10.0.0.0/24') + self.called = False + linux_net.add_snat_rule('10.0.0.0/24', is_external) + if expected: + self.assertTrue(self.called) - def test_add_snat_rule(self): + def test_add_snat_rule_no_ext(self): self.flags(routing_source_ip='10.10.10.1') expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 ' '-j SNAT --to-source 10.10.10.1 -o eth0') - self._test_add_snat_rule(expected) + self._test_add_snat_rule(expected, False) + + def test_add_snat_rule_ext(self): + self.flags(routing_source_ip='10.10.10.1') + expected = () + self._test_add_snat_rule(expected, True) - def test_add_snat_rule_snat_range(self): + def test_add_snat_rule_snat_range_no_ext(self): self.flags(routing_source_ip='10.10.10.1', force_snat_range=['10.10.10.0/24']) - expected = ('-s 10.0.0.0/24 -d 10.10.10.0/24 ' + expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 ' '-j SNAT --to-source 10.10.10.1 -o eth0') - self._test_add_snat_rule(expected) + self._test_add_snat_rule(expected, False) + + def test_add_snat_rule_snat_range_ext(self): + self.flags(routing_source_ip='10.10.10.1', + force_snat_range=['10.10.10.0/24']) + expected = ('-s 10.0.0.0/24 -d 10.10.10.0/24 ' + '-j SNAT --to-source 10.10.10.1') + self._test_add_snat_rule(expected, True) def test_update_dhcp_for_nw00(self): self.flags(use_single_default_gateway=True) diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index e2f62e2617..7942228cd6 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -76,6 +76,7 @@ 'bridge': 'fa0', 'bridge_interface': 'fake_fa0', 'gateway': '192.168.0.1', + 'dhcp_server': '192.168.0.1', 'broadcast': '192.168.0.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', @@ -98,6 +99,7 @@ 'bridge': 'fa1', 'bridge_interface': 'fake_fa1', 'gateway': 
'192.168.1.1', + 'dhcp_server': '192.168.1.1', 'broadcast': '192.168.1.255', 'dns1': '192.168.0.1', 'dns2': '192.168.0.2', @@ -2281,6 +2283,7 @@ def _test_init_host_dynamic_fixed_range(self, net_manager): 'bridge': 'fa1', 'bridge_interface': 'fake_fa1', 'gateway': '192.168.2.1', + 'dhcp_server': '192.168.2.1', 'broadcast': '192.168.2.255', 'dns1': '192.168.2.1', 'dns2': '192.168.2.2', From 8230b751fbb4c41a3da987fea335d06f05e07272 Mon Sep 17 00:00:00 2001 From: Josh Durgin Date: Wed, 22 Jan 2014 14:44:11 -0800 Subject: [PATCH 192/486] Move libvirt RBD utilities to a new file This will make it easier to share rbd-related code with cinder and glance. Port the applicable unit tests over from cinder. Closes-Bug: #1346092 Co-Authored-By: Dmitry Borodaenko Signed-off-by: Josh Durgin Signed-off-by: Dmitry Borodaenko Change-Id: I614b8e2bf0ea9e71a73772cbabcf812b39fa8a34 --- nova/tests/virt/libvirt/test_imagebackend.py | 34 +--- nova/tests/virt/libvirt/test_rbd.py | 169 +++++++++++++++++++ nova/virt/libvirt/imagebackend.py | 136 ++------------- nova/virt/libvirt/rbd.py | 147 ++++++++++++++++ 4 files changed, 340 insertions(+), 146 deletions(-) create mode 100644 nova/tests/virt/libvirt/test_rbd.py create mode 100644 nova/virt/libvirt/rbd.py diff --git a/nova/tests/virt/libvirt/test_imagebackend.py b/nova/tests/virt/libvirt/test_imagebackend.py index 871427cf44..bb92f30ec6 100644 --- a/nova/tests/virt/libvirt/test_imagebackend.py +++ b/nova/tests/virt/libvirt/test_imagebackend.py @@ -18,7 +18,6 @@ import tempfile import fixtures -import mock from oslo.config import cfg import inspect @@ -30,6 +29,7 @@ from nova.tests import fake_processutils from nova.tests.virt.libvirt import fake_libvirt_utils from nova.virt.libvirt import imagebackend +from nova.virt.libvirt import rbd CONF = cfg.CONF @@ -671,14 +671,8 @@ def setUp(self): group='libvirt') self.libvirt_utils = imagebackend.libvirt_utils self.utils = imagebackend.utils - self.rbd = self.mox.CreateMockAnything() - self.rados = 
self.mox.CreateMockAnything() - - def prepare_mocks(self): - fn = self.mox.CreateMockAnything() - self.mox.StubOutWithMock(imagebackend, 'rbd') - self.mox.StubOutWithMock(imagebackend, 'rados') - return fn + self.mox.StubOutWithMock(rbd, 'rbd') + self.mox.StubOutWithMock(rbd, 'rados') def test_cache(self): image = self.image_class(self.INSTANCE, self.NAME) @@ -746,10 +740,10 @@ def test_cache_template_exists(self): self.mox.VerifyAll() def test_create_image(self): - fn = self.prepare_mocks() - fn(max_size=None, rbd=self.rbd, target=self.TEMPLATE_PATH) + fn = self.mox.CreateMockAnything() + fn(max_size=None, target=self.TEMPLATE_PATH) - self.rbd.RBD_FEATURE_LAYERING = 1 + rbd.rbd.RBD_FEATURE_LAYERING = 1 self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size') imagebackend.disk.get_disk_size(self.TEMPLATE_PATH @@ -762,7 +756,7 @@ def test_create_image(self): self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) - image.create_image(fn, self.TEMPLATE_PATH, None, rbd=self.rbd) + image.create_image(fn, self.TEMPLATE_PATH, None) self.mox.VerifyAll() @@ -771,8 +765,6 @@ def test_prealloc_image(self): fake_processutils.fake_execute_clear_log() fake_processutils.stub_out_processutils_execute(self.stubs) - self.mox.StubOutWithMock(imagebackend, 'rbd') - self.mox.StubOutWithMock(imagebackend, 'rados') image = self.image_class(self.INSTANCE, self.NAME) def fake_fetch(target, *args, **kwargs): @@ -807,16 +799,6 @@ def test_image_path(self): self.assertEqual(image.path, rbd_path) - def test_resize(self): - image = self.image_class(self.INSTANCE, self.NAME) - with mock.patch.object(imagebackend, "RBDVolumeProxy") as mock_proxy: - volume_mock = mock.Mock() - mock_proxy.side_effect = [mock_proxy] - mock_proxy.__enter__.side_effect = [volume_mock] - - image._resize(image.rbd_name, self.SIZE) - volume_mock.resize.assert_called_once_with(self.SIZE) - class BackendTestCase(test.NoDBTestCase): INSTANCE = {'name': 'fake-instance', @@ -859,6 +841,8 @@ def 
test_image_rbd(self): pool = "FakePool" self.flags(images_rbd_pool=pool, group='libvirt') self.flags(images_rbd_ceph_conf=conf, group='libvirt') + self.mox.StubOutWithMock(rbd, 'rbd') + self.mox.StubOutWithMock(rbd, 'rados') self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd) def test_image_default(self): diff --git a/nova/tests/virt/libvirt/test_rbd.py b/nova/tests/virt/libvirt/test_rbd.py new file mode 100644 index 0000000000..93a4bed4bb --- /dev/null +++ b/nova/tests/virt/libvirt/test_rbd.py @@ -0,0 +1,169 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import mock + +from nova.openstack.common import log as logging +from nova import test +from nova import utils +from nova.virt.libvirt import rbd + + +LOG = logging.getLogger(__name__) + + +CEPH_MON_DUMP = """dumped monmap epoch 1 +{ "epoch": 1, + "fsid": "33630410-6d93-4d66-8e42-3b953cf194aa", + "modified": "2013-05-22 17:44:56.343618", + "created": "2013-05-22 17:44:56.343618", + "mons": [ + { "rank": 0, + "name": "a", + "addr": "[::1]:6789\/0"}, + { "rank": 1, + "name": "b", + "addr": "[::1]:6790\/0"}, + { "rank": 2, + "name": "c", + "addr": "[::1]:6791\/0"}, + { "rank": 3, + "name": "d", + "addr": "127.0.0.1:6792\/0"}, + { "rank": 4, + "name": "e", + "addr": "example.com:6791\/0"}], + "quorum": [ + 0, + 1, + 2]} +""" + + +class RbdTestCase(test.NoDBTestCase): + + @mock.patch.object(rbd, 'rbd') + @mock.patch.object(rbd, 'rados') + def setUp(self, mock_rados, mock_rbd): + super(RbdTestCase, self).setUp() + + self.mock_rados = mock_rados + self.mock_rados.Rados = mock.Mock + self.mock_rados.Rados.ioctx = mock.Mock() + self.mock_rados.Rados.connect = mock.Mock() + self.mock_rados.Rados.shutdown = mock.Mock() + self.mock_rados.Rados.open_ioctx = mock.Mock() + self.mock_rados.Rados.open_ioctx.return_value = \ + self.mock_rados.Rados.ioctx + self.mock_rados.Error = Exception + + self.mock_rbd = mock_rbd + self.mock_rbd.RBD = mock.Mock + self.mock_rbd.Image = mock.Mock + self.mock_rbd.Image.close = mock.Mock() + self.mock_rbd.RBD.Error = Exception + + self.rbd_pool = 'rbd' + self.driver = rbd.RBDDriver(self.rbd_pool, None, None) + + self.volume_name = u'volume-00000001' + + def tearDown(self): + super(RbdTestCase, self).tearDown() + + @mock.patch.object(utils, 'execute') + def test_get_mon_addrs(self, mock_execute): + mock_execute.return_value = (CEPH_MON_DUMP, '') + hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] + ports = ['6789', '6790', '6791', '6792', '6791'] + self.assertEqual((hosts, ports), self.driver.get_mon_addrs()) + + @mock.patch.object(rbd, 
'RBDVolumeProxy') + def test_resize(self, mock_proxy): + size = 1024 + proxy = mock_proxy.return_value + proxy.__enter__.return_value = proxy + self.driver.resize(self.volume_name, size) + proxy.resize.assert_called_once_with(size) + + @mock.patch.object(rbd.RBDDriver, '_disconnect_from_rados') + @mock.patch.object(rbd.RBDDriver, '_connect_to_rados') + @mock.patch.object(rbd, 'rbd') + @mock.patch.object(rbd, 'rados') + def test_rbd_volume_proxy_init(self, mock_rados, mock_rbd, + mock_connect_from_rados, + mock_disconnect_from_rados): + mock_connect_from_rados.return_value = (None, None) + mock_disconnect_from_rados.return_value = (None, None) + + with rbd.RBDVolumeProxy(self.driver, self.volume_name): + mock_connect_from_rados.assert_called_once_with(None) + self.assertFalse(mock_disconnect_from_rados.called) + + mock_disconnect_from_rados.assert_called_once_with(None, None) + + @mock.patch.object(rbd, 'rbd') + @mock.patch.object(rbd, 'rados') + def test_connect_to_rados_default(self, mock_rados, mock_rbd): + ret = self.driver._connect_to_rados() + self.assertTrue(self.mock_rados.Rados.connect.called) + self.assertTrue(self.mock_rados.Rados.open_ioctx.called) + self.assertIsInstance(ret[0], self.mock_rados.Rados) + self.assertEqual(ret[1], self.mock_rados.Rados.ioctx) + self.mock_rados.Rados.open_ioctx.assert_called_with(self.rbd_pool) + + @mock.patch.object(rbd, 'rbd') + @mock.patch.object(rbd, 'rados') + def test_connect_to_rados_different_pool(self, mock_rados, mock_rbd): + ret = self.driver._connect_to_rados('alt_pool') + self.assertTrue(self.mock_rados.Rados.connect.called) + self.assertTrue(self.mock_rados.Rados.open_ioctx.called) + self.assertIsInstance(ret[0], self.mock_rados.Rados) + self.assertEqual(ret[1], self.mock_rados.Rados.ioctx) + self.mock_rados.Rados.open_ioctx.assert_called_with('alt_pool') + + @mock.patch.object(rbd, 'rados') + def test_connect_to_rados_error(self, mock_rados): + mock_rados.Rados.open_ioctx.side_effect = mock_rados.Error + 
self.assertRaises(mock_rados.Error, self.driver._connect_to_rados) + mock_rados.Rados.open_ioctx.assert_called_once_with(self.rbd_pool) + mock_rados.Rados.shutdown.assert_called_once_with() + + def test_ceph_args_none(self): + self.driver.rbd_user = None + self.driver.ceph_conf = None + self.assertEqual([], self.driver.ceph_args()) + + def test_ceph_args_rbd_user(self): + self.driver.rbd_user = 'foo' + self.driver.ceph_conf = None + self.assertEqual(['--id', 'foo'], self.driver.ceph_args()) + + def test_ceph_args_ceph_conf(self): + self.driver.rbd_user = None + self.driver.ceph_conf = '/path/bar.conf' + self.assertEqual(['--conf', '/path/bar.conf'], + self.driver.ceph_args()) + + def test_ceph_args_rbd_user_and_ceph_conf(self): + self.driver.rbd_user = 'foo' + self.driver.ceph_conf = '/path/bar.conf' + self.assertEqual(['--id', 'foo', '--conf', '/path/bar.conf'], + self.driver.ceph_args()) + + @mock.patch.object(rbd, 'RBDVolumeProxy') + def test_exists(self, mock_proxy): + proxy = mock_proxy.return_value + self.assertTrue(self.driver.exists(self.volume_name)) + proxy.__enter__.assert_called_once_with() + proxy.__exit__.assert_called_once_with(None, None, None) diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index a79934dfa4..3a510ca295 100644 --- a/nova/virt/libvirt/imagebackend.py +++ b/nova/virt/libvirt/imagebackend.py @@ -33,17 +33,9 @@ from nova.virt import images from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import lvm +from nova.virt.libvirt import rbd from nova.virt.libvirt import utils as libvirt_utils - -try: - import rados - import rbd -except ImportError: - rados = None - rbd = None - - __imagebackend_opts = [ cfg.StrOpt('images_type', default='default', @@ -76,6 +68,8 @@ CONF.register_opts(__imagebackend_opts, 'libvirt') CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache') CONF.import_opt('preallocate_images', 'nova.virt.driver') +CONF.import_opt('rbd_user', 
'nova.virt.libvirt.volume', group='libvirt') +CONF.import_opt('rbd_secret_uuid', 'nova.virt.libvirt.volume', group='libvirt') LOG = logging.getLogger(__name__) @@ -488,51 +482,6 @@ def snapshot_extract(self, target, out_format): run_as_root=True) -class RBDVolumeProxy(object): - """Context manager for dealing with an existing rbd volume. - - This handles connecting to rados and opening an ioctx automatically, and - otherwise acts like a librbd Image object. - - The underlying librados client and ioctx can be accessed as the attributes - 'client' and 'ioctx'. - """ - def __init__(self, driver, name, pool=None): - client, ioctx = driver._connect_to_rados(pool) - try: - self.volume = driver.rbd.Image(ioctx, str(name), snapshot=None) - except driver.rbd.Error: - LOG.exception(_LE("error opening rbd image %s"), name) - driver._disconnect_from_rados(client, ioctx) - raise - self.driver = driver - self.client = client - self.ioctx = ioctx - - def __enter__(self): - return self - - def __exit__(self, type_, value, traceback): - try: - self.volume.close() - finally: - self.driver._disconnect_from_rados(self.client, self.ioctx) - - def __getattr__(self, attrib): - return getattr(self.volume, attrib) - - -def ascii_str(s): - """Convert a string to ascii, or return None if the input is None. - - This is useful when a parameter is None by default, or a string. LibRBD - only accepts ascii, hence the need for conversion. 
- """ - if s is None: - return s - return str(s) - - class Rbd(Image): def __init__(self, instance=None, disk_name=None, path=None, **kwargs): super(Rbd, self).__init__("block", "rbd", is_block_dev=True) @@ -549,10 +498,13 @@ def __init__(self, instance=None, disk_name=None, path=None, **kwargs): ' images_rbd_pool' ' flag to use rbd images.')) self.pool = CONF.libvirt.images_rbd_pool - self.ceph_conf = ascii_str(CONF.libvirt.images_rbd_ceph_conf) - self.rbd_user = ascii_str(CONF.libvirt.rbd_user) - self.rbd = kwargs.get('rbd', rbd) - self.rados = kwargs.get('rados', rados) + self.rbd_user = CONF.libvirt.rbd_user + self.ceph_conf = CONF.libvirt.images_rbd_ceph_conf + + self.driver = rbd.RBDDriver( + pool=self.pool, + ceph_conf=self.ceph_conf, + rbd_user=self.rbd_user) self.path = 'rbd:%s/%s' % (self.pool, self.rbd_name) if self.rbd_user: @@ -560,52 +512,6 @@ def __init__(self, instance=None, disk_name=None, path=None, **kwargs): if self.ceph_conf: self.path += ':conf=' + self.ceph_conf - def _connect_to_rados(self, pool=None): - client = self.rados.Rados(rados_id=self.rbd_user, - conffile=self.ceph_conf) - try: - client.connect() - pool_to_open = str(pool or self.pool) - ioctx = client.open_ioctx(pool_to_open) - return client, ioctx - except self.rados.Error: - # shutdown cannot raise an exception - client.shutdown() - raise - - def _disconnect_from_rados(self, client, ioctx): - # closing an ioctx cannot raise an exception - ioctx.close() - client.shutdown() - - def _supports_layering(self): - return hasattr(self.rbd, 'RBD_FEATURE_LAYERING') - - def _ceph_args(self): - args = [] - if self.rbd_user: - args.extend(['--id', self.rbd_user]) - if self.ceph_conf: - args.extend(['--conf', self.ceph_conf]) - return args - - def _get_mon_addrs(self): - args = ['ceph', 'mon', 'dump', '--format=json'] + self._ceph_args() - out, _ = utils.execute(*args) - lines = out.split('\n') - if lines[0].startswith('dumped monmap epoch'): - lines = lines[1:] - monmap = 
jsonutils.loads('\n'.join(lines)) - addrs = [mon['addr'] for mon in monmap['mons']] - hosts = [] - ports = [] - for addr in addrs: - host_port = addr[:addr.rindex('/')] - host, port = host_port.rsplit(':', 1) - hosts.append(host.strip('[]')) - ports.append(port) - return hosts, ports - def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode, extra_specs, hypervisor_version): """Get `LibvirtConfigGuestDisk` filled for this image. @@ -618,7 +524,7 @@ def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode, """ info = vconfig.LibvirtConfigGuestDisk() - hosts, ports = self._get_mon_addrs() + hosts, ports = self.driver.get_mon_addrs() info.device_type = device_type info.driver_format = 'raw' info.driver_cache = cache_mode @@ -644,21 +550,9 @@ def _can_fallocate(self): return False def check_image_exists(self): - rbd_volumes = libvirt_utils.list_rbd_volumes(self.pool) - for vol in rbd_volumes: - if vol.startswith(self.rbd_name): - return True - - return False - - def _resize(self, volume_name, size): - with RBDVolumeProxy(self, volume_name) as vol: - vol.resize(int(size)) + return self.driver.exists(self.rbd_name) def create_image(self, prepare_template, base, size, *args, **kwargs): - if self.rbd is None: - raise RuntimeError(_('rbd python libraries not found')) - if not os.path.exists(base): prepare_template(target=base, max_size=size, *args, **kwargs) else: @@ -667,15 +561,15 @@ def create_image(self, prepare_template, base, size, *args, **kwargs): # keep using the command line import instead of librbd since it # detects zeroes to preserve sparseness in the image args = ['--pool', self.pool, base, self.rbd_name] - if self._supports_layering(): + if self.driver.supports_layering(): args += ['--new-format'] - args += self._ceph_args() + args += self.driver.ceph_args() libvirt_utils.import_rbd_image(*args) base_size = disk.get_disk_size(base) if size and size > base_size: - self._resize(self.rbd_name, size) + self.driver.resize(self.rbd_name, size) 
def snapshot_extract(self, target, out_format): images.convert_image(self.path, target, out_format) diff --git a/nova/virt/libvirt/rbd.py b/nova/virt/libvirt/rbd.py new file mode 100644 index 0000000000..0c39d2df4e --- /dev/null +++ b/nova/virt/libvirt/rbd.py @@ -0,0 +1,147 @@ +# Copyright 2012 Grid Dynamics +# Copyright 2013 Inktank Storage, Inc. +# Copyright 2014 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +try: + import rados + import rbd +except ImportError: + rados = None + rbd = None + +from nova.i18n import _ +from nova.i18n import _LE +from nova.openstack.common import jsonutils +from nova.openstack.common import log as logging +from nova import utils + +LOG = logging.getLogger(__name__) + + +class RBDVolumeProxy(object): + """Context manager for dealing with an existing rbd volume. + + This handles connecting to rados and opening an ioctx automatically, and + otherwise acts like a librbd Image object. + + The underlying librados client and ioctx can be accessed as the attributes + 'client' and 'ioctx'. 
+ """ + def __init__(self, driver, name, pool=None): + client, ioctx = driver._connect_to_rados(pool) + try: + self.volume = rbd.Image(ioctx, str(name), snapshot=None) + except rbd.Error: + LOG.exception(_LE("error opening rbd image %s"), name) + driver._disconnect_from_rados(client, ioctx) + raise + self.driver = driver + self.client = client + self.ioctx = ioctx + + def __enter__(self): + return self + + def __exit__(self, type_, value, traceback): + try: + self.volume.close() + finally: + self.driver._disconnect_from_rados(self.client, self.ioctx) + + def __getattr__(self, attrib): + return getattr(self.volume, attrib) + + +class RBDDriver(object): + + def __init__(self, pool, ceph_conf, rbd_user): + self.pool = pool.encode('utf8') + # NOTE(angdraug): rados.Rados fails to connect if ceph_conf is None: + # https://github.com/ceph/ceph/pull/1787 + self.ceph_conf = ceph_conf.encode('utf8') if ceph_conf else '' + self.rbd_user = rbd_user.encode('utf8') if rbd_user else None + if rbd is None: + raise RuntimeError(_('rbd python libraries not found')) + + def _connect_to_rados(self, pool=None): + client = rados.Rados(rados_id=self.rbd_user, + conffile=self.ceph_conf) + try: + client.connect() + pool_to_open = str(pool or self.pool) + ioctx = client.open_ioctx(pool_to_open) + return client, ioctx + except rados.Error: + # shutdown cannot raise an exception + client.shutdown() + raise + + def _disconnect_from_rados(self, client, ioctx): + # closing an ioctx cannot raise an exception + ioctx.close() + client.shutdown() + + def supports_layering(self): + return hasattr(rbd, 'RBD_FEATURE_LAYERING') + + def ceph_args(self): + """List of command line parameters to be passed to ceph commands to + reflect RBDDriver configuration such as RBD user name and location + of ceph.conf. 
+ """ + args = [] + if self.rbd_user: + args.extend(['--id', self.rbd_user]) + if self.ceph_conf: + args.extend(['--conf', self.ceph_conf]) + return args + + def get_mon_addrs(self): + args = ['ceph', 'mon', 'dump', '--format=json'] + self.ceph_args() + out, _ = utils.execute(*args) + lines = out.split('\n') + if lines[0].startswith('dumped monmap epoch'): + lines = lines[1:] + monmap = jsonutils.loads('\n'.join(lines)) + addrs = [mon['addr'] for mon in monmap['mons']] + hosts = [] + ports = [] + for addr in addrs: + host_port = addr[:addr.rindex('/')] + host, port = host_port.rsplit(':', 1) + hosts.append(host.strip('[]')) + ports.append(port) + return hosts, ports + + def size(self, name): + with RBDVolumeProxy(self, name) as vol: + return vol.size() + + def resize(self, name, size): + """Resize RBD volume. + + :name: Name of RBD object + :size: New size in bytes + """ + LOG.debug('resizing rbd image %s to %d', name, size) + with RBDVolumeProxy(self, name) as vol: + vol.resize(size) + + def exists(self, name): + try: + with RBDVolumeProxy(self, name): + return True + except rbd.ImageNotFound: + return False From 13e2bd02a5b50973f95eb3d8fc0af4e0702e3381 Mon Sep 17 00:00:00 2001 From: Dmitry Borodaenko Date: Thu, 13 Mar 2014 14:33:11 -0700 Subject: [PATCH 193/486] Use library instead of CLI to cleanup RBD volumes 'rbd list' CLI returns error code when there are no rbd volumes, which causes problems during live migration of VMs with RBD backed ephemeral volumes. It's safer to use the library that only raises an exception in case of a real problem. The only case where rbd CLI is still justified is import, which is needed to correctly import sparse image files. All code related to cleanup of RBD volumes is moved to rbd.py, this fixes a yo-yo problem with single-use methods scattered across 3 different files, and minimizes impact of this fix on imports in imagebackend and utils. 
Closes-Bug: #1346092 Change-Id: I92cd6b16fbd93b377fe47b15d22efbbf68d02513 Signed-off-by: Dmitry Borodaenko --- nova/tests/virt/libvirt/fake_libvirt_utils.py | 16 ------- nova/tests/virt/libvirt/test_driver.py | 37 ++++----------- nova/tests/virt/libvirt/test_imagebackend.py | 10 ++-- nova/tests/virt/libvirt/test_rbd.py | 15 ++++++ nova/tests/virt/libvirt/test_utils.py | 40 ---------------- nova/virt/libvirt/driver.py | 17 +++---- nova/virt/libvirt/imagebackend.py | 9 +--- nova/virt/libvirt/rbd.py | 45 ++++++++++++++++++ nova/virt/libvirt/utils.py | 46 ------------------- 9 files changed, 82 insertions(+), 153 deletions(-) diff --git a/nova/tests/virt/libvirt/fake_libvirt_utils.py b/nova/tests/virt/libvirt/fake_libvirt_utils.py index 1585e60d92..865c0586ac 100644 --- a/nova/tests/virt/libvirt/fake_libvirt_utils.py +++ b/nova/tests/virt/libvirt/fake_libvirt_utils.py @@ -110,10 +110,6 @@ def create_lvm_image(vg, lv, size, sparse=False): pass -def import_rbd_image(path, *args): - pass - - def volume_group_free_space(vg): pass @@ -194,17 +190,5 @@ def pick_disk_driver_name(hypervisor_version, is_block_dev=False): return "qemu" -def list_rbd_volumes(pool): - fake_volumes = ['875a8070-d0b9-4949-8b31-104d125c9a64.local', - '875a8070-d0b9-4949-8b31-104d125c9a64.swap', - '875a8070-d0b9-4949-8b31-104d125c9a64', - 'wrong875a8070-d0b9-4949-8b31-104d125c9a64'] - return fake_volumes - - -def remove_rbd_volumes(pool, *names): - pass - - def get_arch(image_meta): pass diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index b9700c8ea0..67ee7314d8 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -80,6 +80,7 @@ from nova.virt.libvirt import driver as libvirt_driver from nova.virt.libvirt import firewall from nova.virt.libvirt import imagebackend +from nova.virt.libvirt import rbd from nova.virt.libvirt import utils as libvirt_utils from nova.virt import netutils @@ -6118,38 +6119,16 @@ def 
fake_delete_instance_files(instance): "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"} conn.destroy(self.context, instance, []) - def test_cleanup_rbd(self): - mock = self.mox.CreateMock(libvirt.virDomain) - - def fake_lookup_by_name(instance_name): - return mock - - def fake_get_info(instance_name): - return {'state': power_state.SHUTDOWN, 'id': -1} - - fake_volumes = ['875a8070-d0b9-4949-8b31-104d125c9a64.local', - '875a8070-d0b9-4949-8b31-104d125c9a64.swap', - '875a8070-d0b9-4949-8b31-104d125c9a64', - 'wrong875a8070-d0b9-4949-8b31-104d125c9a64'] - fake_pool = 'fake_pool' - fake_instance = {'name': 'fakeinstancename', 'id': 'instanceid', - 'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'} - - conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) - self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name) - self.stubs.Set(conn, 'get_info', fake_get_info) - - self.flags(images_rbd_pool=fake_pool, group='libvirt') - self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, - 'remove_rbd_volumes') - libvirt_driver.libvirt_utils.remove_rbd_volumes(fake_pool, - *fake_volumes[:3]) - - self.mox.ReplayAll() + @mock.patch.object(rbd, 'RBDDriver') + def test_cleanup_rbd(self, mock_driver): + driver = mock_driver.return_value + driver.cleanup_volumes = mock.Mock() + fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'} + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conn._cleanup_rbd(fake_instance) - self.mox.VerifyAll() + driver.cleanup_volumes.assert_called_once_with(fake_instance) def test_destroy_undefines_no_undefine_flags(self): mock = self.mox.CreateMock(libvirt.virDomain) diff --git a/nova/tests/virt/libvirt/test_imagebackend.py b/nova/tests/virt/libvirt/test_imagebackend.py index bb92f30ec6..06f4f2e7ee 100644 --- a/nova/tests/virt/libvirt/test_imagebackend.py +++ b/nova/tests/virt/libvirt/test_imagebackend.py @@ -745,19 +745,23 @@ def test_create_image(self): rbd.rbd.RBD_FEATURE_LAYERING = 1 + 
fake_processutils.fake_execute_clear_log() + fake_processutils.stub_out_processutils_execute(self.stubs) + self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size') imagebackend.disk.get_disk_size(self.TEMPLATE_PATH ).AndReturn(self.SIZE) - rbd_name = "%s/%s" % (self.INSTANCE['name'], self.NAME) - cmd = ('--pool', self.POOL, self.TEMPLATE_PATH, + rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME) + cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH, rbd_name, '--new-format', '--id', self.USER, '--conf', self.CONF) - self.libvirt_utils.import_rbd_image(self.TEMPLATE_PATH, *cmd) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) image.create_image(fn, self.TEMPLATE_PATH, None) + self.assertEqual(fake_processutils.fake_execute_get_log(), + [' '.join(cmd)]) self.mox.VerifyAll() def test_prealloc_image(self): diff --git a/nova/tests/virt/libvirt/test_rbd.py b/nova/tests/virt/libvirt/test_rbd.py index 93a4bed4bb..eb5b57b743 100644 --- a/nova/tests/virt/libvirt/test_rbd.py +++ b/nova/tests/virt/libvirt/test_rbd.py @@ -167,3 +167,18 @@ def test_exists(self, mock_proxy): self.assertTrue(self.driver.exists(self.volume_name)) proxy.__enter__.assert_called_once_with() proxy.__exit__.assert_called_once_with(None, None, None) + + @mock.patch.object(rbd, 'rbd') + @mock.patch.object(rbd, 'rados') + @mock.patch.object(rbd, 'RADOSClient') + def test_cleanup_volumes(self, mock_client, mock_rados, mock_rbd): + instance = {'uuid': '12345'} + + rbd = mock_rbd.RBD.return_value + rbd.list.return_value = ['12345_test', '111_test'] + + client = mock_client.return_value + self.driver.cleanup_volumes(instance) + rbd.remove.assert_called_once_with(client.ioctx, '12345_test') + client.__enter__.assert_called_once_with() + client.__exit__.assert_called_once_with(None, None, None) diff --git a/nova/tests/virt/libvirt/test_utils.py b/nova/tests/virt/libvirt/test_utils.py index 827b2cf89c..484919f18a 100644 --- a/nova/tests/virt/libvirt/test_utils.py +++ 
b/nova/tests/virt/libvirt/test_utils.py @@ -47,46 +47,6 @@ def test_get_disk_type(self): disk_type = libvirt_utils.get_disk_type(path) self.assertEqual(disk_type, 'raw') - def test_list_rbd_volumes(self): - conf = '/etc/ceph/fake_ceph.conf' - pool = 'fake_pool' - user = 'user' - self.flags(images_rbd_ceph_conf=conf, group='libvirt') - self.flags(rbd_user=user, group='libvirt') - self.mox.StubOutWithMock(libvirt_utils.utils, - 'execute') - libvirt_utils.utils.execute('rbd', '-p', pool, 'ls', '--id', - user, - '--conf', conf).AndReturn(("Out", "Error")) - self.mox.ReplayAll() - - libvirt_utils.list_rbd_volumes(pool) - - self.mox.VerifyAll() - - def test_remove_rbd_volumes(self): - conf = '/etc/ceph/fake_ceph.conf' - pool = 'fake_pool' - user = 'user' - names = ['volume1', 'volume2', 'volume3'] - self.flags(images_rbd_ceph_conf=conf, group='libvirt') - self.flags(rbd_user=user, group='libvirt') - self.mox.StubOutWithMock(libvirt_utils.utils, 'execute') - libvirt_utils.utils.execute('rbd', 'rm', os.path.join(pool, 'volume1'), - '--id', user, '--conf', conf, attempts=3, - run_as_root=True) - libvirt_utils.utils.execute('rbd', 'rm', os.path.join(pool, 'volume2'), - '--id', user, '--conf', conf, attempts=3, - run_as_root=True) - libvirt_utils.utils.execute('rbd', 'rm', os.path.join(pool, 'volume3'), - '--id', user, '--conf', conf, attempts=3, - run_as_root=True) - self.mox.ReplayAll() - - libvirt_utils.remove_rbd_volumes(pool, *names) - - self.mox.VerifyAll() - @mock.patch('nova.utils.execute') def test_copy_image_local_cp(self, mock_execute): libvirt_utils.copy_image('src', 'dest') diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index bad79cc2bb..11ebf34076 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -92,6 +92,7 @@ from nova.virt.libvirt import imagebackend from nova.virt.libvirt import imagecache from nova.virt.libvirt import lvm +from nova.virt.libvirt import rbd from nova.virt.libvirt import utils as 
libvirt_utils from nova.virt import netutils from nova.virt import watchdog_actions @@ -1099,17 +1100,11 @@ def cleanup(self, context, instance, network_info, block_device_info=None, self._cleanup_rbd(instance) def _cleanup_rbd(self, instance): - pool = CONF.libvirt.images_rbd_pool - volumes = libvirt_utils.list_rbd_volumes(pool) - pattern = instance['uuid'] - - def belongs_to_instance(disk): - return disk.startswith(pattern) - - volumes = filter(belongs_to_instance, volumes) - - if volumes: - libvirt_utils.remove_rbd_volumes(pool, *volumes) + driver = rbd.RBDDriver( + pool=CONF.libvirt.images_rbd_pool, + ceph_conf=CONF.libvirt.images_rbd_ceph_conf, + rbd_user=CONF.libvirt.rbd_user) + driver.cleanup_volumes(instance) def _cleanup_lvm(self, instance): """Delete all LVM disks for given instance object.""" diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index 3a510ca295..9cf96e4c66 100644 --- a/nova/virt/libvirt/imagebackend.py +++ b/nova/virt/libvirt/imagebackend.py @@ -558,16 +558,9 @@ def create_image(self, prepare_template, base, size, *args, **kwargs): else: self.verify_base_size(base, size) - # keep using the command line import instead of librbd since it - # detects zeroes to preserve sparseness in the image - args = ['--pool', self.pool, base, self.rbd_name] - if self.driver.supports_layering(): - args += ['--new-format'] - args += self.driver.ceph_args() - libvirt_utils.import_rbd_image(*args) + self.driver.import_image(base, self.rbd_name) base_size = disk.get_disk_size(base) - if size and size > base_size: self.driver.resize(self.rbd_name, size) diff --git a/nova/virt/libvirt/rbd.py b/nova/virt/libvirt/rbd.py index 0c39d2df4e..78084805d4 100644 --- a/nova/virt/libvirt/rbd.py +++ b/nova/virt/libvirt/rbd.py @@ -23,6 +23,7 @@ from nova.i18n import _ from nova.i18n import _LE +from nova.i18n import _LW from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import utils @@ -64,6 
+65,19 @@ def __getattr__(self, attrib): return getattr(self.volume, attrib) +class RADOSClient(object): + """Context manager to simplify error handling for connecting to ceph.""" + def __init__(self, driver, pool=None): + self.driver = driver + self.cluster, self.ioctx = driver._connect_to_rados(pool) + + def __enter__(self): + return self + + def __exit__(self, type_, value, traceback): + self.driver._disconnect_from_rados(self.cluster, self.ioctx) + + class RBDDriver(object): def __init__(self, pool, ceph_conf, rbd_user): @@ -145,3 +159,34 @@ def exists(self, name): return True except rbd.ImageNotFound: return False + + def import_image(self, base, name): + """Import RBD volume from image file. + + Uses the command line import instead of librbd since rbd import + command detects zeroes to preserve sparseness in the image. + + :base: Path to image file + :name: Name of RBD volume + """ + args = ['--pool', self.pool, base, name] + if self.supports_layering(): + args += ['--new-format'] + args += self.ceph_args() + utils.execute('rbd', 'import', *args) + + def cleanup_volumes(self, instance): + with RADOSClient(self, self.pool) as client: + + def belongs_to_instance(disk): + return disk.startswith(instance['uuid']) + + # pylint: disable=E1101 + volumes = rbd.RBD().list(client.ioctx) + for volume in filter(belongs_to_instance, volumes): + try: + rbd.RBD().remove(client.ioctx, volume) + except (rbd.ImageNotFound, rbd.ImageHasSnapshots): + LOG.warn(_LW('rbd remove %(volume)s in pool %(pool)s ' + 'failed'), + {'volume': volume, 'pool': self.pool}) diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py index d772c8eb57..825a81a04e 100644 --- a/nova/virt/libvirt/utils.py +++ b/nova/virt/libvirt/utils.py @@ -203,52 +203,6 @@ def create_cow_image(backing_file, path, size=None): execute(*cmd) -def import_rbd_image(*args): - execute('rbd', 'import', *args) - - -def _run_rbd(*args, **kwargs): - total = list(args) - - if CONF.libvirt.rbd_user: - 
total.extend(['--id', str(CONF.libvirt.rbd_user)]) - if CONF.libvirt.images_rbd_ceph_conf: - total.extend(['--conf', str(CONF.libvirt.images_rbd_ceph_conf)]) - - return utils.execute(*total, **kwargs) - - -def list_rbd_volumes(pool): - """List volumes names for given ceph pool. - - :param pool: ceph pool name - """ - try: - out, err = _run_rbd('rbd', '-p', pool, 'ls') - except processutils.ProcessExecutionError: - # No problem when no volume in rbd pool - return [] - - return [line.strip() for line in out.splitlines()] - - -def remove_rbd_volumes(pool, *names): - """Remove one or more rbd volume.""" - for name in names: - # NOTE(nic): the rbd command supports two methods for - # specifying a pool name: the "-p" flag, and using the volume - # name notation "pool_name/volume_name" - # The latter method supercedes the former, so to guard - # against slashes in the volume name confusing things, always - # use the path notation - rbd_remove = ('rbd', 'rm', os.path.join(pool, name)) - try: - _run_rbd(*rbd_remove, attempts=3, run_as_root=True) - except processutils.ProcessExecutionError: - LOG.warn(_LW("rbd remove %(name)s in pool %(pool)s failed"), - {'name': name, 'pool': pool}) - - def pick_disk_driver_name(hypervisor_version, is_block_dev=False): """Pick the libvirt primary backend driver name From 7ae506a4b1829fbd8cbecc0a6b267f76230face7 Mon Sep 17 00:00:00 2001 From: Eric Brown Date: Mon, 7 Apr 2014 13:27:47 -0700 Subject: [PATCH 194/486] VMware: Resize operation fails to change disk size The finish_migration step will resize the cpu and memory to match the new flavor chosen, but not the disk size. Additional code was added to extend the disk size using ExtendVirtualDisk_Task. 
DocImpact Change-Id: Iff10443f603d329d01a74a620079f80518b75a50 Closes-bug: #1291741 --- nova/tests/virt/vmwareapi/test_driver_api.py | 35 ++++++++++++++++---- nova/tests/virt/vmwareapi/test_vm_util.py | 12 +++++++ nova/tests/virt/vmwareapi/test_vmops.py | 18 +++++++++- nova/virt/vmwareapi/vm_util.py | 13 +++++++- nova/virt/vmwareapi/vmops.py | 20 +++++++++++ 5 files changed, 90 insertions(+), 8 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index 933f3aae6c..4333fa0b7c 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -2507,9 +2507,9 @@ def fake_update_instance_progress(context, instance, step, def fake_get_host_ref_from_name(dest): return None - self._create_vm() + self._create_vm(instance_type='m1.large') vm_ref_orig = vm_util.get_vm_ref(self.conn._session, self.instance) - flavor = {'name': 'fake', 'flavorid': 'fake_id'} + flavor = self._get_instance_type_by_name('m1.large') self.stubs.Set(self.conn._vmops, "_update_instance_progress", fake_update_instance_progress) self.stubs.Set(self.conn._vmops, "_get_host_ref_from_name", @@ -2580,6 +2580,13 @@ def test_confirm_migration(self): self._create_vm() self.conn.confirm_migration(self.context, self.instance, None) + def test_resize_to_smaller_disk(self): + self._create_vm(instance_type='m1.large') + flavor = self._get_instance_type_by_name('m1.small') + self.assertRaises(exception.InstanceFaultRollback, + self.conn.migrate_disk_and_power_off, self.context, + self.instance, 'fake_dest', flavor, None) + def test_spawn_attach_volume_vmdk(self): self._spawn_attach_volume_vmdk(vc_support=True) @@ -2665,8 +2672,12 @@ def _test_finish_migration(self, power_on, resize_instance=False): """Tests the finish_migration method on VC Driver.""" # setup the test instance in the database self._create_vm() - vm_ref = vm_util.get_vm_ref(self.conn._session, - self.instance) + if resize_instance: + 
self.instance.system_metadata = {'old_instance_type_root_gb': '0'} + vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance) + datastore = ds_util.Datastore(ref='fake-ref', name='fake') + dc_info = vmops.DcInfo(ref='fake_ref', name='fake', + vmFolder='fake_folder') with contextlib.nested( mock.patch.object(self.conn._session, "_call_method", return_value='fake-task'), @@ -2675,9 +2686,17 @@ def _test_finish_migration(self, power_on, resize_instance=False): mock.patch.object(self.conn._session, "_wait_for_task"), mock.patch.object(vm_util, "get_vm_resize_spec", return_value='fake-spec'), + mock.patch.object(ds_util, "get_datastore", + return_value=datastore), + mock.patch.object(self.conn._vmops, + 'get_datacenter_ref_and_name', + return_value=dc_info), + mock.patch.object(self.conn._vmops, '_extend_virtual_disk'), mock.patch.object(vm_util, "power_on_instance") ) as (fake_call_method, fake_update_instance_progress, - fake_wait_for_task, fake_vm_resize_spec, fake_power_on): + fake_wait_for_task, fake_vm_resize_spec, + fake_get_datastore, fake_get_datacenter_ref_and_name, + fake_extend_virtual_disk, fake_power_on): self.conn.finish_migration(context=self.context, migration=None, instance=self.instance, @@ -2691,16 +2710,20 @@ def _test_finish_migration(self, power_on, resize_instance=False): fake_vm_resize_spec.assert_called_once_with( self.conn._session._get_vim().client.factory, self.instance) - fake_call_method.assert_called_once_with( + fake_call_method.assert_any_call( self.conn._session._get_vim(), "ReconfigVM_Task", vm_ref, spec='fake-spec') fake_wait_for_task.assert_called_once_with('fake-task') + fake_extend_virtual_disk.assert_called_once_with( + self.instance, self.instance['root_gb'] * units.Mi, + None, dc_info.ref) else: self.assertFalse(fake_vm_resize_spec.called) self.assertFalse(fake_call_method.called) self.assertFalse(fake_wait_for_task.called) + self.assertFalse(fake_extend_virtual_disk.called) if power_on: 
fake_power_on.assert_called_once_with(self.conn._session, diff --git a/nova/tests/virt/vmwareapi/test_vm_util.py b/nova/tests/virt/vmwareapi/test_vm_util.py index 62b016696a..74fd99debe 100644 --- a/nova/tests/virt/vmwareapi/test_vm_util.py +++ b/nova/tests/virt/vmwareapi/test_vm_util.py @@ -300,6 +300,18 @@ def _vmdk_path_and_adapter_type_devices(self, filename, parent=None): devices = [disk, controller] return devices + def test_get_vmdk_path(self): + uuid = '00000000-0000-0000-0000-000000000000' + filename = '[test_datastore] %s/%s.vmdk' % (uuid, uuid) + devices = self._vmdk_path_and_adapter_type_devices(filename) + session = fake.FakeSession() + + with mock.patch.object(session, '_call_method', + return_value=devices): + instance = {'uuid': uuid} + vmdk_path = vm_util.get_vmdk_path(session, None, instance) + self.assertEqual(filename, vmdk_path) + def test_get_vmdk_path_and_adapter_type(self): filename = '[test_datastore] test_file.vmdk' devices = self._vmdk_path_and_adapter_type_devices(filename) diff --git a/nova/tests/virt/vmwareapi/test_vmops.py b/nova/tests/virt/vmwareapi/test_vmops.py index 35dc094402..b77ffe1860 100644 --- a/nova/tests/virt/vmwareapi/test_vmops.py +++ b/nova/tests/virt/vmwareapi/test_vmops.py @@ -434,6 +434,11 @@ def fake_call_method(module, method, *args, **kwargs): def _test_finish_migration(self, power_on=True, resize_instance=False): """Tests the finish_migration method on vmops.""" + if resize_instance: + self._instance.system_metadata = {'old_instance_type_root_gb': '0'} + datastore = ds_util.Datastore(ref='fake-ref', name='fake') + dc_info = vmops.DcInfo(ref='fake_ref', name='fake', + vmFolder='fake_folder') with contextlib.nested( mock.patch.object(self._session, "_call_method", return_value='fake-task'), @@ -441,9 +446,16 @@ def _test_finish_migration(self, power_on=True, resize_instance=False): mock.patch.object(self._session, "_wait_for_task"), mock.patch.object(vm_util, "get_vm_resize_spec", return_value='fake-spec'), + 
mock.patch.object(ds_util, "get_datastore", + return_value=datastore), + mock.patch.object(self._vmops, 'get_datacenter_ref_and_name', + return_value=dc_info), + mock.patch.object(self._vmops, '_extend_virtual_disk'), mock.patch.object(vm_util, "power_on_instance") ) as (fake_call_method, fake_update_instance_progress, - fake_wait_for_task, fake_vm_resize_spec, fake_power_on): + fake_wait_for_task, fake_vm_resize_spec, + fake_get_datastore, fake_get_datacenter_ref_and_name, + fake_extend_virtual_disk, fake_power_on): self._vmops.finish_migration(context=self._context, migration=None, instance=self._instance, @@ -463,9 +475,13 @@ def _test_finish_migration(self, power_on=True, resize_instance=False): 'f', spec='fake-spec')) fake_wait_for_task.assert_called_once_with('fake-task') + fake_extend_virtual_disk.assert_called_once_with( + self._instance, self._instance['root_gb'] * units.Mi, + None, dc_info.ref) else: self.assertFalse(fake_vm_resize_spec.called) self.assertFalse(fake_wait_for_task.called) + self.assertFalse(fake_extend_virtual_disk.called) if power_on: fake_power_on.assert_called_once_with(self._session, diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index 193b8375db..0f90ab8a44 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -371,6 +371,16 @@ def get_vm_extra_config_spec(client_factory, extra_opts): return config_spec +def get_vmdk_path(session, vm_ref, instance): + """Gets the vmdk file path for specified instance.""" + hardware_devices = session._call_method(vim_util, + "get_dynamic_property", vm_ref, "VirtualMachine", + "config.hardware.device") + (vmdk_path, adapter_type, disk_type) = get_vmdk_path_and_adapter_type( + hardware_devices, uuid=instance['uuid']) + return vmdk_path + + def get_vmdk_path_and_adapter_type(hardware_devices, uuid=None): """Gets the vmdk file path and the storage adapter type.""" if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": @@ -1325,7 +1335,8 
@@ def clone_vmref_for_instance(session, instance, vm_ref, host_ref, ds_ref, raise error_util.MissingParameter(param="vm_ref") # Get the clone vm spec client_factory = session._get_vim().client.factory - rel_spec = relocate_vm_spec(client_factory, ds_ref, host_ref) + rel_spec = relocate_vm_spec(client_factory, ds_ref, host_ref, + disk_move_type='moveAllDiskBackingsAndDisallowSharing') extra_opts = {'nvp.vm-uuid': instance['uuid']} config_spec = get_vm_extra_config_spec(client_factory, extra_opts) config_spec.instanceUuid = instance['uuid'] diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 4315d20899..7082258cd5 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -1107,6 +1107,12 @@ def migrate_disk_and_power_off(self, context, instance, dest, """Transfers the disk of a running instance in multiple phases, turning off the instance before the end. """ + # Checks if the migration needs a disk resize down. + if flavor['root_gb'] < instance['root_gb']: + reason = _("Unable to shrink disk.") + raise exception.InstanceFaultRollback( + exception.ResizeError(reason=reason)) + # 0. Zero out the progress to begin self._update_instance_progress(context, instance, step=0, @@ -1185,6 +1191,20 @@ def finish_migration(self, context, migration, instance, disk_info, instance) vm_util.reconfigure_vm(self._session, vm_ref, vm_resize_spec) + # Resize the disk (if larger) + old_root_gb = instance.system_metadata['old_instance_type_root_gb'] + if instance['root_gb'] > int(old_root_gb): + root_disk_in_kb = instance['root_gb'] * units.Mi + vmdk_path = vm_util.get_vmdk_path(self._session, vm_ref, + instance) + data_store_ref = ds_util.get_datastore(self._session, + self._cluster, datastore_regex=self._datastore_regex).ref + dc_info = self.get_datacenter_ref_and_name(data_store_ref) + self._extend_virtual_disk(instance, root_disk_in_kb, vmdk_path, + dc_info.ref) + + # TODO(ericwb): add extend for ephemeral disk + # 4. 
Start VM if power_on: vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref) From 06056c852c89eae9118edc609e238cef5a764a8e Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Mon, 7 Jul 2014 18:01:43 -0700 Subject: [PATCH 195/486] Deduplicate module listings in devref devref already contains a list of all modules, so no need to explicitly include files that just contain lists of modules under the section 'Module Reference'. Most of the deleted files are very outdated and refer to non-existent modules anyway. Leave services for now as it contains some content that should be preserved. A later patch will clean those files up. Change-Id: I5dce1b523f3648667fcde49ee51206ae9ccfdec8 --- doc/source/devref/api.rst | 270 ------------------------------ doc/source/devref/compute.rst | 140 ---------------- doc/source/devref/database.rst | 63 ------- doc/source/devref/fakes.rst | 65 ------- doc/source/devref/glance.rst | 28 ---- doc/source/devref/index.rst | 9 - doc/source/devref/network.rst | 49 ------ doc/source/devref/nova.rst | 215 ------------------------ doc/source/devref/objectstore.rst | 71 -------- doc/source/devref/scheduler.rst | 61 ------- 10 files changed, 971 deletions(-) delete mode 100644 doc/source/devref/api.rst delete mode 100644 doc/source/devref/compute.rst delete mode 100644 doc/source/devref/database.rst delete mode 100644 doc/source/devref/fakes.rst delete mode 100644 doc/source/devref/glance.rst delete mode 100644 doc/source/devref/network.rst delete mode 100644 doc/source/devref/nova.rst delete mode 100644 doc/source/devref/objectstore.rst delete mode 100644 doc/source/devref/scheduler.rst diff --git a/doc/source/devref/api.rst b/doc/source/devref/api.rst deleted file mode 100644 index 8827b8f17e..0000000000 --- a/doc/source/devref/api.rst +++ /dev/null @@ -1,270 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. 
- - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -API Endpoint -============ - -Nova has a system for managing multiple APIs on different subdomains. -Currently there is support for the OpenStack API, as well as the Amazon EC2 -API. - -Common Components ------------------ - -The :mod:`nova.api` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: nova.api - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`nova.api.cloud` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.api.cloud - :noindex: - :members: - :undoc-members: - :show-inheritance: - -OpenStack API -------------- - -The :mod:`openstack` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: nova.api.openstack - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`auth` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: nova.api.openstack.auth - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`backup_schedules` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: nova.api.openstack.backup_schedules - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`faults` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: nova.api.openstack.faults - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`flavors` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. 
automodule:: nova.api.openstack.flavors - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`images` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: nova.api.openstack.images - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`servers` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: nova.api.openstack.servers - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`sharedipgroups` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: nova.api.openstack.sharedipgroups - :noindex: - :members: - :undoc-members: - :show-inheritance: - -EC2 API -------- - -The :mod:`nova.api.ec2` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.api.ec2 - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`apirequest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.api.ec2.apirequest - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`cloud` Module -~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.api.ec2.cloud - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`images` Module -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.api.ec2.images - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`metadatarequesthandler` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.api.ec2.metadatarequesthandler - :noindex: - :members: - :undoc-members: - :show-inheritance: - -Tests ------ - -The :mod:`api_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.api_unittest - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`api_integration` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.api_integration - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`cloud_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
automodule:: nova.tests.cloud_unittest - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`api.fakes` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.api.fakes - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`api.test_wsgi` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.api.test_wsgi - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`test_api` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.api.openstack.test_api - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`test_auth` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.api.openstack.test_auth - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`test_faults` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.api.openstack.test_faults - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`test_flavors` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.api.openstack.test_flavors - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`test_images` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.api.openstack.test_images - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`test_servers` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.api.openstack.test_servers - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`test_sharedipgroups` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.api.openstack.test_sharedipgroups - :noindex: - :members: - :undoc-members: - :show-inheritance: - diff --git a/doc/source/devref/compute.rst b/doc/source/devref/compute.rst deleted file mode 100644 index 00da777e80..0000000000 --- a/doc/source/devref/compute.rst +++ /dev/null @@ -1,140 +0,0 @@ -.. 
- Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -Virtualization -============== - - -Compute -------- - -Documentation for the compute manager and related files. For reading about -a specific virtualization backend, read Drivers_. - - -The :mod:`nova.compute.manager` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.compute.manager - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`nova.virt.connection` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.virt.connection - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`nova.compute.disk` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.compute.disk - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`nova.virt.images` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.virt.images - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.compute.flavors` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.compute.flavors - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.compute.power_state` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
automodule:: nova.compute.power_state - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -Drivers -------- - - -The :mod:`nova.virt.libvirt_conn` Driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.virt.libvirt_conn - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.virt.xenapi` Driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.virt.xenapi - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.virt.fake` Driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.virt.fake - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -Tests ------ - -The :mod:`compute_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.compute_unittest - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`virt_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.virt_unittest - :noindex: - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/devref/database.rst b/doc/source/devref/database.rst deleted file mode 100644 index a26e487057..0000000000 --- a/doc/source/devref/database.rst +++ /dev/null @@ -1,63 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. 
- -The Database Layer -================== - -The :mod:`nova.db.api` Module ------------------------------ - -.. automodule:: nova.db.api - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The Sqlalchemy Driver ---------------------- - -The :mod:`nova.db.sqlalchemy.api` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.db.sqlalchemy.api - :noindex: - -The :mod:`nova.db.sqlalchemy.models` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.db.sqlalchemy.models - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`nova.db.sqlalchemy.session` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.db.sqlalchemy.session - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -Tests ------ - -Tests are lacking for the db api layer and for the sqlalchemy driver. -Failures in the drivers would be detected in other test cases, though. diff --git a/doc/source/devref/fakes.rst b/doc/source/devref/fakes.rst deleted file mode 100644 index d98154871f..0000000000 --- a/doc/source/devref/fakes.rst +++ /dev/null @@ -1,65 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Fake Drivers -============ - -.. 
todo:: document general info about fakes - -When the real thing isn't available and you have some development to do these -fake implementations of various drivers let you get on with your day. - - -The :mod:`nova.virt.fake` Module --------------------------------- - -.. automodule:: nova.virt.fake - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.auth.fakeldap` Module ------------------------------------- - -.. automodule:: nova.auth.fakeldap - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :class:`nova.tests.service_unittest.FakeManager` Class ----------------------------------------------------------- - -.. autoclass:: nova.tests.service_unittest.FakeManager - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.tests.api.openstack.fakes` Module ------------------------------------------------- - -.. automodule:: nova.tests.api.openstack.fakes - :noindex: - :members: - :undoc-members: - :show-inheritance: - diff --git a/doc/source/devref/glance.rst b/doc/source/devref/glance.rst deleted file mode 100644 index 9a1c14d58b..0000000000 --- a/doc/source/devref/glance.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. 
- -Glance Integration - The Future of File Storage -=============================================== - -The :mod:`nova.image.service` Module ------------------------------------- - -.. automodule:: nova.image.service - :noindex: - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst index b9117ff192..e98bdfc16e 100644 --- a/doc/source/devref/index.rst +++ b/doc/source/devref/index.rst @@ -76,15 +76,6 @@ Module Reference :maxdepth: 3 services - database - compute - network - api - scheduler - fakes - nova - objectstore - glance Indices and tables diff --git a/doc/source/devref/network.rst b/doc/source/devref/network.rst deleted file mode 100644 index 56e9682ebd..0000000000 --- a/doc/source/devref/network.rst +++ /dev/null @@ -1,49 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Networking -========== - -The :mod:`nova.network.manager` Module --------------------------------------- - -.. automodule:: nova.network.manager - :noindex: - :members: - :undoc-members: - :show-inheritance: - -The :mod:`nova.network.linux_net` Driver ----------------------------------------- - -.. 
automodule:: nova.network.linux_net - :noindex: - :members: - :undoc-members: - :show-inheritance: - -Tests ------ - -The :mod:`network_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.network_unittest - :noindex: - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/devref/nova.rst b/doc/source/devref/nova.rst deleted file mode 100644 index beca99ecd5..0000000000 --- a/doc/source/devref/nova.rst +++ /dev/null @@ -1,215 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Common and Misc Libraries -========================= - -Libraries common throughout Nova or just ones that haven't been categorized -very well yet. - - -The :mod:`nova.adminclient` Module ----------------------------------- - -.. automodule:: nova.adminclient - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.context` Module ------------------------------- - -.. automodule:: nova.context - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.exception` Module --------------------------------- - -.. automodule:: nova.exception - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.flags` Module ----------------------------- - -.. 
automodule:: nova.flags - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.process` Module ------------------------------- - -.. automodule:: nova.process - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.rpc` Module --------------------------- - -.. automodule:: nova.rpc - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.server` Module ------------------------------ - -.. automodule:: nova.server - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.test` Module ---------------------------- - -.. automodule:: nova.test - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.utils` Module ----------------------------- - -.. automodule:: nova.utils - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.validate` Module -------------------------------- - -.. automodule:: nova.validate - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.wsgi` Module ---------------------------- - -.. automodule:: nova.wsgi - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -Tests ------ - -The :mod:`declare_flags` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.declare_flags - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`fake_flags` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.fake_flags - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`flags_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.flags_unittest - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`process_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.process_unittest - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`real_flags` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
automodule:: nova.tests.real_flags - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`rpc_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.rpc_unittest - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`runtime_flags` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.runtime_flags - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`validator_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.validator_unittest - :noindex: - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/devref/objectstore.rst b/doc/source/devref/objectstore.rst deleted file mode 100644 index f140e85e99..0000000000 --- a/doc/source/devref/objectstore.rst +++ /dev/null @@ -1,71 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Objectstore - File Storage Service -================================== - -The :mod:`nova.objectstore.handler` Module ------------------------------------------- - -.. automodule:: nova.objectstore.handler - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.objectstore.bucket` Module ------------------------------------------ - -.. 
automodule:: nova.objectstore.bucket - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.objectstore.stored` Module ------------------------------------------ - -.. automodule:: nova.objectstore.stored - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.objecstore.image` Module ----------------------------------------- - -.. automodule:: nova.objectstore.image - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -Tests ------ - -The :mod:`objectstore_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.objectstore_unittest - :noindex: - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/devref/scheduler.rst b/doc/source/devref/scheduler.rst deleted file mode 100644 index 6f0b8edf56..0000000000 --- a/doc/source/devref/scheduler.rst +++ /dev/null @@ -1,61 +0,0 @@ -.. - Copyright 2010-2011 United States Government as represented by the - Administrator of the National Aeronautics and Space Administration. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Scheduler -========= - -The :mod:`nova.scheduler.manager` Module ----------------------------------------- - -.. automodule:: nova.scheduler.manager - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.scheduler.driver` Module ---------------------------------------- - -.. 
automodule:: nova.scheduler.driver - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -The :mod:`nova.scheduler.chance` Driver ---------------------------------------- - -.. automodule:: nova.scheduler.chance - :noindex: - :members: - :undoc-members: - :show-inheritance: - - -Tests ------ - -The :mod:`scheduler_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.scheduler_unittest - :noindex: - :members: - :undoc-members: - :show-inheritance: From 05717876d531a232b48f5b23d07529386a64983c Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Mon, 7 Jul 2014 18:15:02 -0700 Subject: [PATCH 196/486] Remove api reference section in devref This previously just linked to api/autoindex which was removed in I6c93f9aae1994961fdca690742398a0d540d541f (in 2011). Change-Id: Iba86a5b84743fe6d65b797a2e9237d037c2bf1a8 --- doc/source/devref/index.rst | 7 ------- 1 file changed, 7 deletions(-) diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst index e98bdfc16e..10148394eb 100644 --- a/doc/source/devref/index.rst +++ b/doc/source/devref/index.rst @@ -63,13 +63,6 @@ Other Resources gerrit jenkins -API Reference -------------- -.. toctree:: - :maxdepth: 3 - - ../api/autoindex - Module Reference ---------------- .. toctree:: From 07fd3f9a257ce4c4adf8e626cac414be30cdea2f Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 8 Jul 2014 11:58:51 -0700 Subject: [PATCH 197/486] Turn on pbr's autodoc feature. In order to make the docs index and moduleindex pages work, turn on pbr's autodoc_index_modules feature. Change-Id: I0ef240f4624acf11900705d1c0f4d6d6f70247bd --- setup.cfg | 3 +++ 1 file changed, 3 insertions(+) diff --git a/setup.cfg b/setup.cfg index cb8c651ff2..e804077290 100644 --- a/setup.cfg +++ b/setup.cfg @@ -161,3 +161,6 @@ output_file = nova/locale/nova.pot [wheel] universal = 1 + +[pbr] +autodoc_index_modules = 1 From a6741135d4f15a4acd07480588360fb2d2b300c5 Mon Sep 17 00:00:00 2001 From: "Daniel P. 
Berrange" Date: Mon, 28 Jul 2014 12:04:34 +0100 Subject: [PATCH 198/486] libvirt: remove 3 unused vif.py methods In the following commit: commit 842b2abfe76dede55b3b61ebaad5a90c356c5ace Author: Daniel P. Berrange Date: Mon Nov 25 14:00:25 2013 +0000 Increase min required libvirt to 0.9.11 support for setting up OVS outside of libvirt was dropped, but 3 of the helper methods were mistakenly not deleted. Related-bug: #1254727 Change-Id: I53de8703a64d1214ec81b9698e376737a92cb53a --- nova/virt/libvirt/vif.py | 34 ---------------------------------- 1 file changed, 34 deletions(-) diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 9c73ba5f0b..37fc828cad 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -180,17 +180,6 @@ def get_config_bridge(self, instance, vif, image_meta, inst_type): return conf - def get_config_ovs_ethernet(self, instance, vif, - image_meta, inst_type): - conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, - image_meta, inst_type) - - dev = self.get_vif_devname(vif) - designer.set_vif_host_backend_ethernet_config(conf, dev) - - return conf - def get_config_ovs_bridge(self, instance, vif, image_meta, inst_type): conf = super(LibvirtGenericVIFDriver, @@ -371,17 +360,6 @@ def plug_bridge(self, instance, vif): self.get_bridge_name(vif), iface) - def plug_ovs_ethernet(self, instance, vif): - super(LibvirtGenericVIFDriver, - self).plug(instance, vif) - - iface_id = self.get_ovs_interfaceid(vif) - dev = self.get_vif_devname(vif) - linux_net.create_tap_dev(dev) - linux_net.create_ovs_vif_port(self.get_bridge_name(vif), - dev, iface_id, vif['address'], - instance['uuid']) - def plug_ovs_bridge(self, instance, vif): """No manual plugging required.""" super(LibvirtGenericVIFDriver, @@ -566,18 +544,6 @@ def unplug_bridge(self, instance, vif): super(LibvirtGenericVIFDriver, self).unplug(instance, vif) - def unplug_ovs_ethernet(self, instance, vif): - """Unplug the VIF by deleting the port from the 
bridge.""" - super(LibvirtGenericVIFDriver, - self).unplug(instance, vif) - - try: - linux_net.delete_ovs_vif_port(self.get_bridge_name(vif), - self.get_vif_devname(vif)) - except processutils.ProcessExecutionError: - LOG.exception(_LE("Failed while unplugging vif"), - instance=instance) - def unplug_ovs_bridge(self, instance, vif): """No manual unplugging required.""" super(LibvirtGenericVIFDriver, From d90144e37f3207d3ec91c2c819136f7a08043971 Mon Sep 17 00:00:00 2001 From: "ChangBo Guo(gcb)" Date: Mon, 28 Jul 2014 20:46:26 +0800 Subject: [PATCH 199/486] Remove outdated docstring for nova.network.manager There was related-flags section to describe config options, but was outdated and duplicated with config options' description in code. So We should remove it. Change-Id: Ie0e444810393c77f04c1adbafdeb83c99d970902 --- nova/network/manager.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index d54730565a..be7955216c 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -22,23 +22,6 @@ topologies. All of the network commands are issued to a subclass of :class:`NetworkManager`. 
-**Related Flags** - -:network_driver: Driver to use for network creation -:flat_network_bridge: Bridge device for simple network instances -:flat_interface: FlatDhcp will bridge into this interface if set -:flat_network_dns: Dns for simple network -:vlan_start: First VLAN for private networks -:vpn_ip: Public IP for the cloudpipe VPN servers -:vpn_start: First Vpn port for private networks -:cnt_vpn_clients: Number of addresses reserved for vpn clients -:network_size: Number of addresses in each private subnet -:fixed_range: Fixed IP address block -:fixed_ip_disassociate_timeout: Seconds after which a deallocated ip - is disassociated -:create_unique_mac_address_attempts: Number of times to attempt creating - a unique mac address - """ import datetime From c5de5e7ab166e29304a01d7e310ae6ed32d22090 Mon Sep 17 00:00:00 2001 From: Sagar Ratnakara Nikam Date: Sun, 15 Jun 2014 19:29:08 +0530 Subject: [PATCH 200/486] VMWare Driver - Ignore datastore in maintenance mode A datastore can be part of a datastore cluster. Any datastore which is part of a cluster can be put in maintenance mode. Instances cannot reside on a datastore if it is in maintenance mode. This fix ignores a datastore in the following scenarios 1. Stats update - Ignore a datastore during stats update. 2. Ignore a datastore during instance spawn. 
Change-Id: Id0cb3f5fdb4d0bc4884cf5405fadc433d4a2b6ba Closes-Bug: #1330065 --- nova/tests/virt/vmwareapi/fake.py | 6 +- nova/tests/virt/vmwareapi/test_ds_util.py | 62 +++++++++++++++++ .../test_ds_util_datastore_selection.py | 25 ++++--- nova/tests/virt/vmwareapi/test_vm_util.py | 1 + nova/virt/vmwareapi/ds_util.py | 68 ++++++++++++------- 5 files changed, 124 insertions(+), 38 deletions(-) diff --git a/nova/tests/virt/vmwareapi/fake.py b/nova/tests/virt/vmwareapi/fake.py index 81c92402e2..23393e28c4 100644 --- a/nova/tests/virt/vmwareapi/fake.py +++ b/nova/tests/virt/vmwareapi/fake.py @@ -639,13 +639,15 @@ def _update_summary(self): class Datastore(ManagedObject): """Datastore class.""" - def __init__(self, name="fake-ds", capacity=1024, free=500): + def __init__(self, name="fake-ds", capacity=1024, free=500, + accessible=True, maintenance_mode="normal"): super(Datastore, self).__init__("ds") self.set("summary.type", "VMFS") self.set("summary.name", name) self.set("summary.capacity", capacity * units.Gi) self.set("summary.freeSpace", free * units.Gi) - self.set("summary.accessible", True) + self.set("summary.accessible", accessible) + self.set("summary.maintenanceMode", maintenance_mode) self.set("browser", "") diff --git a/nova/tests/virt/vmwareapi/test_ds_util.py b/nova/tests/virt/vmwareapi/test_ds_util.py index 4e158aeeaf..0b8b5ba634 100644 --- a/nova/tests/virt/vmwareapi/test_ds_util.py +++ b/nova/tests/virt/vmwareapi/test_ds_util.py @@ -170,6 +170,10 @@ def fake_wait_for_task(task_ref): def test_get_datastore(self): fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(fake.Datastore()) + fake_objects.add_object(fake.Datastore("fake-ds-2", 2048, 1000, + False, "normal")) + fake_objects.add_object(fake.Datastore("fake-ds-3", 4096, 2000, + True, "inMaintenance")) result = ds_util.get_datastore( fake.FakeObjectRetrievalSession(fake_objects)) @@ -261,6 +265,64 @@ def test_get_datastore_inaccessible_ds(self): ds_util.get_datastore, 
fake.FakeObjectRetrievalSession(fake_objects)) + def test_get_datastore_ds_in_maintenance(self): + data_store = fake.Datastore() + data_store.set("summary.maintenanceMode", "inMaintenance") + + fake_objects = fake.FakeRetrieveResult() + fake_objects.add_object(data_store) + + self.assertRaises(exception.DatastoreNotFound, + ds_util.get_datastore, + fake.FakeObjectRetrievalSession(fake_objects)) + + def _test_is_datastore_valid(self, accessible=True, + maintenance_mode="normal", + type="VMFS", + datastore_regex=None): + propdict = {} + propdict["summary.accessible"] = accessible + propdict["summary.maintenanceMode"] = maintenance_mode + propdict["summary.type"] = type + propdict["summary.name"] = "ds-1" + + return ds_util._is_datastore_valid(propdict, datastore_regex) + + def test_is_datastore_valid(self): + for ds_type in ds_util.ALLOWED_DATASTORE_TYPES: + self.assertTrue(self._test_is_datastore_valid(True, + "normal", + ds_type)) + + def test_is_datastore_valid_inaccessible_ds(self): + self.assertFalse(self._test_is_datastore_valid(False, + "normal", + "VMFS")) + + def test_is_datastore_valid_ds_in_maintenance(self): + self.assertFalse(self._test_is_datastore_valid(True, + "inMaintenance", + "VMFS")) + + def test_is_datastore_valid_ds_type_invalid(self): + self.assertFalse(self._test_is_datastore_valid(True, + "normal", + "vfat")) + + def test_is_datastore_valid_not_matching_regex(self): + datastore_regex = re.compile("ds-2") + self.assertFalse(self._test_is_datastore_valid(True, + "normal", + "VMFS", + datastore_regex)) + + def test_is_datastore_valid_matching_regex(self): + datastore_regex = re.compile("ds-1") + self.assertTrue(self._test_is_datastore_valid(True, + "normal", + "VMFS", + datastore_regex)) + class DatastoreTestCase(test.NoDBTestCase): def test_ds(self): diff --git a/nova/tests/virt/vmwareapi/test_ds_util_datastore_selection.py b/nova/tests/virt/vmwareapi/test_ds_util_datastore_selection.py index c34e76f4c2..ca211902cf 100644 --- 
a/nova/tests/virt/vmwareapi/test_ds_util_datastore_selection.py +++ b/nova/tests/virt/vmwareapi/test_ds_util_datastore_selection.py @@ -30,10 +30,11 @@ class VMwareDSUtilDatastoreSelectionTestCase(test.NoDBTestCase): def setUp(self): super(VMwareDSUtilDatastoreSelectionTestCase, self).setUp() self.data = [ - ['VMFS', 'os-some-name', True, 987654321, 12346789], - ['NFS', 'another-name', True, 9876543210, 123467890], - ['BAD', 'some-name-bad', True, 98765432100, 1234678900], - ['VMFS', 'some-name-good', False, 987654321, 12346789], + ['VMFS', 'os-some-name', True, 'normal', 987654321, 12346789], + ['NFS', 'another-name', True, 'normal', 9876543210, 123467890], + ['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900], + ['VMFS', 'some-name-good', False, 'normal', 987654321, 12346789], + ['VMFS', 'new-name', True, 'inMaintenance', 987654321, 12346789] ] def build_result_set(self, mock_data, name_list=None): @@ -56,7 +57,8 @@ def build_result_set(self, mock_data, name_list=None): @property def propset_name_list(self): return ['summary.type', 'summary.name', 'summary.accessible', - 'summary.capacity', 'summary.freeSpace'] + 'summary.maintenanceMode', 'summary.capacity', + 'summary.freeSpace'] def test_filter_datastores_simple(self): datastores = self.build_result_set(self.data) @@ -95,11 +97,14 @@ def test_filter_datastores_no_match(self): def test_filter_datastores_specific_match(self): data = [ - ['VMFS', 'os-some-name', True, 987654321, 1234678], - ['NFS', 'another-name', True, 9876543210, 123467890], - ['BAD', 'some-name-bad', True, 98765432100, 1234678900], - ['VMFS', 'some-name-good', True, 987654321, 12346789], - ['VMFS', 'some-other-good', False, 987654321000, 12346789000], + ['VMFS', 'os-some-name', True, 'normal', 987654321, 1234678], + ['NFS', 'another-name', True, 'normal', 9876543210, 123467890], + ['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900], + ['VMFS', 'some-name-good', True, 'normal', 987654321, 12346789], + ['VMFS', 
'some-other-good', False, 'normal', 987654321000, + 12346789000], + ['VMFS', 'new-name', True, 'inMaintenance', 987654321000, + 12346789000] ] # only the DS some-name-good is accessible and matches the regex datastores = self.build_result_set(data) diff --git a/nova/tests/virt/vmwareapi/test_vm_util.py b/nova/tests/virt/vmwareapi/test_vm_util.py index 62b016696a..9f1f42f5b6 100644 --- a/nova/tests/virt/vmwareapi/test_vm_util.py +++ b/nova/tests/virt/vmwareapi/test_vm_util.py @@ -251,6 +251,7 @@ def test_get_cdrom_attach_config_spec(self): }, 'backing': { 'datastore': { + "summary.maintenanceMode": "normal", "summary.type": "VMFS", "summary.accessible":true, "summary.name": "fake-ds", diff --git a/nova/virt/vmwareapi/ds_util.py b/nova/virt/vmwareapi/ds_util.py index 8db0e665b0..08bf166358 100644 --- a/nova/virt/vmwareapi/ds_util.py +++ b/nova/virt/vmwareapi/ds_util.py @@ -25,6 +25,7 @@ from nova.virt.vmwareapi import vm_util LOG = logging.getLogger(__name__) +ALLOWED_DATASTORE_TYPES = ['VMFS', 'NFS'] class Datastore(object): @@ -200,33 +201,51 @@ def _select_datastore(data_stores, best_match, datastore_regex=None): continue propdict = vm_util.propset_dict(obj_content.propSet) - # Local storage identifier vSphere doesn't support CIFS or - # vfat for datastores, therefore filtered - ds_type = propdict['summary.type'] - ds_name = propdict['summary.name'] - if ((ds_type == 'VMFS' or ds_type == 'NFS') and - propdict.get('summary.accessible')): - if datastore_regex is None or datastore_regex.match(ds_name): - new_ds = Datastore( + if _is_datastore_valid(propdict, datastore_regex): + new_ds = Datastore( ref=obj_content.obj, - name=ds_name, + name=propdict['summary.name'], capacity=propdict['summary.capacity'], freespace=propdict['summary.freeSpace']) - # favor datastores with more free space - if (best_match is None or - new_ds.freespace > best_match.freespace): - best_match = new_ds + # favor datastores with more free space + if (best_match is None or + new_ds.freespace > 
best_match.freespace): + best_match = new_ds return best_match +def _is_datastore_valid(propdict, datastore_regex): + """Checks if a datastore is valid based on the following criteria. + + Criteria: + - Datastore is accessible + - Datastore is not in maintenance mode (optional) + - Datastore is of a supported disk type + - Datastore matches the supplied regex (optional) + + :param propdict: datastore summary dict + :param datastore_regex : Regex to match the name of a datastore. + """ + + # Local storage identifier vSphere doesn't support CIFS or + # vfat for datastores, therefore filtered + return (propdict.get('summary.accessible') and + (propdict.get('summary.maintenanceMode') is None or + propdict.get('summary.maintenanceMode') == 'normal') and + propdict['summary.type'] in ALLOWED_DATASTORE_TYPES and + (datastore_regex is None or + datastore_regex.match(propdict['summary.name']))) + + def get_datastore(session, cluster=None, host=None, datastore_regex=None): """Get the datastore list and choose the most preferable one.""" if cluster is None and host is None: data_stores = session._call_method(vim_util, "get_objects", "Datastore", ["summary.type", "summary.name", "summary.capacity", "summary.freeSpace", - "summary.accessible"]) + "summary.accessible", + "summary.maintenanceMode"]) else: if cluster is not None: datastore_ret = session._call_method( @@ -247,7 +266,8 @@ def get_datastore(session, cluster=None, host=None, datastore_regex=None): "Datastore", data_store_mors, ["summary.type", "summary.name", "summary.capacity", "summary.freeSpace", - "summary.accessible"]) + "summary.accessible", + "summary.maintenanceMode"]) best_match = None while data_stores: best_match = _select_datastore(data_stores, best_match, @@ -268,7 +288,7 @@ def get_datastore(session, cluster=None, host=None, datastore_regex=None): raise exception.DatastoreNotFound() -def _get_allowed_datastores(data_stores, datastore_regex, allowed_types): +def _get_allowed_datastores(data_stores, 
datastore_regex): allowed = [] for obj_content in data_stores.objects: # the propset attribute "need not be set" by returning API @@ -276,13 +296,9 @@ def _get_allowed_datastores(data_stores, datastore_regex, allowed_types): continue propdict = vm_util.propset_dict(obj_content.propSet) - # Local storage identifier vSphere doesn't support CIFS or - # vfat for datastores, therefore filtered - ds_type = propdict['summary.type'] - ds_name = propdict['summary.name'] - if (propdict['summary.accessible'] and ds_type in allowed_types): - if datastore_regex is None or datastore_regex.match(ds_name): - allowed.append(Datastore(ref=obj_content.obj, name=ds_name)) + if _is_datastore_valid(propdict, datastore_regex): + allowed.append(Datastore(ref=obj_content.obj, + name=propdict['summary.name'])) return allowed @@ -304,12 +320,12 @@ def get_available_datastores(session, cluster=None, datastore_regex=None): data_stores = session._call_method(vim_util, "get_properties_for_a_collection_of_objects", "Datastore", data_store_mors, - ["summary.type", "summary.name", "summary.accessible"]) + ["summary.type", "summary.name", "summary.accessible", + "summary.maintenanceMode"]) allowed = [] while data_stores: - allowed.extend(_get_allowed_datastores(data_stores, datastore_regex, - ['VMFS', 'NFS'])) + allowed.extend(_get_allowed_datastores(data_stores, datastore_regex)) token = _get_token(data_stores) if not token: break From b67eedb8e255776e853536f7a86217845dc57ecf Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 25 Jul 2014 10:03:26 -0700 Subject: [PATCH 201/486] Add index for reservations on (deleted, expire) the query for expire_reservations currently does a full table scan. This adds an index so frequent invocations of expire does not bog down the database. 
Change-Id: I82c6064159d7bfa7f4dc7fca5a7c9bc52b0f07fb Resolves-bug: 1348720 --- .../248_add_expire_reservations_index.py | 59 +++++++++++++++++++ nova/db/sqlalchemy/models.py | 1 + nova/tests/db/test_migrations.py | 10 ++++ 3 files changed, 70 insertions(+) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py b/nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py new file mode 100644 index 0000000000..917ea1461e --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py @@ -0,0 +1,59 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Index, MetaData, Table + +from nova.i18n import _LI +from nova.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def _get_deleted_expire_index(table): + members = sorted(['deleted', 'expire']) + for idx in table.indexes: + if sorted(idx.columns.keys()) == members: + return idx + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + reservations = Table('reservations', meta, autoload=True) + if _get_deleted_expire_index(reservations): + LOG.info(_LI('Skipped adding reservations_deleted_expire_idx ' + 'because an equivalent index already exists.')) + return + + # Based on expire_reservations query + # from: nova/db/sqlalchemy/api.py + index = Index('reservations_deleted_expire_idx', + reservations.c.deleted, reservations.c.expire) + + index.create(migrate_engine) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + reservations = Table('reservations', meta, autoload=True) + + index = _get_deleted_expire_index(reservations) + if index: + index.drop(migrate_engine) + else: + LOG.info(_LI('Skipped removing reservations_deleted_expire_idx ' + 'because index does not exist.')) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 20198ed835..e1008515cc 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -478,6 +478,7 @@ class Reservation(BASE, NovaBase): __table_args__ = ( Index('ix_reservations_project_id', 'project_id'), Index('reservations_uuid_idx', 'uuid'), + Index('reservations_deleted_expire_idx', 'deleted', 'expire'), ) id = Column(Integer, primary_key=True, nullable=False) uuid = Column(String(36), nullable=False) diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py index 03f68b05da..c7acc97ef6 100644 --- a/nova/tests/db/test_migrations.py +++ b/nova/tests/db/test_migrations.py @@ -737,6 +737,16 @@ def _post_downgrade_247(self, engine): 
self.assertTrue(pci_devices.c.vendor_id.nullable) self.assertTrue(pci_devices.c.dev_type.nullable) + def _check_248(self, engine, data): + self.assertIndexMembers(engine, 'reservations', + 'reservations_deleted_expire_idx', + ['deleted', 'expire']) + + def _post_downgrade_248(self, engine): + reservations = oslodbutils.get_table(engine, 'reservations') + index_names = [idx.name for idx in reservations.indexes] + self.assertNotIn('reservations_deleted_expire_idx', index_names) + class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn): """Test sqlalchemy-migrate migrations.""" From 155eeabbfafd4f48b47fea5b403f61a6e5c28426 Mon Sep 17 00:00:00 2001 From: Dmitry Borodaenko Date: Thu, 17 Jul 2014 21:45:03 -0700 Subject: [PATCH 202/486] Add include_locations kwarg to nova.image.API.get() To avoid feature envy from image backends that need image location URIs, functionality of nova.image.glance._get_locations() is moved into nova.image.glance.show() and exposed via a new include_locations kwarg to the nova.image.API.get() method, which, when True, simply includes a collection of location links when the image has them. 
blueprint rbd-clone-image-handler Co-Authored-By: Jay Pipes Change-Id: I5118013aa24f68520dee806c24265f2808a90588 --- nova/image/api.py | 11 +- nova/image/glance.py | 59 ++++---- nova/tests/api/ec2/test_cinder_cloud.py | 4 +- nova/tests/api/ec2/test_cloud.py | 26 ++-- nova/tests/api/ec2/test_ec2_validate.py | 2 +- .../compute/plugins/v3/test_servers.py | 10 +- .../api/openstack/compute/test_servers.py | 8 +- nova/tests/compute/test_compute.py | 8 +- nova/tests/compute/test_compute_api.py | 2 +- nova/tests/compute/test_compute_utils.py | 2 +- nova/tests/image/fake.py | 2 +- nova/tests/image/test_glance.py | 133 ++++++++---------- nova/tests/image/test_s3.py | 2 +- nova/tests/scheduler/test_scheduler.py | 2 +- 14 files changed, 135 insertions(+), 136 deletions(-) diff --git a/nova/image/api.py b/nova/image/api.py index 3ececf2fc0..1b5db39f85 100644 --- a/nova/image/api.py +++ b/nova/image/api.py @@ -67,7 +67,7 @@ def get_all(self, context, **kwargs): session = self._get_session(context) return session.detail(context, **kwargs) - def get(self, context, id_or_uri): + def get(self, context, id_or_uri, include_locations=False): """Retrieves the information record for a single disk image. If the supplied identifier parameter is a UUID, the default driver will be used to return information about the image. If the supplied @@ -77,9 +77,16 @@ def get(self, context, id_or_uri): :param context: The `nova.context.Context` object for the request :param id_or_uri: A UUID identifier or an image URI to look up image information for. + :param include_locations: (Optional) include locations in the returned + dict of information if the image service API + supports it. If the image service API does + not support the locations attribute, it will + still be included in the returned dict, as an + empty list. 
""" session, image_id = self._get_session_and_image_id(context, id_or_uri) - return session.show(context, image_id) + return session.show(context, image_id, + include_locations=include_locations) def create(self, context, image_info, data=None): """Creates a new image record, optionally passing the image bits to diff --git a/nova/image/glance.py b/nova/image/glance.py index b28629a980..970e2a4f2b 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -284,18 +284,41 @@ def detail(self, context, **kwargs): return _images - def show(self, context, image_id): - """Returns a dict with image data for the given opaque image id.""" + def show(self, context, image_id, include_locations=False): + """Returns a dict with image data for the given opaque image id. + + :param context: The context object to pass to image client + :param image_id: The UUID of the image + :param include_locations: (Optional) include locations in the returned + dict of information if the image service API + supports it. If the image service API does + not support the locations attribute, it will + still be included in the returned dict, as an + empty list. 
+ """ + version = 1 + if include_locations: + version = 2 try: - image = self._client.call(context, 1, 'get', image_id) + image = self._client.call(context, version, 'get', image_id) except Exception: _reraise_translated_image_exception(image_id) if not _is_image_available(context, image): raise exception.ImageNotFound(image_id=image_id) - base_image_meta = _translate_from_glance(image) - return base_image_meta + image = _translate_from_glance(image) + if include_locations: + locations = image.get('locations', None) or [] + du = image.get('direct_url', None) + if du: + locations.append({'url': du, 'metadata': {}}) + image['locations'] = locations + else: + image.pop('locations', None) + image.pop('direct_url', None) + + return image def _get_transfer_module(self, scheme): try: @@ -310,8 +333,8 @@ def _get_transfer_module(self, scheme): def download(self, context, image_id, data=None, dst_path=None): """Calls out to Glance for data and writes data.""" if CONF.glance.allowed_direct_url_schemes and dst_path is not None: - locations = _get_locations(self._client, context, image_id) - for entry in locations: + image = self.show(context, image_id, include_locations=True) + for entry in image.get('locations', []): loc_url = entry['url'] loc_meta = entry['metadata'] o = urlparse.urlparse(loc_url) @@ -396,25 +419,6 @@ def delete(self, context, image_id): return True -def _get_locations(client, context, image_id): - """Returns the direct url representing the backend storage location, - or None if this attribute is not shown by Glance. 
- """ - try: - image_meta = client.call(context, 2, 'get', image_id) - except Exception: - _reraise_translated_image_exception(image_id) - - if not _is_image_available(context, image_meta): - raise exception.ImageNotFound(image_id=image_id) - - locations = getattr(image_meta, 'locations', []) - du = getattr(image_meta, 'direct_url', None) - if du: - locations.append({'url': du, 'metadata': {}}) - return locations - - def _extract_query_params(params): _params = {} accepted_params = ('filters', 'marker', 'limit', @@ -538,7 +542,8 @@ def _extract_attributes(image): 'container_format', 'status', 'id', 'name', 'created_at', 'updated_at', 'deleted', 'deleted_at', 'checksum', - 'min_disk', 'min_ram', 'is_public'] + 'min_disk', 'min_ram', 'is_public', + 'direct_url', 'locations'] queued = getattr(image, 'status') == 'queued' queued_exclude_attrs = ['disk_format', 'container_format'] diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py index 2c83e71b9c..0d69a9ac86 100644 --- a/nova/tests/api/ec2/test_cinder_cloud.py +++ b/nova/tests/api/ec2/test_cinder_cloud.py @@ -94,7 +94,7 @@ def setUp(self): self.flags(compute_driver='nova.virt.fake.FakeDriver', volume_api_class='nova.tests.fake_volume.API') - def fake_show(meh, context, id): + def fake_show(meh, context, id, **kwargs): return {'id': id, 'name': 'fake_name', 'container_format': 'ami', @@ -686,7 +686,7 @@ def _setUpImageSet(self, create_volumes_and_snapshots=False): 'mappings': mappings2, 'block_device_mapping': block_device_mapping2}} - def fake_show(meh, context, image_id): + def fake_show(meh, context, image_id, **kwargs): _images = [copy.deepcopy(image1), copy.deepcopy(image2)] for i in _images: if str(i['id']) == str(image_id): diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 0fb9b018c1..d2e41a67e8 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -142,7 +142,7 @@ def setUp(self): 
self.useFixture(fixtures.FakeLogger('boto')) fake_utils.stub_out_utils_spawn_n(self.stubs) - def fake_show(meh, context, id): + def fake_show(meh, context, id, **kwargs): return {'id': id, 'name': 'fake_name', 'container_format': 'ami', @@ -1503,7 +1503,7 @@ def _setUpImageSet(self, create_volumes_and_snapshots=False): 'mappings': mappings2, 'block_device_mapping': block_device_mapping2}} - def fake_show(meh, context, image_id): + def fake_show(meh, context, image_id, **kwargs): _images = [copy.deepcopy(image1), copy.deepcopy(image2)] for i in _images: if str(i['id']) == str(image_id): @@ -1599,7 +1599,7 @@ def test_describe_image_mapping(self): def test_describe_image_attribute(self): describe_image_attribute = self.cloud.describe_image_attribute - def fake_show(meh, context, id): + def fake_show(meh, context, id, **kwargs): return {'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fake_name', 'status': 'active', @@ -1667,7 +1667,7 @@ def test_modify_image_attribute(self): 'type': 'machine'}, 'is_public': False} - def fake_show(meh, context, id): + def fake_show(meh, context, id, **kwargs): return copy.deepcopy(fake_metadata) def fake_detail(self, context, **kwargs): @@ -1940,7 +1940,7 @@ def test_run_instances(self): 'max_count': 1} run_instances = self.cloud.run_instances - def fake_show(self, context, id): + def fake_show(self, context, id, **kwargs): return {'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fake_name', 'properties': { @@ -1971,7 +1971,7 @@ def test_run_instances_invalid_maxcount(self): 'max_count': 0} run_instances = self.cloud.run_instances - def fake_show(self, context, id): + def fake_show(self, context, id, **kwargs): return {'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fake_name', 'container_format': 'ami', @@ -1991,7 +1991,7 @@ def test_run_instances_invalid_mincount(self): 'min_count': 0} run_instances = self.cloud.run_instances - def fake_show(self, context, id): + def fake_show(self, context, id, **kwargs): return 
{'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fake_name', 'container_format': 'ami', @@ -2012,7 +2012,7 @@ def test_run_instances_invalid_count(self): 'min_count': 2} run_instances = self.cloud.run_instances - def fake_show(self, context, id): + def fake_show(self, context, id, **kwargs): return {'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fake_name', 'container_format': 'ami', @@ -2034,7 +2034,7 @@ def test_run_instances_availability_zone(self): } run_instances = self.cloud.run_instances - def fake_show(self, context, id): + def fake_show(self, context, id, **kwargs): return {'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fake_name', 'properties': { @@ -2085,7 +2085,7 @@ def test_run_instances_idempotent(self): run_instances = self.cloud.run_instances - def fake_show(self, context, id): + def fake_show(self, context, id, **kwargs): return {'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fake_name', 'properties': { @@ -2185,7 +2185,7 @@ def test_run_instances_image_status_active(self): 'max_count': 1} run_instances = self.cloud.run_instances - def fake_show_stat_active(self, context, id): + def fake_show_stat_active(self, context, id, **kwargs): return {'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fake_name', 'container_format': 'ami', @@ -2461,7 +2461,7 @@ def _do_test_create_image(self, no_reboot): 'max_count': 1} ec2_instance_id = self._run_instance(**kwargs) - def fake_show(meh, context, id): + def fake_show(meh, context, id, **kwargs): bdm = [dict(snapshot_id=snapshots[0], volume_size=1, device_name='sda1', @@ -2777,7 +2777,7 @@ def test_dia_iisb(expected_result, **kwargs): test_dia_iisb('stop', image_id='ami-2', block_device_mapping=block_device_mapping) - def fake_show(self, context, id_): + def fake_show(self, context, id_, **kwargs): LOG.debug("id_ %s", id_) prop = {} diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py index 61861e6b57..f09de8958f 100644 --- 
a/nova/tests/api/ec2/test_ec2_validate.py +++ b/nova/tests/api/ec2/test_ec2_validate.py @@ -86,7 +86,7 @@ def dumb(*args, **kwargs): self.volume_id_exception_map.extend([(x, exception.VolumeNotFound) for x in self.EC2_VALID__IDS]) - def fake_show(meh, context, id): + def fake_show(meh, context, id, **kwargs): return {'id': id, 'container_format': 'ami', 'properties': { diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index 5d2d483fe7..f1f08a7d6d 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -1413,7 +1413,7 @@ def test_rebuild_instance_with_metadata_value_too_long(self): def test_rebuild_instance_fails_when_min_ram_too_small(self): # make min_ram larger than our instance ram size - def fake_get_image(self, context, image_href): + def fake_get_image(self, context, image_href, **kwargs): return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', name='public image', is_public=True, status='active', properties={'key1': 'value1'}, @@ -1428,7 +1428,7 @@ def fake_get_image(self, context, image_href): def test_rebuild_instance_fails_when_min_disk_too_small(self): # make min_disk larger than our instance disk size - def fake_get_image(self, context, image_href): + def fake_get_image(self, context, image_href, **kwargs): return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', name='public image', is_public=True, status='active', properties={'key1': 'value1'}, @@ -1444,7 +1444,7 @@ def test_rebuild_instance_image_too_large(self): # make image size larger than our instance disk size size = str(1000 * (1024 ** 3)) - def fake_get_image(self, context, image_href): + def fake_get_image(self, context, image_href, **kwargs): return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', name='public image', is_public=True, status='active', size=size) @@ -1456,7 +1456,7 @@ def fake_get_image(self, context, 
image_href): self.req, FAKE_UUID, body=self.body) def test_rebuild_instance_name_all_blank(self): - def fake_get_image(self, context, image_href): + def fake_get_image(self, context, image_href, **kwargs): return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', name='public image', is_public=True, status='active') @@ -1468,7 +1468,7 @@ def fake_get_image(self, context, image_href): self.req, FAKE_UUID, body=self.body) def test_rebuild_instance_with_deleted_image(self): - def fake_get_image(self, context, image_href): + def fake_get_image(self, context, image_href, **kwargs): return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', name='public image', is_public=True, status='DELETED') diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index 56306e1c77..fa67c8d3a4 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -1562,7 +1562,7 @@ def test_rebuild_instance_with_metadata_value_too_long(self): def test_rebuild_instance_fails_when_min_ram_too_small(self): # make min_ram larger than our instance ram size - def fake_get_image(self, context, image_href): + def fake_get_image(self, context, image_href, **kwargs): return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', name='public image', is_public=True, status='active', properties={'key1': 'value1'}, @@ -1577,7 +1577,7 @@ def fake_get_image(self, context, image_href): def test_rebuild_instance_fails_when_min_disk_too_small(self): # make min_disk larger than our instance disk size - def fake_get_image(self, context, image_href): + def fake_get_image(self, context, image_href, **kwargs): return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', name='public image', is_public=True, status='active', properties={'key1': 'value1'}, @@ -1593,7 +1593,7 @@ def test_rebuild_instance_image_too_large(self): # make image size larger than our instance disk size size = str(1000 * (1024 ** 3)) - def 
fake_get_image(self, context, image_href): + def fake_get_image(self, context, image_href, **kwargs): return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', name='public image', is_public=True, status='active', size=size) @@ -1604,7 +1604,7 @@ def fake_get_image(self, context, image_href): self.controller._action_rebuild, self.req, FAKE_UUID, self.body) def test_rebuild_instance_with_deleted_image(self): - def fake_get_image(self, context, image_href): + def fake_get_image(self, context, image_href, **kwargs): return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', name='public image', is_public=True, status='DELETED') diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 6448ceb2b6..c5e17014eb 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -223,7 +223,7 @@ def fake_compute_node_delete(context, compute_node_id): self.none_quotas = objects.Quotas.from_reservations( self.context, None) - def fake_show(meh, context, id): + def fake_show(meh, context, id, **kwargs): if id: return {'id': id, 'min_disk': None, 'min_ram': None, 'name': 'fake_name', @@ -2835,7 +2835,7 @@ def test_snapshot_fails_cleanup_ignores_exception(self): def _test_snapshot_deletes_image_on_failure(self, status, exc): self.fake_image_delete_called = False - def fake_show(self_, context, image_id): + def fake_show(self_, context, image_id, **kwargs): self.assertEqual('fakesnap', image_id) image = {'id': image_id, 'status': status} @@ -6842,7 +6842,7 @@ def fake_get_nw_info(cls, ctxt, instance): 'ramdisk_id': 'fake_ramdisk_id'}, } - def fake_show(obj, context, image_id): + def fake_show(obj, context, image_id, **kwargs): if image_id: return self.fake_image else: @@ -10574,7 +10574,7 @@ class ComputeInactiveImageTestCase(BaseTestCase): def setUp(self): super(ComputeInactiveImageTestCase, self).setUp() - def fake_show(meh, context, id): + def fake_show(meh, context, id, **kwargs): return {'id': id, 'min_disk': None, 
'min_ram': None, 'name': 'fake_name', 'status': 'deleted', diff --git a/nova/tests/compute/test_compute_api.py b/nova/tests/compute/test_compute_api.py index 3356e458b1..73e9ba4e7e 100644 --- a/nova/tests/compute/test_compute_api.py +++ b/nova/tests/compute/test_compute_api.py @@ -1833,7 +1833,7 @@ def _setup_fake_image_with_disabled_disk_config(self): 'properties': {"auto_disk_config": "Disabled"}, } - def fake_show(obj, context, image_id): + def fake_show(obj, context, image_id, **kwargs): return self.fake_image fake_image.stub_out_image_service(self.stubs) self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py index b35d768224..a9d13b00bb 100644 --- a/nova/tests/compute/test_compute_utils.py +++ b/nova/tests/compute/test_compute_utils.py @@ -409,7 +409,7 @@ def fake_get_nw_info(cls, ctxt, instance): self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) - def fake_show(meh, context, id): + def fake_show(meh, context, id, **kwargs): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} self.stubs.Set(nova.tests.image.fake._FakeImageService, diff --git a/nova/tests/image/fake.py b/nova/tests/image/fake.py index e810ef4e99..1a6adce9c9 100644 --- a/nova/tests/image/fake.py +++ b/nova/tests/image/fake.py @@ -167,7 +167,7 @@ def download(self, context, image_id, dst_path=None, data=None): with open(dst_path, 'wb') as data: data.write(self._imagedata.get(image_id, '')) - def show(self, context, image_id): + def show(self, context, image_id, include_locations=False): """Get data about specified image. Returns a dict containing image data for the given opaque image id. 
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index c36eec662d..73f2720a61 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -219,7 +219,8 @@ class MyGlanceStubClient(glance_stubs.StubGlanceClient): def get(self, image_id): return type('GlanceTestDirectUrlMeta', (object,), - {'direct_url': 'file://%s' + self.s_tmpfname}) + {'status': 'active', + 'direct_url': 'file://%s' + self.s_tmpfname}) client = MyGlanceStubClient() (outfd, tmpfname) = tempfile.mkstemp(prefix='directURLdst') @@ -248,7 +249,8 @@ class MyGlanceStubClient(glance_stubs.StubGlanceClient): def get(self, image_id): return type('GlanceLocations', (object,), - {'locations': [ + {'status': 'active', + 'locations': [ {'url': 'file:///' + os.devnull, 'metadata': desc}]}) @@ -287,7 +289,8 @@ class MyGlanceStubClient(glance_stubs.StubGlanceClient): def get(self, image_id): return type('GlanceLocations', (object,), - {'locations': [ + {'status': 'active', + 'locations': [ {'url': 'file:///' + os.devnull, 'metadata': desc}]}) @@ -327,7 +330,8 @@ class MyGlanceStubClient(glance_stubs.StubGlanceClient): def get(self, image_id): return type('GlanceLocations', (object,), - {'locations': [{'url': file_url, + {'status': 'active', + 'locations': [{'url': file_url, 'metadata': file_system_desc}]}) def data(self, image_id): @@ -362,7 +366,8 @@ class MyGlanceStubClient(glance_stubs.StubGlanceClient): def get(self, image_id): return type('GlanceLocations', (object,), - {'locations': [{'url': file_url, + {'status': 'active', + 'locations': [{'url': file_url, 'metadata': {}}]}) def data(self, image_id): @@ -488,6 +493,8 @@ def __init__(self, metadata): 'status': None, 'properties': {}, 'owner': None, + 'locations': None, + 'direct_url': None } self.assertEqual(expected, observed) @@ -504,68 +511,6 @@ def get(self, image_id): return MyGlanceStubClient() -class TestGetLocations(test.NoDBTestCase): - """Tests the internal _get_locations function.""" - - 
class ImageSpecV2(object): - visibility = None - properties = None - locations = None - direct_url = None - - @mock.patch('nova.image.glance._is_image_available') - def test_success_has_locations(self, avail_mock): - avail_mock.return_value = True - locations = [ - mock.sentinel.loc1 - ] - image_meta = mock.MagicMock(locations=locations, - spec=TestGetLocations.ImageSpecV2) - - client_mock = mock.MagicMock() - client_mock.call.return_value = image_meta - locs = glance._get_locations(client_mock, mock.sentinel.ctx, - mock.sentinel.image_id) - client_mock.call.assert_called_once_with(mock.sentinel.ctx, - 2, 'get', - mock.sentinel.image_id) - self.assertEqual(locations, locs) - avail_mock.assert_called_once_with(mock.sentinel.ctx, image_meta) - - @mock.patch('nova.image.glance._is_image_available') - def test_success_direct_uri_added_to_locations(self, avail_mock): - avail_mock.return_value = True - locations = [ - mock.sentinel.loc1 - ] - image_meta = mock.MagicMock(locations=locations, - spec=TestGetLocations.ImageSpecV2, - direct_uri=mock.sentinel.duri) - - client_mock = mock.MagicMock() - client_mock.call.return_value = image_meta - locs = glance._get_locations(client_mock, mock.sentinel.ctx, - mock.sentinel.image_id) - client_mock.call.assert_called_once_with(mock.sentinel.ctx, - 2, 'get', - mock.sentinel.image_id) - expected = locations - expected.append({"url": mock.sentinel.duri, "metadata": {}}) - self.assertEqual(expected, locs) - - @mock.patch('nova.image.glance._reraise_translated_image_exception') - @mock.patch('nova.image.glance._is_image_available') - def test_get_locations_not_found(self, avail_mock, reraise_mock): - raised = exception.ImageNotFound(image_id=123) - reraise_mock.side_effect = raised - - client_mock = mock.MagicMock() - client_mock.call.side_effect = glanceclient.exc.NotFound - self.assertRaises(exception.ImageNotFound, glance._get_locations, - client_mock, mock.sentinel.ctx, - mock.sentinel.image_id) - - class 
TestIsImageAvailable(test.NoDBTestCase): """Tests the internal _is_image_available function.""" @@ -697,18 +642,19 @@ class TestShow(test.NoDBTestCase): @mock.patch('nova.image.glance._is_image_available') def test_show_success(self, is_avail_mock, trans_from_mock): is_avail_mock.return_value = True - trans_from_mock.return_value = mock.sentinel.trans_from + trans_from_mock.return_value = {'mock': mock.sentinel.trans_from} client = mock.MagicMock() - client.call.return_value = mock.sentinel.images_0 + client.call.return_value = {} ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) info = service.show(ctx, mock.sentinel.image_id) client.call.assert_called_once_with(ctx, 1, 'get', mock.sentinel.image_id) - is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0) - trans_from_mock.assert_called_once_with(mock.sentinel.images_0) - self.assertEqual(mock.sentinel.trans_from, info) + is_avail_mock.assert_called_once_with(ctx, {}) + trans_from_mock.assert_called_once_with({}) + self.assertIn('mock', info) + self.assertEqual(mock.sentinel.trans_from, info['mock']) @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') @@ -753,7 +699,7 @@ def test_show_queued_image_without_some_attrs(self, is_avail_mock): client = mock.MagicMock() # fake image cls without disk_format, container_format, name attributes - class fake_image_cls(object): + class fake_image_cls(dict): id = 'b31aa5dd-f07a-4748-8f15-398346887584' deleted = False protected = False @@ -781,6 +727,47 @@ class fake_image_cls(object): self.assertEqual(NOVA_IMAGE_ATTRIBUTES, set(image_info.keys())) + @mock.patch('nova.image.glance._translate_from_glance') + @mock.patch('nova.image.glance._is_image_available') + def test_include_locations_success(self, avail_mock, trans_from_mock): + locations = [mock.sentinel.loc1] + avail_mock.return_value = True + trans_from_mock.return_value = {'locations': locations} + + client = mock.Mock() + 
client.call.return_value = mock.sentinel.image + service = glance.GlanceImageService(client) + ctx = mock.sentinel.ctx + image_id = mock.sentinel.image_id + info = service.show(ctx, image_id, include_locations=True) + + client.call.assert_called_once_with(ctx, 2, 'get', image_id) + avail_mock.assert_called_once_with(ctx, mock.sentinel.image) + trans_from_mock.assert_called_once_with(mock.sentinel.image) + self.assertIn('locations', info) + self.assertEqual(locations, info['locations']) + + @mock.patch('nova.image.glance._translate_from_glance') + @mock.patch('nova.image.glance._is_image_available') + def test_include_direct_uri_success(self, avail_mock, trans_from_mock): + locations = [mock.sentinel.loc1] + avail_mock.return_value = True + trans_from_mock.return_value = {'locations': locations, + 'direct_uri': mock.sentinel.duri} + + client = mock.Mock() + client.call.return_value = mock.sentinel.image + service = glance.GlanceImageService(client) + ctx = mock.sentinel.ctx + image_id = mock.sentinel.image_id + info = service.show(ctx, image_id, include_locations=True) + + client.call.assert_called_once_with(ctx, 2, 'get', image_id) + expected = locations + expected.append({'url': mock.sentinel.duri, 'metadata': {}}) + self.assertIn('locations', info) + self.assertEqual(expected, info['locations']) + class TestDetail(test.NoDBTestCase): diff --git a/nova/tests/image/test_s3.py b/nova/tests/image/test_s3.py index d5fb984a30..7472e0fa79 100644 --- a/nova/tests/image/test_s3.py +++ b/nova/tests/image/test_s3.py @@ -123,7 +123,7 @@ def test_show_translates_correctly(self): self.image_service.show(self.context, '1') def test_show_translates_image_state_correctly(self): - def my_fake_show(self, context, image_id): + def my_fake_show(self, context, image_id, **kwargs): fake_state_map = { '155d900f-4e14-4e4c-a73d-069cbf4541e6': 'downloading', 'a2459075-d96c-40d5-893e-577ff92e721c': 'failed_decrypt', diff --git a/nova/tests/scheduler/test_scheduler.py 
b/nova/tests/scheduler/test_scheduler.py index b3d0c7343d..eb1f3d4888 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -280,7 +280,7 @@ def setUp(self): super(SchedulerTestCase, self).setUp() self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI) - def fake_show(meh, context, id): + def fake_show(meh, context, id, **kwargs): if id: return {'id': id, 'min_disk': None, 'min_ram': None, 'name': 'fake_name', From 112b140e2daa7207a8d37c77a92456b155f3ecb9 Mon Sep 17 00:00:00 2001 From: Dmitry Borodaenko Date: Mon, 19 May 2014 16:30:14 -0700 Subject: [PATCH 203/486] Enable cloning for rbd-backed ephemeral disks Currently when using rbd as an image backend, nova downloads the glance image to local disk and then copies it again into rbd. This can be very slow for large images, and wastes bandwidth as well as disk space. When the glance image is stored in the same ceph cluster, the data is being pulled out and pushed back in unnecessarily. Instead, create a copy-on-write clone of the image. This is fast, and does not depend on the size of the image. Instead of taking minutes, booting takes seconds, and is not limited by the disk copy. Add some rbd utility functions from cinder to support cloning and let the rbd imagebackend rely on librbd instead of the rbd command line tool for checking image existence. Adds a new clone() method to the image backend, so backends like rbd can make optimizations like this. Try to use clone() for the root disk when it comes from an image, but fall back to fetch_to_raw() if clone() fails. Instead of calling disk.get_disk_size() directly from verify_base_size(), which assumes the disk is stored locally, add a new method that is overridden by the Rbd subclass to get the disk size. 
DocImpact Implements: blueprint rbd-clone-image-handler Closes-Bug: 1226351 Co-Authored-By: Josh Durgin Signed-Off-By: Josh Durgin Signed-Off-By: Zhi Yan Liu Signed-Off-By: Dmitry Borodaenko Change-Id: I0f50659b54a92fc21086990be8925ea15008569a --- nova/tests/virt/libvirt/test_imagebackend.py | 67 ++++++++++-- nova/tests/virt/libvirt/test_rbd.py | 101 ++++++++++++++++++- nova/virt/libvirt/driver.py | 24 +++-- nova/virt/libvirt/imagebackend.py | 76 ++++++++++++-- nova/virt/libvirt/rbd.py | 95 +++++++++++++++-- 5 files changed, 330 insertions(+), 33 deletions(-) diff --git a/nova/tests/virt/libvirt/test_imagebackend.py b/nova/tests/virt/libvirt/test_imagebackend.py index 06f4f2e7ee..de429ccf2a 100644 --- a/nova/tests/virt/libvirt/test_imagebackend.py +++ b/nova/tests/virt/libvirt/test_imagebackend.py @@ -380,14 +380,14 @@ def test_create_image_with_size(self): def test_create_image_too_small(self): fn = self.prepare_mocks() self.mox.StubOutWithMock(os.path, 'exists') - self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size') + self.mox.StubOutWithMock(imagebackend.Qcow2, 'get_disk_size') if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.DISK_INFO_PATH).AndReturn(False) os.path.exists(self.INSTANCES_PATH).AndReturn(True) os.path.exists(self.TEMPLATE_PATH).AndReturn(True) - imagebackend.disk.get_disk_size(self.TEMPLATE_PATH - ).AndReturn(self.SIZE) + imagebackend.Qcow2.get_disk_size(self.TEMPLATE_PATH + ).AndReturn(self.SIZE) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) @@ -693,6 +693,7 @@ def test_cache(self): self.mox.VerifyAll() def test_cache_base_dir_exists(self): + fn = self.mox.CreateMockAnything() image = self.image_class(self.INSTANCE, self.NAME) self.mox.StubOutWithMock(os.path, 'exists') @@ -748,22 +749,72 @@ def test_create_image(self): fake_processutils.fake_execute_clear_log() fake_processutils.stub_out_processutils_execute(self.stubs) - 
self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size') - imagebackend.disk.get_disk_size(self.TEMPLATE_PATH - ).AndReturn(self.SIZE) + image = self.image_class(self.INSTANCE, self.NAME) + self.mox.StubOutWithMock(image, 'check_image_exists') + image.check_image_exists().AndReturn(False) + image.check_image_exists().AndReturn(False) + self.mox.ReplayAll() + + image.create_image(fn, self.TEMPLATE_PATH, None) + rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME) cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH, rbd_name, '--new-format', '--id', self.USER, '--conf', self.CONF) - self.mox.ReplayAll() + self.assertEqual(fake_processutils.fake_execute_get_log(), + [' '.join(cmd)]) + self.mox.VerifyAll() + + def test_create_image_resize(self): + fn = self.mox.CreateMockAnything() + full_size = self.SIZE * 2 + fn(max_size=full_size, target=self.TEMPLATE_PATH) + + rbd.rbd.RBD_FEATURE_LAYERING = 1 + + fake_processutils.fake_execute_clear_log() + fake_processutils.stub_out_processutils_execute(self.stubs) image = self.image_class(self.INSTANCE, self.NAME) - image.create_image(fn, self.TEMPLATE_PATH, None) + self.mox.StubOutWithMock(image, 'check_image_exists') + image.check_image_exists().AndReturn(False) + image.check_image_exists().AndReturn(False) + rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME) + cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH, + rbd_name, '--new-format', '--id', self.USER, + '--conf', self.CONF) + self.mox.StubOutWithMock(image, 'get_disk_size') + image.get_disk_size(rbd_name).AndReturn(self.SIZE) + self.mox.StubOutWithMock(image.driver, 'resize') + image.driver.resize(rbd_name, full_size) + + self.mox.ReplayAll() + + image.create_image(fn, self.TEMPLATE_PATH, full_size) self.assertEqual(fake_processutils.fake_execute_get_log(), [' '.join(cmd)]) self.mox.VerifyAll() + def test_create_image_already_exists(self): + rbd.rbd.RBD_FEATURE_LAYERING = 1 + + image = self.image_class(self.INSTANCE, self.NAME) + 
self.mox.StubOutWithMock(image, 'check_image_exists') + image.check_image_exists().AndReturn(True) + self.mox.StubOutWithMock(image, 'get_disk_size') + image.get_disk_size(self.TEMPLATE_PATH).AndReturn(self.SIZE) + image.check_image_exists().AndReturn(True) + rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME) + image.get_disk_size(rbd_name).AndReturn(self.SIZE) + + self.mox.ReplayAll() + + fn = self.mox.CreateMockAnything() + image.create_image(fn, self.TEMPLATE_PATH, self.SIZE) + + self.mox.VerifyAll() + def test_prealloc_image(self): CONF.set_override('preallocate_images', 'space') diff --git a/nova/tests/virt/libvirt/test_rbd.py b/nova/tests/virt/libvirt/test_rbd.py index eb5b57b743..415f2eccd7 100644 --- a/nova/tests/virt/libvirt/test_rbd.py +++ b/nova/tests/virt/libvirt/test_rbd.py @@ -13,6 +13,7 @@ import mock +from nova import exception from nova.openstack.common import log as logging from nova import test from nova import utils @@ -81,6 +82,70 @@ def setUp(self, mock_rados, mock_rbd): def tearDown(self): super(RbdTestCase, self).tearDown() + def test_good_locations(self): + locations = ['rbd://fsid/pool/image/snap', + 'rbd://%2F/%2F/%2F/%2F', ] + map(self.driver.parse_url, locations) + + def test_bad_locations(self): + locations = ['rbd://image', + 'http://path/to/somewhere/else', + 'rbd://image/extra', + 'rbd://image/', + 'rbd://fsid/pool/image/', + 'rbd://fsid/pool/image/snap/', + 'rbd://///', ] + for loc in locations: + self.assertRaises(exception.ImageUnacceptable, + self.driver.parse_url, loc) + self.assertFalse(self.driver.is_cloneable({'url': loc}, + {'disk_format': 'raw'})) + + @mock.patch.object(rbd.RBDDriver, '_get_fsid') + @mock.patch.object(rbd, 'rbd') + @mock.patch.object(rbd, 'rados') + def test_cloneable(self, mock_rados, mock_rbd, mock_get_fsid): + mock_get_fsid.return_value = 'abc' + location = {'url': 'rbd://abc/pool/image/snap'} + info = {'disk_format': 'raw'} + self.assertTrue(self.driver.is_cloneable(location, info)) + 
self.assertTrue(mock_get_fsid.called) + + @mock.patch.object(rbd.RBDDriver, '_get_fsid') + def test_uncloneable_different_fsid(self, mock_get_fsid): + mock_get_fsid.return_value = 'abc' + location = {'url': 'rbd://def/pool/image/snap'} + self.assertFalse( + self.driver.is_cloneable(location, {'disk_format': 'raw'})) + self.assertTrue(mock_get_fsid.called) + + @mock.patch.object(rbd.RBDDriver, '_get_fsid') + @mock.patch.object(rbd, 'RBDVolumeProxy') + @mock.patch.object(rbd, 'rbd') + @mock.patch.object(rbd, 'rados') + def test_uncloneable_unreadable(self, mock_rados, mock_rbd, mock_proxy, + mock_get_fsid): + mock_get_fsid.return_value = 'abc' + location = {'url': 'rbd://abc/pool/image/snap'} + + mock_proxy.side_effect = mock_rbd.Error + + self.assertFalse( + self.driver.is_cloneable(location, {'disk_format': 'raw'})) + mock_proxy.assert_called_once_with(self.driver, 'image', pool='pool', + snapshot='snap', read_only=True) + self.assertTrue(mock_get_fsid.called) + + @mock.patch.object(rbd.RBDDriver, '_get_fsid') + def test_uncloneable_bad_format(self, mock_get_fsid): + mock_get_fsid.return_value = 'abc' + location = {'url': 'rbd://abc/pool/image/snap'} + formats = ['qcow2', 'vmdk', 'vdi'] + for f in formats: + self.assertFalse( + self.driver.is_cloneable(location, {'disk_format': f})) + self.assertTrue(mock_get_fsid.called) + @mock.patch.object(utils, 'execute') def test_get_mon_addrs(self, mock_execute): mock_execute.return_value = (CEPH_MON_DUMP, '') @@ -88,6 +153,37 @@ def test_get_mon_addrs(self, mock_execute): ports = ['6789', '6790', '6791', '6792', '6791'] self.assertEqual((hosts, ports), self.driver.get_mon_addrs()) + @mock.patch.object(rbd, 'RADOSClient') + @mock.patch.object(rbd, 'rbd') + @mock.patch.object(rbd, 'rados') + def test_clone(self, mock_rados, mock_rbd, mock_client): + pool = u'images' + image = u'image-name' + snap = u'snapshot-name' + location = {'url': u'rbd://fsid/%s/%s/%s' % (pool, image, snap)} + + client_stack = [] + + def 
mock__enter__(inst): + def _inner(): + client_stack.append(inst) + return inst + return _inner + + client = mock_client.return_value + # capture both rados client used to perform the clone + client.__enter__.side_effect = mock__enter__(client) + + rbd = mock_rbd.RBD.return_value + + self.driver.clone(location, self.volume_name) + + args = [client_stack[0].ioctx, str(image), str(snap), + client_stack[1].ioctx, str(self.volume_name)] + kwargs = {'features': mock_rbd.RBD_FEATURE_LAYERING} + rbd.clone.assert_called_once_with(*args, **kwargs) + self.assertEqual(client.__enter__.call_count, 2) + @mock.patch.object(rbd, 'RBDVolumeProxy') def test_resize(self, mock_proxy): size = 1024 @@ -163,8 +259,11 @@ def test_ceph_args_rbd_user_and_ceph_conf(self): @mock.patch.object(rbd, 'RBDVolumeProxy') def test_exists(self, mock_proxy): + snapshot = 'snap' proxy = mock_proxy.return_value - self.assertTrue(self.driver.exists(self.volume_name)) + self.assertTrue(self.driver.exists(self.volume_name, + self.rbd_pool, + snapshot)) proxy.__enter__.assert_called_once_with() proxy.__exit__.assert_called_once_with(None, None, None) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 11ebf34076..47d670b6a4 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -2684,13 +2684,23 @@ def raw(fname): if size == 0 or suffix == '.rescue': size = None - image('disk').cache(fetch_func=libvirt_utils.fetch_image, - context=context, - filename=root_fname, - size=size, - image_id=disk_images['image_id'], - user_id=instance['user_id'], - project_id=instance['project_id']) + backend = image('disk') + if backend.SUPPORTS_CLONE: + def clone_fallback_to_fetch(*args, **kwargs): + try: + backend.clone(context, disk_images['image_id']) + except exception.ImageUnacceptable: + libvirt_utils.fetch_image(*args, **kwargs) + fetch_func = clone_fallback_to_fetch + else: + fetch_func = libvirt_utils.fetch_image + backend.cache(fetch_func=fetch_func, + context=context, + 
filename=root_fname, + size=size, + image_id=disk_images['image_id'], + user_id=instance['user_id'], + project_id=instance['project_id']) # Lookup the filesystem type if required os_type_with_default = disk.get_fs_type_for_os_type( diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index 9cf96e4c66..184d7fe742 100644 --- a/nova/virt/libvirt/imagebackend.py +++ b/nova/virt/libvirt/imagebackend.py @@ -23,6 +23,7 @@ from nova import exception from nova.i18n import _ from nova.i18n import _LE +from nova import image from nova.openstack.common import excutils from nova.openstack.common import fileutils from nova.openstack.common import jsonutils @@ -72,11 +73,14 @@ CONF.import_opt('rbd_secret_uuid', 'nova.virt.libvirt.volume', group='libvirt') LOG = logging.getLogger(__name__) +IMAGE_API = image.API() @six.add_metaclass(abc.ABCMeta) class Image(object): + SUPPORTS_CLONE = False + def __init__(self, source_type, driver_format, is_block_dev=False): """Image initialization. @@ -197,8 +201,7 @@ def _can_fallocate(self): 'path': self.path}) return can_fallocate - @staticmethod - def verify_base_size(base, size, base_size=0): + def verify_base_size(self, base, size, base_size=0): """Check that the base image is not larger than size. Since images can't be generally shrunk, enforce this constraint taking account of virtual image size. 
@@ -217,7 +220,7 @@ def verify_base_size(base, size, base_size=0): return if size and not base_size: - base_size = disk.get_disk_size(base) + base_size = self.get_disk_size(base) if size < base_size: msg = _LE('%(base)s virtual size %(base_size)s ' @@ -227,6 +230,9 @@ def verify_base_size(base, size, base_size=0): 'size': size}) raise exception.FlavorDiskTooSmall() + def get_disk_size(self, name): + disk.get_disk_size(name) + def snapshot_extract(self, target, out_format): raise NotImplementedError() @@ -295,6 +301,21 @@ def is_shared_block_storage(): """True if the backend puts images on a shared block storage.""" return False + def clone(self, context, image_id_or_uri): + """Clone an image. + + Note that clone operation is backend-dependent. The backend may ask + the image API for a list of image "locations" and select one or more + of those locations to clone an image from. + + :param image_id_or_uri: The ID or URI of an image to clone. + + :raises: exception.ImageUnacceptable if it cannot be cloned + """ + reason = _('clone() is not implemented') + raise exception.ImageUnacceptable(image_id=image_id_or_uri, + reason=reason) + class Raw(Image): def __init__(self, instance=None, disk_name=None, path=None): @@ -483,6 +504,9 @@ def snapshot_extract(self, target, out_format): class Rbd(Image): + + SUPPORTS_CLONE = True + def __init__(self, instance=None, disk_name=None, path=None, **kwargs): super(Rbd, self).__init__("block", "rbd", is_block_dev=True) if path: @@ -525,7 +549,7 @@ def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode, info = vconfig.LibvirtConfigGuestDisk() hosts, ports = self.driver.get_mon_addrs() - info.device_type = device_type + info.source_device = device_type info.driver_format = 'raw' info.driver_cache = cache_mode info.target_bus = disk_bus @@ -552,16 +576,27 @@ def _can_fallocate(self): def check_image_exists(self): return self.driver.exists(self.rbd_name) + def get_disk_size(self, name): + """Returns the size of the virtual 
disk in bytes. + + The name argument is ignored since this backend already knows + its name, and callers may pass a non-existent local file path. + """ + return self.driver.size(self.rbd_name) + def create_image(self, prepare_template, base, size, *args, **kwargs): - if not os.path.exists(base): + + if not self.check_image_exists(): prepare_template(target=base, max_size=size, *args, **kwargs) else: self.verify_base_size(base, size) - self.driver.import_image(base, self.rbd_name) + # prepare_template() may have cloned the image into a new rbd + # image already instead of downloading it locally + if not self.check_image_exists(): + self.driver.import_image(base, self.rbd_name) - base_size = disk.get_disk_size(base) - if size and size > base_size: + if size and size > self.get_disk_size(self.rbd_name): self.driver.resize(self.rbd_name, size) def snapshot_extract(self, target, out_format): @@ -571,6 +606,31 @@ def snapshot_extract(self, target, out_format): def is_shared_block_storage(): return True + def clone(self, context, image_id_or_uri): + if not self.driver.supports_layering(): + reason = _('installed version of librbd does not support cloning') + raise exception.ImageUnacceptable(image_id=image_id_or_uri, + reason=reason) + + image_meta = IMAGE_API.get(context, image_id_or_uri, + include_locations=True) + locations = image_meta['locations'] + + LOG.debug('Image locations are: %(locs)s' % {'locs': locations}) + + if image_meta.get('disk_format') not in ['raw', 'iso']: + reason = _('Image is not raw format') + raise exception.ImageUnacceptable(image_id=image_id_or_uri, + reason=reason) + + for location in locations: + if self.driver.is_cloneable(location, image_meta): + return self.driver.clone(location, self.rbd_name) + + reason = _('No image locations are accessible') + raise exception.ImageUnacceptable(image_id=image_id_or_uri, + reason=reason) + class Backend(object): def __init__(self, use_cow): diff --git a/nova/virt/libvirt/rbd.py 
b/nova/virt/libvirt/rbd.py index 78084805d4..a7507f8bed 100644 --- a/nova/virt/libvirt/rbd.py +++ b/nova/virt/libvirt/rbd.py @@ -14,6 +14,8 @@ # License for the specific language governing permissions and limitations # under the License. +import urllib + try: import rados import rbd @@ -21,9 +23,11 @@ rados = None rbd = None +from nova import exception from nova.i18n import _ from nova.i18n import _LE from nova.i18n import _LW +from nova.openstack.common import excutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import utils @@ -40,14 +44,23 @@ class RBDVolumeProxy(object): The underlying librados client and ioctx can be accessed as the attributes 'client' and 'ioctx'. """ - def __init__(self, driver, name, pool=None): + def __init__(self, driver, name, pool=None, snapshot=None, + read_only=False): client, ioctx = driver._connect_to_rados(pool) try: - self.volume = rbd.Image(ioctx, str(name), snapshot=None) + snap_name = snapshot.encode('utf8') if snapshot else None + self.volume = rbd.Image(ioctx, name.encode('utf8'), + snapshot=snap_name, + read_only=read_only) + except rbd.ImageNotFound: + with excutils.save_and_reraise_exception(): + LOG.debug("rbd image %s does not exist", name) + driver._disconnect_from_rados(client, ioctx) except rbd.Error: - LOG.exception(_LE("error opening rbd image %s"), name) - driver._disconnect_from_rados(client, ioctx) - raise + with excutils.save_and_reraise_exception(): + LOG.exception(_LE("error opening rbd image %s"), name) + driver._disconnect_from_rados(client, ioctx) + self.driver = driver self.client = client self.ioctx = ioctx @@ -94,8 +107,8 @@ def _connect_to_rados(self, pool=None): conffile=self.ceph_conf) try: client.connect() - pool_to_open = str(pool or self.pool) - ioctx = client.open_ioctx(pool_to_open) + pool_to_open = pool or self.pool + ioctx = client.open_ioctx(pool_to_open.encode('utf-8')) return client, ioctx except rados.Error: # shutdown cannot raise 
an exception @@ -139,6 +152,67 @@ def get_mon_addrs(self): ports.append(port) return hosts, ports + def parse_url(self, url): + prefix = 'rbd://' + if not url.startswith(prefix): + reason = _('Not stored in rbd') + raise exception.ImageUnacceptable(image_id=url, reason=reason) + pieces = map(urllib.unquote, url[len(prefix):].split('/')) + if '' in pieces: + reason = _('Blank components') + raise exception.ImageUnacceptable(image_id=url, reason=reason) + if len(pieces) != 4: + reason = _('Not an rbd snapshot') + raise exception.ImageUnacceptable(image_id=url, reason=reason) + return pieces + + def _get_fsid(self): + with RADOSClient(self) as client: + return client.cluster.get_fsid() + + def is_cloneable(self, image_location, image_meta): + url = image_location['url'] + try: + fsid, pool, image, snapshot = self.parse_url(url) + except exception.ImageUnacceptable as e: + LOG.debug('not cloneable: %s', e) + return False + + if self._get_fsid() != fsid: + reason = '%s is in a different ceph cluster' % url + LOG.debug(reason) + return False + + if image_meta['disk_format'] != 'raw': + reason = ("rbd image clone requires image format to be " + "'raw' but image {0} is '{1}'").format( + url, image_meta['disk_format']) + LOG.debug(reason) + return False + + # check that we can read the image + try: + return self.exists(image, pool=pool, snapshot=snapshot) + except rbd.Error as e: + LOG.debug('Unable to open image %(loc)s: %(err)s' % + dict(loc=url, err=e)) + return False + + def clone(self, image_location, dest_name): + _fsid, pool, image, snapshot = self.parse_url( + image_location['url']) + LOG.debug('cloning %(pool)s/%(img)s@%(snap)s' % + dict(pool=pool, img=image, snap=snapshot)) + with RADOSClient(self, str(pool)) as src_client: + with RADOSClient(self) as dest_client: + # pylint: disable E1101 + rbd.RBD().clone(src_client.ioctx, + image.encode('utf-8'), + snapshot.encode('utf-8'), + dest_client.ioctx, + dest_name, + features=rbd.RBD_FEATURE_LAYERING) + def size(self, 
name): with RBDVolumeProxy(self, name) as vol: return vol.size() @@ -153,9 +227,12 @@ def resize(self, name, size): with RBDVolumeProxy(self, name) as vol: vol.resize(size) - def exists(self, name): + def exists(self, name, pool=None, snapshot=None): try: - with RBDVolumeProxy(self, name): + with RBDVolumeProxy(self, name, + pool=pool, + snapshot=snapshot, + read_only=True): return True except rbd.ImageNotFound: return False From d1d37d1b052039641c121f4a049cf11c43368758 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Mon, 30 Jun 2014 10:17:40 +0900 Subject: [PATCH 204/486] Extend API schema for "create a server" extensions Some v2.1/v3 APIs(create/update/rebuild server) contains the API extension feature, it searches extension methods with its method name and operates them automatically. It is necessary to change the API schemas also automatically based on the API extension loading condition, because the API extensions add API parameters. This patch adds API schema extensions which are implemented with the same mechanism (stevedore library) as the API extension for "create a server" extensions. As a sample of API schema extensions, this patch adds the schema of keypair extension. 
Partially implements blueprint v3-api-schema Change-Id: I972d648546b1fcb37c6ad5e12a0a49ce810a35f9 --- .../openstack/compute/plugins/v3/keypairs.py | 3 ++ .../openstack/compute/plugins/v3/servers.py | 28 +++++++++++++-- .../openstack/compute/schemas/v3/keypairs.py | 4 +++ .../openstack/compute/schemas/v3/servers.py | 36 +++++++++++++++++++ .../compute/plugins/v3/test_servers.py | 33 +++++++++++++++-- 5 files changed, 100 insertions(+), 4 deletions(-) create mode 100644 nova/api/openstack/compute/schemas/v3/servers.py diff --git a/nova/api/openstack/compute/plugins/v3/keypairs.py b/nova/api/openstack/compute/plugins/v3/keypairs.py index 1abbeacfe7..2f8e450108 100644 --- a/nova/api/openstack/compute/plugins/v3/keypairs.py +++ b/nova/api/openstack/compute/plugins/v3/keypairs.py @@ -180,3 +180,6 @@ def get_controller_extensions(self): # server create kwargs def server_create(self, server_dict, create_kwargs): create_kwargs['key_name'] = server_dict.get('key_name') + + def get_server_create_schema(self): + return keypairs.server_create diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index 8d647fd3f7..5f9554c7be 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -25,9 +25,11 @@ from webob import exc from nova.api.openstack import common +from nova.api.openstack.compute.schemas.v3 import servers as schema_servers from nova.api.openstack.compute.views import servers as views_servers from nova.api.openstack import extensions from nova.api.openstack import wsgi +from nova.api import validation from nova import compute from nova.compute import flavors from nova import exception @@ -70,6 +72,8 @@ class ServersController(wsgi.Controller): _view_builder_class = views_servers.ViewBuilderV3 + schema_server_create = schema_servers.base_create + @staticmethod def _add_location(robj): # Just in case... 
@@ -167,6 +171,20 @@ def check_load_extension(ext): if not list(self.update_extension_manager): LOG.debug("Did not find any server update extensions") + # Look for API schema of server create extension + self.create_schema_manager = \ + stevedore.enabled.EnabledExtensionManager( + namespace=self.EXTENSION_CREATE_NAMESPACE, + check_func=_check_load_extension('get_server_create_schema'), + invoke_on_load=True, + invoke_kwds={"extension_info": self.extension_info}, + propagate_map_exceptions=True) + if list(self.create_schema_manager): + self.create_schema_manager.map(self._create_extension_schema, + self.schema_server_create) + else: + LOG.debug("Did not find any server create schemas") + @extensions.expected_errors((400, 403)) def index(self, req): """Returns a list of server names and ids for a given user.""" @@ -411,10 +429,9 @@ def show(self, req, id): @extensions.expected_errors((400, 409, 413)) @wsgi.response(202) + @validation.schema(schema_server_create) def create(self, req, body): """Creates a new server for a given user.""" - if not self.is_valid_body(body, 'server'): - raise exc.HTTPBadRequest(_("The request body is invalid")) context = req.environ['nova.context'] server_dict = body['server'] @@ -573,6 +590,13 @@ def _update_extension_point(self, ext, update_dict, update_kwargs): LOG.debug("Running _update_extension_point for %s", ext.obj) handler.server_update(update_dict, update_kwargs) + def _create_extension_schema(self, ext, create_schema): + handler = ext.obj + LOG.debug("Running _create_extension_schema for %s", ext.obj) + + schema = handler.get_server_create_schema() + create_schema['properties']['server']['properties'].update(schema) + def _delete(self, context, req, instance_uuid): instance = self._get_server(context, req, instance_uuid) if CONF.reclaim_instance_interval: diff --git a/nova/api/openstack/compute/schemas/v3/keypairs.py b/nova/api/openstack/compute/schemas/v3/keypairs.py index 08b1961247..8d4c9f2d23 100644 --- 
a/nova/api/openstack/compute/schemas/v3/keypairs.py +++ b/nova/api/openstack/compute/schemas/v3/keypairs.py @@ -31,3 +31,7 @@ 'required': ['keypair'], 'additionalProperties': False, } + +server_create = { + 'key_name': parameter_types.name, +} diff --git a/nova/api/openstack/compute/schemas/v3/servers.py b/nova/api/openstack/compute/schemas/v3/servers.py new file mode 100644 index 0000000000..a4a91b13c1 --- /dev/null +++ b/nova/api/openstack/compute/schemas/v3/servers.py @@ -0,0 +1,36 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +base_create = { + 'type': 'object', + 'properties': { + 'server': { + 'type': 'object', + 'properties': { + # TODO(oomichi): To focus the schema extension, now these + # properties are not defined. After it, we need to define + # them. + # 'name': ... + }, + # TODO(oomichi): After all extension schema patches are merged, + # this code should be enabled. If enabling before merger, API + # extension parameters would be considered as bad parameters. + # 'additionalProperties': False, + }, + }, + 'required': ['server'], + # TODO(oomichi): ditto, enable here after all extension schema + # patches are merged. 
+ # 'additionalProperties': False, +} diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index 5d2d483fe7..baa7f1cf1b 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -15,6 +15,7 @@ # under the License. import base64 +import copy import datetime import uuid @@ -32,6 +33,8 @@ from nova.api.openstack.compute.plugins.v3 import ips from nova.api.openstack.compute.plugins.v3 import keypairs from nova.api.openstack.compute.plugins.v3 import servers +from nova.api.openstack.compute.schemas.v3 import keypairs as keypairs_schema +from nova.api.openstack.compute.schemas.v3 import servers as servers_schema from nova.api.openstack.compute import views from nova.api.openstack import extensions from nova.compute import api as compute_api @@ -2101,7 +2104,7 @@ def test_create_instance_raise_user_data_too_large(self, mock_create): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, - self.req, self.body) + self.req, body=self.body) def test_create_instance_with_network_with_no_subnet(self): network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' @@ -3080,7 +3083,7 @@ def _invalid_server_create(self, body): req = fakes.HTTPRequestV3.blank('/servers') req.method = 'POST' - self.assertRaises(webob.exc.HTTPBadRequest, + self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) def test_create_server_no_body(self): @@ -3154,3 +3157,29 @@ def test_load_rebuild_extension_point(self): def test_load_create_extension_point(self): self._test_load_extension_point('create') + + +class TestServersExtensionSchema(test.NoDBTestCase): + def setUp(self): + super(TestServersExtensionSchema, self).setUp() + CONF.set_override('extensions_whitelist', ['keypairs'], 'osapi_v3') + + def _test_load_extension_schema(self, name): + setattr(FakeExt, 'get_server_%s_schema' % name, + 
FakeExt.fake_extension_point) + ext_info = plugins.LoadedExtensionInfo() + controller = servers.ServersController(extension_info=ext_info) + self.assertTrue(hasattr(controller, '%s_schema_manager' % name)) + + delattr(FakeExt, 'get_server_%s_schema' % name) + return getattr(controller, 'schema_server_%s' % name) + + def test_load_create_extension_point(self): + # The expected is the schema combination of base and keypairs + # because of the above extensions_whitelist. + expected_schema = copy.deepcopy(servers_schema.base_create) + expected_schema['properties']['server']['properties'].update( + keypairs_schema.server_create) + + actual_schema = self._test_load_extension_schema('create') + self.assertEqual(expected_schema, actual_schema) From c71ef8e1095bf2d563ff5fae446ad1984be30f4d Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 28 Jul 2014 19:51:31 +0000 Subject: [PATCH 205/486] Updated from global requirements Change-Id: Ide162a1145a2c8b67546812fbfcdc24a5753818b --- requirements.txt | 8 ++++---- test-requirements.txt | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/requirements.txt b/requirements.txt index d118ccb14a..b21bf6f8d8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ pbr>=0.6,!=0.7,<1.0 -SQLAlchemy>=0.8.4,!=0.9.5,<=0.9.99 +SQLAlchemy>=0.8.4,<=0.8.99,>=0.9.7,<=0.9.99 anyjson>=0.3.3 argparse boto>=2.12.0,!=2.13.0 @@ -31,8 +31,8 @@ six>=1.7.0 stevedore>=0.14 websockify>=0.5.1,<0.6 wsgiref>=0.1.2 -oslo.config>=1.2.1 -oslo.rootwrap +oslo.config>=1.4.0.0a3 +oslo.rootwrap>=1.3.0.0a1 pycadf>=0.5.1 -oslo.messaging>=1.3.0 +oslo.messaging>=1.4.0.0a3 oslo.i18n>=0.1.0 # Apache-2.0 diff --git a/test-requirements.txt b/test-requirements.txt index 9c67b37cb1..971529b382 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,7 +3,7 @@ coverage>=3.6 discover feedparser fixtures>=0.3.14 -libvirt-python>=1.2.5 +libvirt-python>=1.2.5 # LGPLv2+ mock>=1.0 mox>=0.5.3 MySQL-python From 
6f983680667b7b95012f273e725b436aff611872 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 28 Jul 2014 13:18:07 -0700 Subject: [PATCH 206/486] Fix ImportError during docs generation nova/openstack/common/db/sqlalchemy/test_migrations.py has an explicit import for lockfile but lockfile is missing from requirements.txt Change-Id: I8ae62e5235d0348b9fcc30ef6269089930858aa6 --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index d118ccb14a..2b21b3e51d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -36,3 +36,4 @@ oslo.rootwrap pycadf>=0.5.1 oslo.messaging>=1.3.0 oslo.i18n>=0.1.0 # Apache-2.0 +lockfile>=0.8 From 48905ac087e1de922e9230d52cc53250b17fb580 Mon Sep 17 00:00:00 2001 From: Vladik Romanovsky Date: Mon, 21 Jul 2014 08:35:22 -0400 Subject: [PATCH 207/486] Live migration is broken for NFS shared storage One of the new checks, introduced in I2755c59b4db736151000dae351fd776d3c15ca39 is missing a check against filebased shared storage, leading to live migration being broken. 
Closes-Bug: #1346385 Change-Id: I096ecd6eadc3b13ef918ca1e6c98acdd01556c0d --- nova/tests/virt/libvirt/test_driver.py | 60 ++++++++++++++++++++++++++ nova/virt/libvirt/driver.py | 8 ++-- 2 files changed, 64 insertions(+), 4 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index bb2c5266d1..21834aff14 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -4897,6 +4897,66 @@ def fake_plug_vifs(instance, network_info): conn.pre_live_migration(self.context, instance, block_device_info=None, network_info=[], disk_info={}) + def test_pre_live_migration_image_not_created_with_shared_storage(self): + migrate_data_set = [{'is_shared_block_storage': False, + 'block_migration': False}, + {'is_shared_block_storage': True, + 'block_migration': False}, + {'is_shared_block_storage': False, + 'block_migration': True}] + + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = db.instance_create(self.context, self.test_instance) + # creating mocks + with contextlib.nested( + mock.patch.object(conn, + '_create_images_and_backing'), + mock.patch.object(conn, + 'ensure_filtering_rules_for_instance'), + mock.patch.object(conn, 'plug_vifs'), + ) as ( + create_image_mock, + rules_mock, + plug_mock, + ): + for migrate_data in migrate_data_set: + res = conn.pre_live_migration(self.context, instance, + block_device_info=None, + network_info=[], disk_info={}, + migrate_data=migrate_data) + self.assertFalse(create_image_mock.called) + self.assertIsInstance(res, dict) + + def test_pre_live_migration_with_not_shared_instance_path(self): + migrate_data = {'is_shared_block_storage': False, + 'is_shared_instance_path': False} + + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = db.instance_create(self.context, self.test_instance) + + def check_instance_dir(context, instance, + instance_dir, disk_info): + self.assertTrue(instance_dir) + # creating 
mocks + with contextlib.nested( + mock.patch.object(conn, + '_create_images_and_backing', + side_effect=check_instance_dir), + mock.patch.object(conn, + 'ensure_filtering_rules_for_instance'), + mock.patch.object(conn, 'plug_vifs'), + ) as ( + create_image_mock, + rules_mock, + plug_mock, + ): + res = conn.pre_live_migration(self.context, instance, + block_device_info=None, + network_info=[], disk_info={}, + migrate_data=migrate_data) + self.assertTrue(create_image_mock.called) + self.assertIsInstance(res, dict) + def test_get_instance_disk_info_works_correctly(self): # Test data instance_ref = db.instance_create(self.context, self.test_instance) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 6c39391ac2..d10f0e430d 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -4747,10 +4747,10 @@ def pre_live_migration(self, context, instance, block_device_info, raise exception.DestinationDiskExists(path=instance_dir) os.mkdir(instance_dir) - if not is_shared_block_storage: - # Ensure images and backing files are present. - self._create_images_and_backing(context, instance, - instance_dir, disk_info) + if not is_shared_block_storage: + # Ensure images and backing files are present. 
+ self._create_images_and_backing(context, instance, + instance_dir, disk_info) if not (is_block_migration or is_shared_instance_path): # NOTE(angdraug): when block storage is shared between source and From 8c072050cddfb3517e9e949ff9180f2d9ab8d646 Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Thu, 26 Jun 2014 15:32:29 -0700 Subject: [PATCH 208/486] Initialize Ironic virt driver directory implements bp: add-ironic-driver Change-Id: Iaee58de9c962d960688015efac95eb1a80cf1296 --- nova/virt/ironic/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 nova/virt/ironic/__init__.py diff --git a/nova/virt/ironic/__init__.py b/nova/virt/ironic/__init__.py new file mode 100644 index 0000000000..e69de29bb2 From 83275a61caed11b521e87c80f59f913474146441 Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Fri, 6 Jun 2014 15:09:54 +0800 Subject: [PATCH 209/486] Remove useless check in _add_retry_host In populate_retry, the code already check the case with force_hosts and force_nodes. (https://github.com/openstack/nova/blob/418398dcca5 52497c8488dbbf54a06e21202b295/nova/scheduler/utils.py#L134) If the retry is disable, populate_retry won't add retry info into filter_properties. _add_retry_host is called later than populate_retry. It also check the force_hosts and force_nodes, that is useless. (https://github.com /openstack/nova/blob/418398dcca552497c8488dbbf54a06e21202b295/nova/ scheduler/utils.py#L193) It only need check whether retry info exist in filter_properties or not. Change-Id: I1f83b4b584d36aa6f894180d43aee24f096b4f78 --- nova/scheduler/utils.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py index 2e7f4b67c5..5cc54287aa 100644 --- a/nova/scheduler/utils.py +++ b/nova/scheduler/utils.py @@ -196,9 +196,7 @@ def _add_retry_host(filter_properties, host, node): node has already been tried. 
""" retry = filter_properties.get('retry', None) - force_hosts = filter_properties.get('force_hosts', []) - force_nodes = filter_properties.get('force_nodes', []) - if not retry or force_hosts or force_nodes: + if not retry: return hosts = retry['hosts'] hosts.append([host, node]) From cc910afb260ab78bfc2568c686f723bff53aa37f Mon Sep 17 00:00:00 2001 From: Eli Qiao Date: Tue, 29 Jul 2014 13:50:51 +0800 Subject: [PATCH 210/486] Handle NotImplementedError in server_diagnostics v3 api In libvirt driver, it doesn't implement the function of get_instance_diagnostics().it returns http 500 error No. we expect http 501 if the function is not implemented. Change-Id: I59bfaa27b96284bd56917096e0c3fab0d277ba96 Closes-Bug: 1349680 --- .../openstack/compute/plugins/v3/server_diagnostics.py | 6 +++++- .../compute/plugins/v3/test_server_diagnostics.py | 10 ++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/compute/plugins/v3/server_diagnostics.py b/nova/api/openstack/compute/plugins/v3/server_diagnostics.py index 42f16dce61..4ca26fa3a7 100644 --- a/nova/api/openstack/compute/plugins/v3/server_diagnostics.py +++ b/nova/api/openstack/compute/plugins/v3/server_diagnostics.py @@ -19,6 +19,7 @@ from nova.api.openstack import extensions from nova import compute from nova import exception +from nova.i18n import _ ALIAS = "os-server-diagnostics" @@ -26,7 +27,7 @@ class ServerDiagnosticsController(object): - @extensions.expected_errors((404, 409)) + @extensions.expected_errors((404, 409, 501)) def index(self, req, server_id): context = req.environ["nova.context"] authorize(context) @@ -41,6 +42,9 @@ def index(self, req, server_id): except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'get_diagnostics') + except NotImplementedError: + msg = _("Unable to get diagnostics, functionality not implemented") + raise webob.exc.HTTPNotImplemented(explanation=msg) class 
ServerDiagnostics(extensions.V3APIExtensionBase): diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_diagnostics.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_diagnostics.py index e4da0c6790..4e840419e4 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_diagnostics.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_diagnostics.py @@ -109,3 +109,13 @@ def test_get_diagnostics_raise_conflict_on_invalid_state(self, '/servers/%s/os-server-diagnostics' % UUID) res = req.get_response(self.router) self.assertEqual(409, res.status_int) + + @mock.patch.object(compute_api.API, 'get_instance_diagnostics', + side_effect=NotImplementedError) + @mock.patch.object(compute_api.API, 'get', fake_instance_get) + def test_get_diagnostics_raise_no_notimplementederror(self, + mock_get_diagnostics): + req = fakes.HTTPRequestV3.blank( + '/servers/%s/os-server-diagnostics' % UUID) + res = req.get_response(self.router) + self.assertEqual(501, res.status_int) From 7a206bc58de104ed0fa1c4e960be8f9467082f05 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Thu, 24 Jul 2014 12:27:46 +0100 Subject: [PATCH 211/486] libvirt: remove use of CONF.libvirt.virt_type in vif.py The vif.py class imports the CONF.libvirt.virt_type parameter from the main driver.py file. This sets up a circular dependency which prevents driver.py importing vif.py in the top of the file, since the CONF.libvirt.virt_type parameter won't have been registered yet. It is generally bad design practice to rely on global variables, which is effectively what the CONF.* parameters are, so change the VIF driver get_config() API to accept 'virt_type' as a parameter instead. 
Related-bug: #1302796 Change-Id: I023851df96640eeeb54124add8cc5c725b8d13be --- nova/tests/fake_network.py | 3 +- nova/tests/virt/libvirt/test_driver.py | 12 ++-- nova/tests/virt/libvirt/test_vif.py | 2 +- nova/virt/libvirt/driver.py | 12 ++-- nova/virt/libvirt/vif.py | 98 ++++++++++++++------------ 5 files changed, 71 insertions(+), 56 deletions(-) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index cc1e724c5e..5761dd3bc3 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -60,7 +60,8 @@ def __init__(self, *args, **kwargs): def setattr(self, key, val): self.__setattr__(key, val) - def get_config(self, instance, vif, image_meta, inst_type): + def get_config(self, instance, vif, image_meta, + inst_type, virt_type): conf = libvirt_config.LibvirtConfigGuestInterface() for attr, val in conf.__dict__.iteritems(): diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 674fdabcb5..c217f9e688 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -7480,11 +7480,13 @@ def _test_attach_detach_interface_get_config(self, method_name): self.context, test_instance['instance_type_id']) expected = conn.vif_driver.get_config(test_instance, network_info[0], fake_image_meta, - fake_flavor) + fake_flavor, + CONF.libvirt.virt_type) self.mox.StubOutWithMock(conn.vif_driver, 'get_config') conn.vif_driver.get_config(test_instance, network_info[0], fake_image_meta, - mox.IsA(objects.Flavor)).\ + mox.IsA(objects.Flavor), + CONF.libvirt.virt_type).\ AndReturn(expected) self.mox.ReplayAll() @@ -9979,14 +9981,16 @@ def _test_attach_detach_interface(self, method, power_state, elif method == 'detach_interface': fake_image_meta = None expected = self.libvirtconnection.vif_driver.get_config( - instance, network_info[0], fake_image_meta, fake_flavor) + instance, network_info[0], fake_image_meta, fake_flavor, + CONF.libvirt.virt_type) 
self.mox.StubOutWithMock(self.libvirtconnection.vif_driver, 'get_config') self.libvirtconnection.vif_driver.get_config( instance, network_info[0], fake_image_meta, - mox.IsA(objects.Flavor)).AndReturn(expected) + mox.IsA(objects.Flavor), + CONF.libvirt.virt_type).AndReturn(expected) domain.info().AndReturn([power_state]) if method == 'attach_interface': domain.attachDeviceFlags(expected.to_xml(), expected_flags) diff --git a/nova/tests/virt/libvirt/test_vif.py b/nova/tests/virt/libvirt/test_vif.py index d36fef6e0b..3c4e409c5c 100644 --- a/nova/tests/virt/libvirt/test_vif.py +++ b/nova/tests/virt/libvirt/test_vif.py @@ -326,7 +326,7 @@ def _get_instance_xml(self, driver, vif, image_meta=None): default_inst_type['extra_specs'] = dict(extra_specs + quota_bandwidth) conf = self._get_conf() nic = driver.get_config(self.instance, vif, image_meta, - default_inst_type) + default_inst_type, CONF.libvirt.virt_type) conf.add_device(nic) return conf.to_xml() diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 47d670b6a4..eed8193253 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -1426,7 +1426,7 @@ def attach_interface(self, instance, image_meta, vif): self.vif_driver.plug(instance, vif) self.firewall_driver.setup_basic_filtering(instance, [vif]) cfg = self.vif_driver.get_config(instance, vif, image_meta, - flavor) + flavor, CONF.libvirt.virt_type) try: flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG state = LIBVIRT_POWER_STATE[virt_dom.info()[0]] @@ -1444,7 +1444,8 @@ def detach_interface(self, instance, vif): flavor = objects.Flavor.get_by_id( nova_context.get_admin_context(read_deleted='yes'), instance['instance_type_id']) - cfg = self.vif_driver.get_config(instance, vif, None, flavor) + cfg = self.vif_driver.get_config(instance, vif, None, flavor, + CONF.libvirt.virt_type) try: self.vif_driver.unplug(instance, vif) flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG @@ -3327,10 +3328,9 @@ def _get_guest_config(self, instance, 
network_info, image_meta, guest.add_device(config) for vif in network_info: - config = self.vif_driver.get_config(instance, - vif, - image_meta, - flavor) + config = self.vif_driver.get_config( + instance, vif, image_meta, + flavor, CONF.libvirt.virt_type) guest.add_device(config) if ((CONF.libvirt.virt_type == "qemu" or diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 727a4474b6..8f517ccedd 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -42,7 +42,6 @@ CONF = cfg.CONF CONF.register_opts(libvirt_vif_opts, 'libvirt') -CONF.import_opt('virt_type', 'nova.virt.libvirt.driver', group='libvirt') CONF.import_opt('use_ipv6', 'nova.netconf') DEV_PREFIX_ETH = 'eth' @@ -97,7 +96,7 @@ def get_vif_devname_with_prefix(self, vif, prefix): devname = self.get_vif_devname(vif) return prefix + devname[3:] - def get_config(self, instance, vif, image_meta, inst_type): + def get_config(self, instance, vif, image_meta, inst_type, virt_type): conf = vconfig.LibvirtConfigGuestInterface() # Default to letting libvirt / the hypervisor choose the model model = None @@ -114,20 +113,20 @@ def get_config(self, instance, vif, image_meta, inst_type): # Else if the virt type is KVM/QEMU, use virtio according # to the global config parameter if (model is None and - CONF.libvirt.virt_type in ('kvm', 'qemu') and + virt_type in ('kvm', 'qemu') and CONF.libvirt.use_virtio_for_bridges): model = network_model.VIF_MODEL_VIRTIO # Workaround libvirt bug, where it mistakenly # enables vhost mode, even for non-KVM guests if (model == network_model.VIF_MODEL_VIRTIO and - CONF.libvirt.virt_type == "qemu"): + virt_type == "qemu"): driver = "qemu" - if not is_vif_model_valid_for_virt(CONF.libvirt.virt_type, + if not is_vif_model_valid_for_virt(virt_type, model): raise exception.UnsupportedHardware(model=model, - virt=CONF.libvirt.virt_type) + virt=virt_type) designer.set_vif_guest_frontend_config( conf, vif['address'], model, driver) @@ -164,11 +163,12 @@ def 
get_firewall_required(self, vif): return True return False - def get_config_bridge(self, instance, vif, image_meta, inst_type): + def get_config_bridge(self, instance, vif, image_meta, + inst_type, virt_type): """Get VIF configurations for bridge type.""" conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, - image_meta, inst_type) + self).get_config(instance, vif, image_meta, + inst_type, virt_type) designer.set_vif_host_backend_bridge_config( conf, self.get_bridge_name(vif), @@ -183,10 +183,10 @@ def get_config_bridge(self, instance, vif, image_meta, inst_type): return conf def get_config_ovs_bridge(self, instance, vif, image_meta, - inst_type): + inst_type, virt_type): conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, - image_meta, inst_type) + self).get_config(instance, vif, image_meta, + inst_type, virt_type) designer.set_vif_host_backend_ovs_config( conf, self.get_bridge_name(vif), @@ -198,59 +198,67 @@ def get_config_ovs_bridge(self, instance, vif, image_meta, return conf def get_config_ovs_hybrid(self, instance, vif, image_meta, - inst_type): + inst_type, virt_type): newvif = copy.deepcopy(vif) newvif['network']['bridge'] = self.get_br_name(vif['id']) - return self.get_config_bridge(instance, newvif, - image_meta, inst_type) + return self.get_config_bridge(instance, newvif, image_meta, + inst_type, virt_type) - def get_config_ovs(self, instance, vif, image_meta, inst_type): + def get_config_ovs(self, instance, vif, image_meta, + inst_type, virt_type): if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled(): return self.get_config_ovs_hybrid(instance, vif, image_meta, - inst_type) + inst_type, + virt_type) else: return self.get_config_ovs_bridge(instance, vif, image_meta, - inst_type) + inst_type, + virt_type) def get_config_ivs_hybrid(self, instance, vif, image_meta, - inst_type): + inst_type, virt_type): newvif = copy.deepcopy(vif) newvif['network']['bridge'] = self.get_br_name(vif['id']) return 
self.get_config_bridge(instance, newvif, image_meta, - inst_type) + inst_type, + virt_type) def get_config_ivs_ethernet(self, instance, vif, image_meta, - inst_type): + inst_type, virt_type): conf = super(LibvirtGenericVIFDriver, self).get_config(instance, vif, image_meta, - inst_type) + inst_type, + virt_type) dev = self.get_vif_devname(vif) designer.set_vif_host_backend_ethernet_config(conf, dev) return conf - def get_config_ivs(self, instance, vif, image_meta, inst_type): + def get_config_ivs(self, instance, vif, image_meta, + inst_type, virt_type): if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled(): return self.get_config_ivs_hybrid(instance, vif, image_meta, - inst_type) + inst_type, + virt_type) else: return self.get_config_ivs_ethernet(instance, vif, image_meta, - inst_type) + inst_type, + virt_type) def get_config_802qbg(self, instance, vif, image_meta, - inst_type): + inst_type, virt_type): conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, - image_meta, inst_type) + self).get_config(instance, vif, image_meta, + inst_type, virt_type) params = vif["qbg_params"] designer.set_vif_host_backend_802qbg_config( @@ -265,10 +273,10 @@ def get_config_802qbg(self, instance, vif, image_meta, return conf def get_config_802qbh(self, instance, vif, image_meta, - inst_type): + inst_type, virt_type): conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, - image_meta, inst_type) + self).get_config(instance, vif, image_meta, + inst_type, virt_type) params = vif["qbh_params"] designer.set_vif_host_backend_802qbh_config( @@ -280,10 +288,10 @@ def get_config_802qbh(self, instance, vif, image_meta, return conf def get_config_iovisor(self, instance, vif, image_meta, - inst_type): + inst_type, virt_type): conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, - image_meta, inst_type) + self).get_config(instance, vif, image_meta, + inst_type, virt_type) dev = self.get_vif_devname(vif) 
designer.set_vif_host_backend_ethernet_config(conf, dev) @@ -293,10 +301,10 @@ def get_config_iovisor(self, instance, vif, image_meta, return conf def get_config_midonet(self, instance, vif, image_meta, - inst_type): + inst_type, virt_type): conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, - image_meta, inst_type) + self).get_config(instance, vif, image_meta, + inst_type, virt_type) dev = self.get_vif_devname(vif) designer.set_vif_host_backend_ethernet_config(conf, dev) @@ -304,10 +312,10 @@ def get_config_midonet(self, instance, vif, image_meta, return conf def get_config_mlnx_direct(self, instance, vif, image_meta, - inst_type): + inst_type, virt_type): conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, - image_meta, inst_type) + self).get_config(instance, vif, image_meta, + inst_type, virt_type) devname = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH) designer.set_vif_host_backend_direct_config(conf, devname) @@ -316,13 +324,14 @@ def get_config_mlnx_direct(self, instance, vif, image_meta, return conf - def get_config(self, instance, vif, image_meta, inst_type): + def get_config(self, instance, vif, image_meta, + inst_type, virt_type): vif_type = vif['type'] LOG.debug('vif_type=%(vif_type)s instance=%(instance)s ' - 'vif=%(vif)s', + 'vif=%(vif)s virt_type%(virt_type)s', {'vif_type': vif_type, 'instance': instance, - 'vif': vif}) + 'vif': vif, 'virt_type': virt_type}) if vif_type is None: raise exception.NovaException( @@ -333,7 +342,8 @@ def get_config(self, instance, vif, image_meta, inst_type): if not func: raise exception.NovaException( _("Unexpected vif_type=%s") % vif_type) - return func(instance, vif, image_meta, inst_type) + return func(instance, vif, image_meta, + inst_type, virt_type) def plug_bridge(self, instance, vif): """Ensure that the bridge exists, and add VIF to it.""" From 7561c8ded211d53e8745d1420a73b82bd0fc35cf Mon Sep 17 00:00:00 2001 From: "Daniel P. 
Berrange" Date: Thu, 24 Jul 2014 11:53:47 +0100 Subject: [PATCH 212/486] libvirt: remove 'vif_driver' config parameter The 'vif_driver' config parameter was deprecated in the previous cycle, so can be deleted now. The two places in the test file which were overriding the 'vif_driver' config parameter were redundant as mocking already ensured the real VIF driver module was not exercised. The FakeVIFDriver class is thus no longer required, and the FakeIptablesFirewallDriver class was already not required, so both are deleted. DocImpact Related-bug: #1302796 Change-Id: I75519266ffc65df92efd20badfeaf88ac0d66f8a --- nova/tests/fake_network.py | 37 -------------------------- nova/tests/virt/libvirt/test_driver.py | 5 ---- nova/virt/libvirt/driver.py | 10 +++---- 3 files changed, 3 insertions(+), 49 deletions(-) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 5761dd3bc3..dd6c588a78 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -33,7 +33,6 @@ from nova.tests.objects import test_fixed_ip from nova.tests.objects import test_instance_info_cache from nova.tests.objects import test_pci_device -from nova.virt.libvirt import config as libvirt_config HOST = "testhost" @@ -41,42 +40,6 @@ CONF.import_opt('use_ipv6', 'nova.netconf') -class FakeIptablesFirewallDriver(object): - def __init__(self, **kwargs): - pass - - def setattr(self, key, val): - self.__setattr__(key, val) - - def apply_instance_filter(self, instance, network_info): - pass - - -class FakeVIFDriver(object): - - def __init__(self, *args, **kwargs): - pass - - def setattr(self, key, val): - self.__setattr__(key, val) - - def get_config(self, instance, vif, image_meta, - inst_type, virt_type): - conf = libvirt_config.LibvirtConfigGuestInterface() - - for attr, val in conf.__dict__.iteritems(): - if val is None: - setattr(conf, attr, 'fake') - - return conf - - def plug(self, instance, vif): - pass - - def unplug(self, instance, vif): - pass - - class 
FakeModel(dict): """Represent a model from the db.""" def __init__(self, *args, **kwargs): diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index c217f9e688..b24cdfc8fc 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -541,9 +541,6 @@ def defineXML(self, xml): for key, val in kwargs.items(): fake.__setattr__(key, val) - self.flags(vif_driver="nova.tests.fake_network.FakeVIFDriver", - group='libvirt') - self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake) def fake_lookup(self, instance_name): @@ -7791,8 +7788,6 @@ def test_create_without_pause(self): def _test_create_with_network_events(self, neutron_failure=None, power_on=True): - self.flags(vif_driver="nova.tests.fake_network.FakeVIFDriver", - group='libvirt') generated_events = [] def wait_timeout(): diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index eed8193253..154d96d1f4 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -94,6 +94,7 @@ from nova.virt.libvirt import lvm from nova.virt.libvirt import rbd from nova.virt.libvirt import utils as libvirt_utils +from nova.virt.libvirt import vif as libvirt_vif from nova.virt import netutils from nova.virt import watchdog_actions from nova import volume @@ -161,11 +162,6 @@ help='Snapshot image format (valid options are : ' 'raw, qcow2, vmdk, vdi). ' 'Defaults to same as source image'), - cfg.StrOpt('vif_driver', - default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver', - help='DEPRECATED. The libvirt VIF driver to configure the VIFs.' 
- 'This option is deprecated and will be removed in the ' - 'Juno release.'), cfg.ListOpt('volume_drivers', default=[ 'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver', @@ -342,8 +338,8 @@ def __init__(self, virtapi, read_only=False): self.virtapi, get_connection=self._get_connection) - vif_class = importutils.import_class(CONF.libvirt.vif_driver) - self.vif_driver = vif_class(self._get_connection) + self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver( + self._get_connection) self.volume_drivers = driver.driver_dict_from_config( CONF.libvirt.volume_drivers, self) From b5952b1ac90a7fc3692d80d412345a1a39b3aeb1 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Thu, 24 Jul 2014 13:16:54 +0100 Subject: [PATCH 213/486] libvirt: remove pointless LibvirtBaseVIFDriver class There is only one single VIF driver class these days, so it is pointless having a LibvirtBaseVIFDriver base class to inherit from. Related-bug: #1302796 Change-Id: Ie132135f9b6cf5337782ee9d1611c05bd56557fe --- nova/virt/libvirt/vif.py | 117 ++++++++++----------------------------- 1 file changed, 30 insertions(+), 87 deletions(-) diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 8f517ccedd..4885828e92 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -79,7 +79,8 @@ def is_vif_model_valid_for_virt(virt_type, vif_model): return vif_model in valid_models[virt_type] -class LibvirtBaseVIFDriver(object): +class LibvirtGenericVIFDriver(object): + """Generic VIF driver for libvirt networking.""" def __init__(self, get_connection): self.get_connection = get_connection @@ -96,7 +97,8 @@ def get_vif_devname_with_prefix(self, vif, prefix): devname = self.get_vif_devname(vif) return prefix + devname[3:] - def get_config(self, instance, vif, image_meta, inst_type, virt_type): + def get_base_config(self, instance, vif, image_meta, + inst_type, virt_type): conf = vconfig.LibvirtConfigGuestInterface() # Default to letting libvirt / the hypervisor choose the 
model model = None @@ -133,16 +135,6 @@ def get_config(self, instance, vif, image_meta, inst_type, virt_type): return conf - def plug(self, instance, vif): - pass - - def unplug(self, instance, vif): - pass - - -class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): - """Generic VIF driver for libvirt networking.""" - def get_bridge_name(self, vif): return vif['network']['bridge'] @@ -166,9 +158,8 @@ def get_firewall_required(self, vif): def get_config_bridge(self, instance, vif, image_meta, inst_type, virt_type): """Get VIF configurations for bridge type.""" - conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, image_meta, - inst_type, virt_type) + conf = self.get_base_config(instance, vif, image_meta, + inst_type, virt_type) designer.set_vif_host_backend_bridge_config( conf, self.get_bridge_name(vif), @@ -184,9 +175,8 @@ def get_config_bridge(self, instance, vif, image_meta, def get_config_ovs_bridge(self, instance, vif, image_meta, inst_type, virt_type): - conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, image_meta, - inst_type, virt_type) + conf = self.get_base_config(instance, vif, image_meta, + inst_type, virt_type) designer.set_vif_host_backend_ovs_config( conf, self.get_bridge_name(vif), @@ -229,12 +219,11 @@ def get_config_ivs_hybrid(self, instance, vif, image_meta, def get_config_ivs_ethernet(self, instance, vif, image_meta, inst_type, virt_type): - conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, - vif, - image_meta, - inst_type, - virt_type) + conf = self.get_base_config(instance, + vif, + image_meta, + inst_type, + virt_type) dev = self.get_vif_devname(vif) designer.set_vif_host_backend_ethernet_config(conf, dev) @@ -256,9 +245,8 @@ def get_config_ivs(self, instance, vif, image_meta, def get_config_802qbg(self, instance, vif, image_meta, inst_type, virt_type): - conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, image_meta, - inst_type, virt_type) + conf = 
self.get_base_config(instance, vif, image_meta, + inst_type, virt_type) params = vif["qbg_params"] designer.set_vif_host_backend_802qbg_config( @@ -274,9 +262,8 @@ def get_config_802qbg(self, instance, vif, image_meta, def get_config_802qbh(self, instance, vif, image_meta, inst_type, virt_type): - conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, image_meta, - inst_type, virt_type) + conf = self.get_base_config(instance, vif, image_meta, + inst_type, virt_type) params = vif["qbh_params"] designer.set_vif_host_backend_802qbh_config( @@ -289,9 +276,8 @@ def get_config_802qbh(self, instance, vif, image_meta, def get_config_iovisor(self, instance, vif, image_meta, inst_type, virt_type): - conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, image_meta, - inst_type, virt_type) + conf = self.get_base_config(instance, vif, image_meta, + inst_type, virt_type) dev = self.get_vif_devname(vif) designer.set_vif_host_backend_ethernet_config(conf, dev) @@ -302,9 +288,8 @@ def get_config_iovisor(self, instance, vif, image_meta, def get_config_midonet(self, instance, vif, image_meta, inst_type, virt_type): - conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, image_meta, - inst_type, virt_type) + conf = self.get_base_config(instance, vif, image_meta, + inst_type, virt_type) dev = self.get_vif_devname(vif) designer.set_vif_host_backend_ethernet_config(conf, dev) @@ -313,9 +298,8 @@ def get_config_midonet(self, instance, vif, image_meta, def get_config_mlnx_direct(self, instance, vif, image_meta, inst_type, virt_type): - conf = super(LibvirtGenericVIFDriver, - self).get_config(instance, vif, image_meta, - inst_type, virt_type) + conf = self.get_base_config(instance, vif, image_meta, + inst_type, virt_type) devname = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH) designer.set_vif_host_backend_direct_config(conf, devname) @@ -347,8 +331,6 @@ def get_config(self, instance, vif, image_meta, def plug_bridge(self, 
instance, vif): """Ensure that the bridge exists, and add VIF to it.""" - super(LibvirtGenericVIFDriver, - self).plug(instance, vif) network = vif['network'] if (not network.get_meta('multi_host', False) and network.get_meta('should_create_bridge', False)): @@ -374,8 +356,7 @@ def plug_bridge(self, instance, vif): def plug_ovs_bridge(self, instance, vif): """No manual plugging required.""" - super(LibvirtGenericVIFDriver, - self).plug(instance, vif) + pass def plug_ovs_hybrid(self, instance, vif): """Plug using hybrid strategy @@ -385,9 +366,6 @@ def plug_ovs_hybrid(self, instance, vif): of the veth device just like a normal OVS port. Then boot the VIF on the linux bridge using standard libvirt mechanisms. """ - super(LibvirtGenericVIFDriver, - self).plug(instance, vif) - iface_id = self.get_ovs_interfaceid(vif) br_name = self.get_br_name(vif['id']) v1_name, v2_name = self.get_veth_pair_names(vif['id']) @@ -418,9 +396,6 @@ def plug_ovs(self, instance, vif): self.plug_ovs_bridge(instance, vif) def plug_ivs_ethernet(self, instance, vif): - super(LibvirtGenericVIFDriver, - self).plug(instance, vif) - iface_id = self.get_ovs_interfaceid(vif) dev = self.get_vif_devname(vif) linux_net.create_tap_dev(dev) @@ -435,9 +410,6 @@ def plug_ivs_hybrid(self, instance, vif): of the veth device just like a normal IVS port. Then boot the VIF on the linux bridge using standard libvirt mechanisms. 
""" - super(LibvirtGenericVIFDriver, - self).plug(instance, vif) - iface_id = self.get_ovs_interfaceid(vif) br_name = self.get_br_name(vif['id']) v1_name, v2_name = self.get_veth_pair_names(vif['id']) @@ -467,9 +439,6 @@ def plug_ivs(self, instance, vif): self.plug_ivs_ethernet(instance, vif) def plug_mlnx_direct(self, instance, vif): - super(LibvirtGenericVIFDriver, - self).plug(instance, vif) - vnic_mac = vif['address'] device_id = instance['uuid'] fabric = vif.get_physical_network() @@ -485,20 +454,16 @@ def plug_mlnx_direct(self, instance, vif): LOG.exception(_LE("Failed while plugging vif"), instance=instance) def plug_802qbg(self, instance, vif): - super(LibvirtGenericVIFDriver, - self).plug(instance, vif) + pass def plug_802qbh(self, instance, vif): - super(LibvirtGenericVIFDriver, - self).plug(instance, vif) + pass def plug_midonet(self, instance, vif): """Plug into MidoNet's network port Bind the vif to a MidoNet virtual port. """ - super(LibvirtGenericVIFDriver, - self).plug(instance, vif) dev = self.get_vif_devname(vif) port_id = vif['id'] try: @@ -514,8 +479,6 @@ def plug_iovisor(self, instance, vif): Connect a network device to their respective Virtual Domain in PLUMgrid Platform. """ - super(LibvirtGenericVIFDriver, - self).plug(instance, vif) dev = self.get_vif_devname(vif) iface_id = vif['id'] linux_net.create_tap_dev(dev) @@ -553,13 +516,11 @@ def plug(self, instance, vif): def unplug_bridge(self, instance, vif): """No manual unplugging required.""" - super(LibvirtGenericVIFDriver, - self).unplug(instance, vif) + pass def unplug_ovs_bridge(self, instance, vif): """No manual unplugging required.""" - super(LibvirtGenericVIFDriver, - self).unplug(instance, vif) + pass def unplug_ovs_hybrid(self, instance, vif): """UnPlug using hybrid strategy @@ -567,9 +528,6 @@ def unplug_ovs_hybrid(self, instance, vif): Unhook port from OVS, unhook port from bridge, delete bridge, and delete both veth devices. 
""" - super(LibvirtGenericVIFDriver, - self).unplug(instance, vif) - try: br_name = self.get_br_name(vif['id']) v1_name, v2_name = self.get_veth_pair_names(vif['id']) @@ -596,9 +554,6 @@ def unplug_ovs(self, instance, vif): def unplug_ivs_ethernet(self, instance, vif): """Unplug the VIF by deleting the port from the bridge.""" - super(LibvirtGenericVIFDriver, - self).unplug(instance, vif) - try: linux_net.delete_ivs_vif_port(self.get_vif_devname(vif)) except processutils.ProcessExecutionError: @@ -611,9 +566,6 @@ def unplug_ivs_hybrid(self, instance, vif): Unhook port from IVS, unhook port from bridge, delete bridge, and delete both veth devices. """ - super(LibvirtGenericVIFDriver, - self).unplug(instance, vif) - try: br_name = self.get_br_name(vif['id']) v1_name, v2_name = self.get_veth_pair_names(vif['id']) @@ -634,9 +586,6 @@ def unplug_ivs(self, instance, vif): self.unplug_ivs_ethernet(instance, vif) def unplug_mlnx_direct(self, instance, vif): - super(LibvirtGenericVIFDriver, - self).unplug(instance, vif) - vnic_mac = vif['address'] fabric = vif.get_physical_network() if not fabric: @@ -650,20 +599,16 @@ def unplug_mlnx_direct(self, instance, vif): instance=instance) def unplug_802qbg(self, instance, vif): - super(LibvirtGenericVIFDriver, - self).unplug(instance, vif) + pass def unplug_802qbh(self, instance, vif): - super(LibvirtGenericVIFDriver, - self).unplug(instance, vif) + pass def unplug_midonet(self, instance, vif): """Unplug from MidoNet network port Unbind the vif from a MidoNet virtual port. """ - super(LibvirtGenericVIFDriver, - self).unplug(instance, vif) dev = self.get_vif_devname(vif) port_id = vif['id'] try: @@ -680,8 +625,6 @@ def unplug_iovisor(self, instance, vif): Delete network device and to their respective connection to the Virtual Domain in PLUMgrid Platform. 
""" - super(LibvirtGenericVIFDriver, - self).unplug(instance, vif) iface_id = vif['id'] dev = self.get_vif_devname(vif) try: From 6338e1acf06b3d40f53bc9fe416926a49d173ea8 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Wed, 28 May 2014 12:00:23 +0100 Subject: [PATCH 214/486] libvirt: add support for guest NUMA topology in XML config Add a libvirt config class for dealing with guest NUMA topology settings under the part of the schema. Blueprint: virt-driver-numa-placement Change-Id: Ia928fb5b91e8dbf5bbaf255e9ace2b84fe49b187 --- nova/tests/virt/libvirt/test_config.py | 61 ++++++++++++++++++++++++ nova/virt/libvirt/config.py | 65 ++++++++++++++++++++++++++ 2 files changed, 126 insertions(+) diff --git a/nova/tests/virt/libvirt/test_config.py b/nova/tests/virt/libvirt/test_config.py index 115e4554be..f10e72d44f 100644 --- a/nova/tests/virt/libvirt/test_config.py +++ b/nova/tests/virt/libvirt/test_config.py @@ -229,6 +229,34 @@ def test_config_simple(self): """) +class LibvirtConfigGuestCPUNUMATest(LibvirtConfigBaseTest): + + def test_config_simple(self): + obj = config.LibvirtConfigGuestCPUNUMA() + + cell = config.LibvirtConfigGuestCPUNUMACell() + cell.id = 0 + cell.cpus = set([0, 1]) + cell.memory = 1000000 + + obj.cells.append(cell) + + cell = config.LibvirtConfigGuestCPUNUMACell() + cell.id = 1 + cell.cpus = set([2, 3]) + cell.memory = 1500000 + + obj.cells.append(cell) + + xml = obj.to_xml() + self.assertXmlEqual(xml, """ + + + + + """) + + class LibvirtConfigCPUTest(LibvirtConfigBaseTest): def test_config_simple(self): @@ -344,6 +372,39 @@ def test_config_host(self): """) + def test_config_host_with_numa(self): + obj = config.LibvirtConfigGuestCPU() + obj.mode = "host-model" + obj.match = "exact" + + numa = config.LibvirtConfigGuestCPUNUMA() + + cell = config.LibvirtConfigGuestCPUNUMACell() + cell.id = 0 + cell.cpus = set([0, 1]) + cell.memory = 1000000 + + numa.cells.append(cell) + + cell = config.LibvirtConfigGuestCPUNUMACell() + cell.id = 1 + 
cell.cpus = set([2, 3]) + cell.memory = 1500000 + + numa.cells.append(cell) + + obj.numa = numa + + xml = obj.to_xml() + self.assertXmlEqual(xml, """ + + + + + + + """) + class LibvirtConfigGuestSMBIOSTest(LibvirtConfigBaseTest): diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py index 4872ba515f..012abdc3b7 100644 --- a/nova/virt/libvirt/config.py +++ b/nova/virt/libvirt/config.py @@ -465,6 +465,63 @@ def format_dom(self): return ft +class LibvirtConfigGuestCPUNUMACell(LibvirtConfigObject): + + def __init__(self, **kwargs): + super(LibvirtConfigGuestCPUNUMACell, self).__init__(root_name="cell", + **kwargs) + self.id = None + self.cpus = None + self.memory = None + + def parse_dom(self, xmldoc): + if xmldoc.get("id") is not None: + self.id = int(xmldoc.get("id")) + if xmldoc.get("memory") is not None: + self.memory = int(xmldoc.get("memory")) + if xmldoc.get("cpus") is not None: + self.cpus = hardware.parse_cpu_spec(xmldoc.get("cpus")) + + def format_dom(self): + cell = super(LibvirtConfigGuestCPUNUMACell, self).format_dom() + + if self.id is not None: + cell.set("id", str(self.id)) + if self.cpus is not None: + cell.set("cpus", + hardware.format_cpu_spec(self.cpus)) + if self.memory is not None: + cell.set("memory", str(self.memory)) + + return cell + + +class LibvirtConfigGuestCPUNUMA(LibvirtConfigObject): + + def __init__(self, **kwargs): + super(LibvirtConfigGuestCPUNUMA, self).__init__(root_name="numa", + **kwargs) + + self.cells = [] + + def parse_dom(self, xmldoc): + super(LibvirtConfigGuestCPUNUMA, self).parse_Dom(xmldoc) + + for child in xmldoc.getchildren(): + if child.tag == "cell": + cell = LibvirtConfigGuestCPUNUMACell() + cell.parse_dom(child) + self.cells.append(cell) + + def format_dom(self): + numa = super(LibvirtConfigGuestCPUNUMA, self).format_dom() + + for cell in self.cells: + numa.append(cell.format_dom()) + + return numa + + class LibvirtConfigGuestCPU(LibvirtConfigCPU): def __init__(self, **kwargs): @@ -472,11 +529,17 @@ 
def __init__(self, **kwargs): self.mode = None self.match = "exact" + self.numa = None def parse_dom(self, xmldoc): super(LibvirtConfigGuestCPU, self).parse_dom(xmldoc) self.mode = xmldoc.get('mode') self.match = xmldoc.get('match') + for child in xmldoc.getchildren(): + if child.tag == "numa": + numa = LibvirtConfigGuestCPUNUMA() + numa.parse_dom(child) + self.numa = numa def format_dom(self): cpu = super(LibvirtConfigGuestCPU, self).format_dom() @@ -484,6 +547,8 @@ def format_dom(self): if self.mode: cpu.set("mode", self.mode) cpu.set("match", self.match) + if self.numa is not None: + cpu.append(self.numa.format_dom()) return cpu From 8831f3d0e2e2a81a6b406cc9c8bf89bc15989065 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 2 Apr 2014 15:31:23 +0200 Subject: [PATCH 215/486] Allow to unshelve instance booted from volume When we shelve an instance, volumes are not detached from cinder to kept them 'In-Use' in cinder But when you unshelve this instance, nova ask cinder the reattach theses volumes. This fails, because volume cannot be attached twice. This patch permits when we ask the libvirt DriverBlockDevice to attach a device to the instance to bypass the cinder attachement code that is not needed when we unshelve an instance, because the cinder volume is kept 'In-use' during the 'shelved' state. 
Co-Authored-By: Sahid Orentino Ferdjaoui Closes-bug: #1305399 Change-Id: I780a9407feeb48ecd3e295508ce3e6bc3b09d3e6 --- nova/compute/manager.py | 14 +++++--- nova/tests/compute/test_compute.py | 6 ++-- nova/tests/compute/test_shelve.py | 9 ++--- nova/tests/virt/test_block_device.py | 52 +++++++++++++++++++++------- nova/virt/block_device.py | 19 +++++----- 5 files changed, 66 insertions(+), 34 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f1e744f340..2ff0e6fb66 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1705,7 +1705,8 @@ def _is_mapping(bdm): swap, block_device_mapping) - def _prep_block_device(self, context, instance, bdms): + def _prep_block_device(self, context, instance, bdms, + do_check_attach=True): """Set up the block device for an instance with error logging.""" try: block_device_info = { @@ -1716,15 +1717,17 @@ def _prep_block_device(self, context, instance, bdms): driver_block_device.attach_block_devices( driver_block_device.convert_volumes(bdms), context, instance, self.volume_api, - self.driver) + + self.driver, do_check_attach=do_check_attach) + driver_block_device.attach_block_devices( driver_block_device.convert_snapshots(bdms), context, instance, self.volume_api, - self.driver, self._await_block_device_map_created) + + self.driver, self._await_block_device_map_created, + do_check_attach=do_check_attach) + driver_block_device.attach_block_devices( driver_block_device.convert_images(bdms), context, instance, self.volume_api, - self.driver, self._await_block_device_map_created)) + self.driver, self._await_block_device_map_created, + do_check_attach=do_check_attach)) } if self.use_legacy_block_device_info: @@ -4027,7 +4030,8 @@ def _unshelve_instance(self, context, instance, image, filter_properties, bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) - block_device_info = self._prep_block_device(context, instance, bdms) + block_device_info = 
self._prep_block_device(context, instance, bdms, + do_check_attach=False) scrubbed_keys = self._unshelve_instance_key_scrub(instance) if node is None: diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index c5e17014eb..d0cf309a2d 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -380,7 +380,8 @@ def setUp(self): self.context, objects.Instance(), fake_instance.fake_db_instance()) self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw: - {'id': self.volume_id}) + {'id': self.volume_id, + 'attach_status': 'detached'}) self.stubs.Set(self.compute.driver, 'get_volume_connector', lambda *a, **kw: None) self.stubs.Set(self.compute.volume_api, 'initialize_connection', @@ -4273,7 +4274,8 @@ def test_finish_resize_with_volumes(self): volume_id = 'fake' volume = {'instance_uuid': None, 'device_name': None, - 'id': volume_id} + 'id': volume_id, + 'attach_status': 'detached'} bdm = objects.BlockDeviceMapping( **{'source_type': 'volume', 'destination_type': 'volume', diff --git a/nova/tests/compute/test_shelve.py b/nova/tests/compute/test_shelve.py index 4bc195dd01..8ec670096f 100644 --- a/nova/tests/compute/test_shelve.py +++ b/nova/tests/compute/test_shelve.py @@ -211,7 +211,7 @@ def fake_claim(context, instance, limits): columns_to_join=['metadata', 'system_metadata'], ).AndReturn((db_instance, db_instance)) self.compute._prep_block_device(self.context, instance, - mox.IgnoreArg()).AndReturn('fake_bdm') + mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm') db_instance['key_data'] = None db_instance['auto_disk_config'] = None self.compute.network_api.migrate_instance_finish( @@ -250,7 +250,6 @@ def fake_claim(context, instance, limits): def test_unshelve_volume_backed(self): db_instance = jsonutils.to_primitive(self._create_fake_instance()) - host = 'fake-mini' node = test_compute.NODENAME limits = {} filter_properties = {'limits': limits} @@ -262,10 +261,6 @@ def 
test_unshelve_volume_backed(self): expected_attrs=['metadata', 'system_metadata']) instance.task_state = task_states.UNSHELVING instance.save() - sys_meta = dict(instance.system_metadata) - sys_meta['shelved_at'] = timeutils.strtime(at=cur_time) - sys_meta['shelved_image_id'] = None - sys_meta['shelved_host'] = host self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') self.mox.StubOutWithMock(self.compute, '_prep_block_device') @@ -284,7 +279,7 @@ def test_unshelve_volume_backed(self): columns_to_join=['metadata', 'system_metadata'] ).AndReturn((db_instance, db_instance)) self.compute._prep_block_device(self.context, instance, - mox.IgnoreArg()).AndReturn('fake_bdm') + mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm') db_instance['key_data'] = None db_instance['auto_disk_config'] = None self.compute.network_api.migrate_instance_finish( diff --git a/nova/tests/virt/test_block_device.py b/nova/tests/virt/test_block_device.py index f34d18b120..af727b5d55 100644 --- a/nova/tests/virt/test_block_device.py +++ b/nova/tests/virt/test_block_device.py @@ -278,7 +278,8 @@ def test_driver_image_block_device_destination_local(self): def _test_volume_attach(self, driver_bdm, bdm_dict, fake_volume, check_attach=True, fail_check_attach=False, driver_attach=False, - fail_driver_attach=False, access_mode='rw'): + fail_driver_attach=False, volume_attach=True, + access_mode='rw'): elevated_context = self.context.elevated() self.stubs.Set(self.context, 'elevated', lambda: elevated_context) @@ -330,16 +331,18 @@ def _test_volume_attach(self, driver_bdm, bdm_dict, expected_conn_info).AndReturn(None) return instance, expected_conn_info - self.volume_api.attach(elevated_context, fake_volume['id'], - 'fake_uuid', bdm_dict['device_name'], - mode=access_mode).AndReturn(None) + if volume_attach: + self.volume_api.attach(elevated_context, fake_volume['id'], + 'fake_uuid', bdm_dict['device_name'], + mode=access_mode).AndReturn(None) 
driver_bdm._bdm_obj.save(self.context).AndReturn(None) return instance, expected_conn_info def test_volume_attach(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) - volume = {'id': 'fake-volume-id-1'} + volume = {'id': 'fake-volume-id-1', + 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume) @@ -353,7 +356,8 @@ def test_volume_attach(self): def test_volume_attach_ro(self): test_bdm = self.driver_classes['volume'](self.volume_bdm) - volume = {'id': 'fake-volume-id-1'} + volume = {'id': 'fake-volume-id-1', + 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume, access_mode='ro') @@ -377,10 +381,29 @@ def check_volume_attach_check_attach_fails(self): self.asserRaises(test.TestingException, test_bdm.attach, self.context, instance, self.volume_api, self.virt_driver) + def test_volume_no_volume_attach(self): + test_bdm = self.driver_classes['volume']( + self.volume_bdm) + volume = {'id': 'fake-volume-id-1', + 'attach_status': 'detached'} + + instance, expected_conn_info = self._test_volume_attach( + test_bdm, self.volume_bdm, volume, check_attach=False, + driver_attach=False) + + self.mox.ReplayAll() + + test_bdm.attach(self.context, instance, + self.volume_api, self.virt_driver, + do_check_attach=False, do_driver_attach=False) + self.assertThat(test_bdm['connection_info'], + matchers.DictMatches(expected_conn_info)) + def test_volume_attach_no_check_driver_attach(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) - volume = {'id': 'fake-volume-id-1'} + volume = {'id': 'fake-volume-id-1', + 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume, check_attach=False, @@ -437,8 +460,10 @@ def test_snapshot_attach_no_volume(self): no_volume_snapshot['volume_id'] = None test_bdm = self.driver_classes['snapshot'](no_volume_snapshot) - snapshot = 
{'id': 'fake-snapshot-id-1'} - volume = {'id': 'fake-volume-id-2'} + snapshot = {'id': 'fake-volume-id-1', + 'attach_status': 'detached'} + volume = {'id': 'fake-volume-id-2', + 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() @@ -469,7 +494,8 @@ def test_snapshot_attach_volume(self): self.mox.StubOutWithMock(self.volume_api, 'create') volume_class.attach(self.context, instance, self.volume_api, - self.virt_driver).AndReturn(None) + self.virt_driver, do_check_attach=True + ).AndReturn(None) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, @@ -482,7 +508,8 @@ def test_image_attach_no_volume(self): test_bdm = self.driver_classes['image'](no_volume_image) image = {'id': 'fake-image-id-1'} - volume = {'id': 'fake-volume-id-2'} + volume = {'id': 'fake-volume-id-2', + 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() @@ -511,7 +538,8 @@ def test_image_attach_volume(self): self.mox.StubOutWithMock(self.volume_api, 'create') volume_class.attach(self.context, instance, self.volume_api, - self.virt_driver).AndReturn(None) + self.virt_driver, do_check_attach=True + ).AndReturn(None) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py index c900194b1e..0022d23311 100644 --- a/nova/virt/block_device.py +++ b/nova/virt/block_device.py @@ -250,8 +250,9 @@ def attach(self, context, instance, volume_api, virt_driver, mode = 'rw' if 'data' in connection_info: mode = connection_info['data'].get('access_mode', 'rw') - volume_api.attach(context, volume_id, instance['uuid'], - self['mount_device'], mode=mode) + if volume['attach_status'] == "detached": + volume_api.attach(context, volume_id, instance['uuid'], + self['mount_device'], mode=mode) @update_db def refresh_connection_info(self, context, instance, @@ -285,7 +286,7 @@ class DriverSnapshotBlockDevice(DriverVolumeBlockDevice): _proxy_as_attr = set(['volume_size', 
'volume_id', 'snapshot_id']) def attach(self, context, instance, volume_api, - virt_driver, wait_func=None): + virt_driver, wait_func=None, do_check_attach=True): if not self.volume_id: snapshot = volume_api.get_snapshot(context, @@ -298,8 +299,9 @@ def attach(self, context, instance, volume_api, self.volume_id = vol['id'] # Call the volume attach now - super(DriverSnapshotBlockDevice, self).attach(context, instance, - volume_api, virt_driver) + super(DriverSnapshotBlockDevice, self).attach( + context, instance, volume_api, virt_driver, + do_check_attach=do_check_attach) class DriverImageBlockDevice(DriverVolumeBlockDevice): @@ -308,7 +310,7 @@ class DriverImageBlockDevice(DriverVolumeBlockDevice): _proxy_as_attr = set(['volume_size', 'volume_id', 'image_id']) def attach(self, context, instance, volume_api, - virt_driver, wait_func=None): + virt_driver, wait_func=None, do_check_attach=True): if not self.volume_id: vol = volume_api.create(context, self.volume_size, '', '', image_id=self.image_id) @@ -317,8 +319,9 @@ def attach(self, context, instance, volume_api, self.volume_id = vol['id'] - super(DriverImageBlockDevice, self).attach(context, instance, - volume_api, virt_driver) + super(DriverImageBlockDevice, self).attach( + context, instance, volume_api, virt_driver, + do_check_attach=do_check_attach) def _convert_block_devices(device_type, block_device_mapping): From 513c6bbd36563e57a85d33f9c94f4a20ab7c00f4 Mon Sep 17 00:00:00 2001 From: jufeng Date: Wed, 21 May 2014 16:24:53 +0800 Subject: [PATCH 216/486] Fix attaching config drive issue on Hyper-V when migrate instances After instance resized or migrated on Hyper-V hypervisor. The configdrive iso or vhd is copied to resized or migrated instance, but is not attached to instance. Because there are configurations for config drive like config_drive_cdrom, config_drive_format, and the configurations on different Hyper-V compute node may be different. 
it will need to convert configdrive format after resized or migrated. It is easy to convert from iso9660 or vfat to vhd, but it seems impossible to convert from vhd to iso9660 or vfat. So this commit just ignore the target Hyper-V compute node's config drive configurations, leave the original config drive format. Change-Id: I349e3b2221fff0ae217a71a91895afd21ff7d18d Closes-Bug: #1321640 --- nova/tests/virt/hyperv/fake.py | 4 ++ nova/tests/virt/hyperv/test_hypervapi.py | 66 ++++++++++++++++++++- nova/tests/virt/hyperv/test_migrationops.py | 40 +++++++++++++ nova/tests/virt/hyperv/test_pathutils.py | 56 +++++++++++++++++ nova/tests/virt/hyperv/test_vmops.py | 38 ++++++++++++ nova/virt/hyperv/constants.py | 7 +++ nova/virt/hyperv/migrationops.py | 17 ++++++ nova/virt/hyperv/pathutils.py | 14 +++++ nova/virt/hyperv/vmops.py | 21 +++++-- 9 files changed, 255 insertions(+), 8 deletions(-) create mode 100644 nova/tests/virt/hyperv/test_migrationops.py create mode 100644 nova/tests/virt/hyperv/test_pathutils.py create mode 100644 nova/tests/virt/hyperv/test_vmops.py diff --git a/nova/tests/virt/hyperv/fake.py b/nova/tests/virt/hyperv/fake.py index f1d29a5546..0e92e76813 100644 --- a/nova/tests/virt/hyperv/fake.py +++ b/nova/tests/virt/hyperv/fake.py @@ -58,6 +58,10 @@ def lookup_root_vhd_path(self, instance_name): instance_path = self.get_instance_dir(instance_name) return os.path.join(instance_path, 'root.vhd') + def lookup_configdrive_path(self, instance_name): + instance_path = self.get_instance_dir(instance_name) + return os.path.join(instance_path, 'configdrive.iso') + def lookup_ephemeral_vhd_path(self, instance_name): instance_path = self.get_instance_dir(instance_name) if instance_path: diff --git a/nova/tests/virt/hyperv/test_hypervapi.py b/nova/tests/virt/hyperv/test_hypervapi.py index a37ed6b63d..fcae5206ba 100644 --- a/nova/tests/virt/hyperv/test_hypervapi.py +++ b/nova/tests/virt/hyperv/test_hypervapi.py @@ -1512,7 +1512,39 @@ def 
test_migrate_disk_and_power_off_smaller_root_vhd_size_exception(self): flavor, network_info) self._mox.VerifyAll() - def _test_finish_migration(self, power_on, ephemeral_storage=False): + def _mock_attach_config_drive(self, instance, config_drive_format): + instance['config_drive'] = True + self._mox.StubOutWithMock(fake.PathUtils, 'lookup_configdrive_path') + m = fake.PathUtils.lookup_configdrive_path( + mox.Func(self._check_instance_name)) + + if config_drive_format in constants.DISK_FORMAT_MAP: + m.AndReturn(self._test_instance_dir + '/configdrive.' + + config_drive_format) + else: + m.AndReturn(None) + + m = vmutils.VMUtils.attach_ide_drive( + mox.Func(self._check_instance_name), + mox.IsA(str), + mox.IsA(int), + mox.IsA(int), + mox.IsA(str)) + m.WithSideEffects(self._add_ide_disk).InAnyOrder() + + def _verify_attach_config_drive(self, config_drive_format): + if config_drive_format == constants.IDE_DISK_FORMAT.lower(): + self.assertEqual(self._instance_ide_disks[1], + self._test_instance_dir + '/configdrive.' + + config_drive_format) + elif config_drive_format == constants.IDE_DVD_FORMAT.lower(): + self.assertEqual(self._instance_ide_dvds[0], + self._test_instance_dir + '/configdrive.' 
+ + config_drive_format) + + def _test_finish_migration(self, power_on, ephemeral_storage=False, + config_drive=False, + config_drive_format='iso'): self._instance_data = self._get_instance_data() instance = db.instance_create(self._context, self._instance_data) instance['system_metadata'] = {} @@ -1561,11 +1593,17 @@ def _test_finish_migration(self, power_on, ephemeral_storage=False): vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name), constants.HYPERV_VM_STATE_ENABLED) + if config_drive: + self._mock_attach_config_drive(instance, config_drive_format) + self._mox.ReplayAll() self._conn.finish_migration(self._context, None, instance, "", network_info, None, False, None, power_on) self._mox.VerifyAll() + if config_drive: + self._verify_attach_config_drive(config_drive_format) + def test_finish_migration_power_on(self): self._test_finish_migration(True) @@ -1575,6 +1613,14 @@ def test_finish_migration_power_off(self): def test_finish_migration_with_ephemeral_storage(self): self._test_finish_migration(False, ephemeral_storage=True) + def test_finish_migration_attach_config_drive_iso(self): + self._test_finish_migration(False, config_drive=True, + config_drive_format=constants.IDE_DVD_FORMAT.lower()) + + def test_finish_migration_attach_config_drive_vhd(self): + self._test_finish_migration(False, config_drive=True, + config_drive_format=constants.IDE_DISK_FORMAT.lower()) + def test_confirm_migration(self): self._instance_data = self._get_instance_data() instance = db.instance_create(self._context, self._instance_data) @@ -1586,7 +1632,9 @@ def test_confirm_migration(self): self._conn.confirm_migration(None, instance, network_info) self._mox.VerifyAll() - def _test_finish_revert_migration(self, power_on, ephemeral_storage=False): + def _test_finish_revert_migration(self, power_on, ephemeral_storage=False, + config_drive=False, + config_drive_format='iso'): self._instance_data = self._get_instance_data() instance = db.instance_create(self._context, 
self._instance_data) network_info = fake_network.fake_get_instance_nw_info(self.stubs) @@ -1624,12 +1672,18 @@ def _test_finish_revert_migration(self, power_on, ephemeral_storage=False): vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name), constants.HYPERV_VM_STATE_ENABLED) + if config_drive: + self._mock_attach_config_drive(instance, config_drive_format) + self._mox.ReplayAll() self._conn.finish_revert_migration(self._context, instance, network_info, None, power_on) self._mox.VerifyAll() + if config_drive: + self._verify_attach_config_drive(config_drive_format) + def test_finish_revert_migration_power_on(self): self._test_finish_revert_migration(True) @@ -1645,6 +1699,14 @@ def test_spawn_no_admin_permissions(self): def test_finish_revert_migration_with_ephemeral_storage(self): self._test_finish_revert_migration(False, ephemeral_storage=True) + def test_finish_revert_migration_attach_config_drive_iso(self): + self._test_finish_revert_migration(False, config_drive=True, + config_drive_format=constants.IDE_DVD_FORMAT.lower()) + + def test_finish_revert_migration_attach_config_drive_vhd(self): + self._test_finish_revert_migration(False, config_drive=True, + config_drive_format=constants.IDE_DISK_FORMAT.lower()) + def test_plug_vifs(self): # Check to make sure the method raises NotImplementedError. self.assertRaises(NotImplementedError, diff --git a/nova/tests/virt/hyperv/test_migrationops.py b/nova/tests/virt/hyperv/test_migrationops.py new file mode 100644 index 0000000000..cfd8777103 --- /dev/null +++ b/nova/tests/virt/hyperv/test_migrationops.py @@ -0,0 +1,40 @@ +# Copyright 2014 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from nova import test +from nova.tests import fake_instance +from nova.virt.hyperv import migrationops +from nova.virt.hyperv import vmutils + + +class MigrationOpsTestCase(test.NoDBTestCase): + """Unit tests for the Hyper-V MigrationOps class.""" + + def setUp(self): + super(MigrationOpsTestCase, self).setUp() + self.context = 'fake-context' + self.flags(force_hyperv_utils_v1=True, group='hyperv') + self.flags(force_volumeutils_v1=True, group='hyperv') + self._migrationops = migrationops.MigrationOps() + + def test_check_and_attach_config_drive_unknown_path(self): + instance = fake_instance.fake_instance_obj(self.context) + instance.config_drive = 'True' + self._migrationops._pathutils.lookup_configdrive_path = mock.MagicMock( + return_value=None) + self.assertRaises(vmutils.HyperVException, + self._migrationops._check_and_attach_config_drive, + instance) diff --git a/nova/tests/virt/hyperv/test_pathutils.py b/nova/tests/virt/hyperv/test_pathutils.py new file mode 100644 index 0000000000..f18b584637 --- /dev/null +++ b/nova/tests/virt/hyperv/test_pathutils.py @@ -0,0 +1,56 @@ +# Copyright 2014 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from nova import test +from nova.virt.hyperv import constants +from nova.virt.hyperv import pathutils + + +class PathUtilsTestCase(test.NoDBTestCase): + """Unit tests for the Hyper-V PathUtils class.""" + + def setUp(self): + self.fake_instance_dir = 'C:/fake_instance_dir' + self.fake_instance_name = 'fake_instance_name' + self._pathutils = pathutils.PathUtils() + super(PathUtilsTestCase, self).setUp() + + def _mock_lookup_configdrive_path(self, ext): + self._pathutils.get_instance_dir = mock.MagicMock( + return_value=self.fake_instance_dir) + + def mock_exists(*args, **kwargs): + path = args[0] + return True if path[(path.rfind('.') + 1):] == ext else False + self._pathutils.exists = mock_exists + configdrive_path = self._pathutils.lookup_configdrive_path( + self.fake_instance_name) + return configdrive_path + + def test_lookup_configdrive_path(self): + for format_ext in constants.DISK_FORMAT_MAP: + configdrive_path = self._mock_lookup_configdrive_path(format_ext) + self.assertEqual(configdrive_path, + self.fake_instance_dir + '/configdrive.' 
+ + format_ext) + + def test_lookup_configdrive_path_non_exist(self): + self._pathutils.get_instance_dir = mock.MagicMock( + return_value=self.fake_instance_dir) + self._pathutils.exists = mock.MagicMock(return_value=False) + configdrive_path = self._pathutils.lookup_configdrive_path( + self.fake_instance_name) + self.assertIsNone(configdrive_path) diff --git a/nova/tests/virt/hyperv/test_vmops.py b/nova/tests/virt/hyperv/test_vmops.py new file mode 100644 index 0000000000..b8f095b944 --- /dev/null +++ b/nova/tests/virt/hyperv/test_vmops.py @@ -0,0 +1,38 @@ +# Copyright 2014 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import exception +from nova import test +from nova.tests import fake_instance +from nova.virt.hyperv import vmops + + +class VMOpsTestCase(test.NoDBTestCase): + """Unit tests for the Hyper-V VMOps class.""" + + def __init__(self, test_case_name): + super(VMOpsTestCase, self).__init__(test_case_name) + + def setUp(self): + super(VMOpsTestCase, self).setUp() + self.context = 'fake-context' + self.flags(force_hyperv_utils_v1=True, group='hyperv') + self.flags(force_volumeutils_v1=True, group='hyperv') + self._vmops = vmops.VMOps() + + def test_attach_config_drive(self): + instance = fake_instance.fake_instance_obj(self.context) + self.assertRaises(exception.InvalidDiskFormat, + self._vmops.attach_config_drive, + instance, 'C:/fake_instance_dir/configdrive.xxx') diff --git a/nova/virt/hyperv/constants.py b/nova/virt/hyperv/constants.py index e1e2ec9037..9c2116ac5e 100644 --- a/nova/virt/hyperv/constants.py +++ b/nova/virt/hyperv/constants.py @@ -66,7 +66,14 @@ VM_SUMMARY_UPTIME = 105 IDE_DISK = "VHD" +IDE_DISK_FORMAT = IDE_DISK IDE_DVD = "DVD" +IDE_DVD_FORMAT = "ISO" + +DISK_FORMAT_MAP = { + IDE_DISK_FORMAT.lower(): IDE_DISK, + IDE_DVD_FORMAT.lower(): IDE_DVD +} DISK_FORMAT_VHD = "VHD" DISK_FORMAT_VHDX = "VHDX" diff --git a/nova/virt/hyperv/migrationops.py b/nova/virt/hyperv/migrationops.py index 31e3c101cc..a1bc934bd0 100644 --- a/nova/virt/hyperv/migrationops.py +++ b/nova/virt/hyperv/migrationops.py @@ -22,6 +22,7 @@ from nova.openstack.common import excutils from nova.openstack.common import log as logging from nova.openstack.common import units +from nova.virt import configdrive from nova.virt.hyperv import imagecache from nova.virt.hyperv import utilsfactory from nova.virt.hyperv import vmops @@ -143,6 +144,17 @@ def _revert_migration_files(self, instance_name): instance_name) self._pathutils.rename(revert_path, instance_path) + def _check_and_attach_config_drive(self, instance): + if configdrive.required_by(instance): + configdrive_path = 
self._pathutils.lookup_configdrive_path( + instance.name) + if configdrive_path: + self._vmops.attach_config_drive(instance, configdrive_path) + else: + raise vmutils.HyperVException( + _("Config drive is required by instance: %s, " + "but it does not exist.") % instance.name) + def finish_revert_migration(self, context, instance, network_info, block_device_info=None, power_on=True): LOG.debug("finish_revert_migration called", instance=instance) @@ -160,6 +172,8 @@ def finish_revert_migration(self, context, instance, network_info, self._vmops.create_instance(instance, network_info, block_device_info, root_vhd_path, eph_vhd_path) + self._check_and_attach_config_drive(instance) + if power_on: self._vmops.power_on(instance) @@ -268,5 +282,8 @@ def finish_migration(self, context, migration, instance, disk_info, self._vmops.create_instance(instance, network_info, block_device_info, root_vhd_path, eph_vhd_path) + + self._check_and_attach_config_drive(instance) + if power_on: self._vmops.power_on(instance) diff --git a/nova/virt/hyperv/pathutils.py b/nova/virt/hyperv/pathutils.py index 02937689c6..4e4a83a366 100644 --- a/nova/virt/hyperv/pathutils.py +++ b/nova/virt/hyperv/pathutils.py @@ -21,6 +21,7 @@ from nova.i18n import _ from nova.openstack.common import log as logging from nova import utils +from nova.virt.hyperv import constants LOG = logging.getLogger(__name__) @@ -132,6 +133,15 @@ def _lookup_vhd_path(self, instance_name, vhd_path_func): def lookup_root_vhd_path(self, instance_name): return self._lookup_vhd_path(instance_name, self.get_root_vhd_path) + def lookup_configdrive_path(self, instance_name): + configdrive_path = None + for format_ext in constants.DISK_FORMAT_MAP: + test_path = self.get_configdrive_path(instance_name, format_ext) + if self.exists(test_path): + configdrive_path = test_path + break + return configdrive_path + def lookup_ephemeral_vhd_path(self, instance_name): return self._lookup_vhd_path(instance_name, self.get_ephemeral_vhd_path) @@ 
-140,6 +150,10 @@ def get_root_vhd_path(self, instance_name, format_ext): instance_path = self.get_instance_dir(instance_name) return os.path.join(instance_path, 'root.' + format_ext.lower()) + def get_configdrive_path(self, instance_name, format_ext): + instance_path = self.get_instance_dir(instance_name) + return os.path.join(instance_path, 'configdrive.' + format_ext.lower()) + def get_ephemeral_vhd_path(self, instance_name, format_ext): instance_path = self.get_instance_dir(instance_name) return os.path.join(instance_path, 'ephemeral.' + format_ext.lower()) diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py index 9f72b06b56..2cc5bef91c 100644 --- a/nova/virt/hyperv/vmops.py +++ b/nova/virt/hyperv/vmops.py @@ -246,8 +246,10 @@ def spawn(self, context, instance, image_meta, injected_files, root_vhd_path, eph_vhd_path) if configdrive.required_by(instance): - self._create_config_drive(instance, injected_files, - admin_password) + configdrive_path = self._create_config_drive(instance, + injected_files, + admin_password) + self.attach_config_drive(instance, configdrive_path) self.power_on(instance) except Exception: @@ -327,7 +329,6 @@ def _create_config_drive(self, instance, injected_files, admin_password): e, instance=instance) if not CONF.hyperv.config_drive_cdrom: - drive_type = constants.IDE_DISK configdrive_path = os.path.join(instance_path, 'configdrive.vhd') utils.execute(CONF.hyperv.qemu_img_cmd, @@ -341,11 +342,19 @@ def _create_config_drive(self, instance, injected_files, admin_password): attempts=1) self._pathutils.remove(configdrive_path_iso) else: - drive_type = constants.IDE_DVD configdrive_path = configdrive_path_iso - self._vmutils.attach_ide_drive(instance['name'], configdrive_path, - 1, 0, drive_type) + return configdrive_path + + def attach_config_drive(self, instance, configdrive_path): + configdrive_ext = configdrive_path[(configdrive_path.rfind('.') + 1):] + # Do the attach here and if there is a certain file format that isn't + 
# supported in constants.DISK_FORMAT_MAP then bomb out. + try: + self._vmutils.attach_ide_drive(instance.name, configdrive_path, + 1, 0, constants.DISK_FORMAT_MAP[configdrive_ext]) + except KeyError: + raise exception.InvalidDiskFormat(disk_format=configdrive_ext) def _disconnect_volumes(self, volume_drives): for volume_drive in volume_drives: From 3efb8070da6becedcaa9af58b38bf9385baa2e7f Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Tue, 8 Apr 2014 14:07:20 +0900 Subject: [PATCH 217/486] Add API schema for v2.1/v3 cells API By defining the API schema, it is possible to separate the validation code from the API method. The API method can be more simple. In addition, a response of API validation error can be consistent for the whole Nova API. Partially implements blueprint v3-api-schema Change-Id: Ic400a31fe2c05e07785a2dc6c4fd864e684f4965 --- .../api/openstack/compute/plugins/v3/cells.py | 50 ++-------- .../api/openstack/compute/schemas/v3/cells.py | 99 +++++++++++++++++++ nova/api/validation/validators.py | 11 +++ .../compute/plugins/v3/test_cells.py | 65 +++++------- nova/tests/test_api_validation.py | 45 +++++++++ 5 files changed, 187 insertions(+), 83 deletions(-) create mode 100644 nova/api/openstack/compute/schemas/v3/cells.py diff --git a/nova/api/openstack/compute/plugins/v3/cells.py b/nova/api/openstack/compute/plugins/v3/cells.py index 9825f79e17..ee35daee43 100644 --- a/nova/api/openstack/compute/plugins/v3/cells.py +++ b/nova/api/openstack/compute/plugins/v3/cells.py @@ -22,14 +22,15 @@ from webob import exc from nova.api.openstack import common +from nova.api.openstack.compute.schemas.v3 import cells from nova.api.openstack import extensions from nova.api.openstack import wsgi +from nova.api import validation from nova.cells import rpcapi as cells_rpcapi from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.openstack.common import strutils -from nova.openstack.common import timeutils from nova 
import rpc @@ -187,21 +188,6 @@ def delete(self, req, id): raise exc.HTTPNotFound( explanation=_("Cell %s doesn't exist.") % id) - def _validate_cell_name(self, cell_name): - """Validate cell name is not empty and doesn't contain '!' or '.'.""" - if not cell_name: - msg = _("Cell name cannot be empty") - raise exc.HTTPBadRequest(explanation=msg) - if '!' in cell_name or '.' in cell_name: - msg = _("Cell name cannot contain '!' or '.'") - raise exc.HTTPBadRequest(explanation=msg) - - def _validate_cell_type(self, cell_type): - """Validate cell_type is 'parent' or 'child'.""" - if cell_type not in ['parent', 'child']: - msg = _("Cell type must be 'parent' or 'child'") - raise exc.HTTPBadRequest(explanation=msg) - def _normalize_cell(self, cell, existing=None): """Normalize input cell data. Normalizations include: @@ -211,7 +197,6 @@ def _normalize_cell(self, cell, existing=None): # Start with the cell type conversion if 'type' in cell: - self._validate_cell_type(cell['type']) cell['is_parent'] = cell['type'] == 'parent' del cell['type'] # Avoid cell type being overwritten to 'child' @@ -249,6 +234,7 @@ def _normalize_cell(self, cell, existing=None): @extensions.expected_errors((400, 403, 501)) @common.check_cells_enabled @wsgi.response(201) + @validation.schema(cells.create) def create(self, req, body): """Create a child cell entry.""" context = req.environ['nova.context'] @@ -256,14 +242,7 @@ def create(self, req, body): authorize(context) authorize(context, action="create") - if 'cell' not in body: - msg = _("No cell information in request") - raise exc.HTTPBadRequest(explanation=msg) cell = body['cell'] - if 'name' not in cell: - msg = _("No cell name in request") - raise exc.HTTPBadRequest(explanation=msg) - self._validate_cell_name(cell['name']) self._normalize_cell(cell) try: cell = self.cells_rpcapi.cell_create(context, cell) @@ -273,6 +252,7 @@ def create(self, req, body): @extensions.expected_errors((400, 403, 404, 501)) @common.check_cells_enabled + 
@validation.schema(cells.update) def update(self, req, id, body): """Update a child cell entry. 'id' is the cell name to update.""" context = req.environ['nova.context'] @@ -280,13 +260,9 @@ def update(self, req, id, body): authorize(context) authorize(context, action="update") - if 'cell' not in body: - msg = _("No cell information in request") - raise exc.HTTPBadRequest(explanation=msg) cell = body['cell'] cell.pop('id', None) - if 'name' in cell: - self._validate_cell_name(cell['name']) + try: # NOTE(Vek): There is a race condition here if multiple # callers are trying to update the cell @@ -309,6 +285,7 @@ def update(self, req, id, body): @extensions.expected_errors((400, 501)) @common.check_cells_enabled @wsgi.response(204) + @validation.schema(cells.sync_instances) def sync_instances(self, req, body): """Tell all cells to sync instance info.""" context = req.environ['nova.context'] @@ -319,21 +296,8 @@ def sync_instances(self, req, body): project_id = body.pop('project_id', None) deleted = body.pop('deleted', False) updated_since = body.pop('updated_since', None) - if body: - msg = _("Only 'updated_since', 'project_id' and 'deleted' are " - "understood.") - raise exc.HTTPBadRequest(explanation=msg) if isinstance(deleted, six.string_types): - try: - deleted = strutils.bool_from_string(deleted, strict=True) - except ValueError as err: - raise exc.HTTPBadRequest(explanation=str(err)) - if updated_since: - try: - timeutils.parse_isotime(updated_since) - except ValueError: - msg = _('Invalid changes-since value') - raise exc.HTTPBadRequest(explanation=msg) + deleted = strutils.bool_from_string(deleted, strict=True) self.cells_rpcapi.sync_instances(context, project_id=project_id, updated_since=updated_since, deleted=deleted) diff --git a/nova/api/openstack/compute/schemas/v3/cells.py b/nova/api/openstack/compute/schemas/v3/cells.py new file mode 100644 index 0000000000..37a9ed5cc0 --- /dev/null +++ b/nova/api/openstack/compute/schemas/v3/cells.py @@ -0,0 +1,99 @@ 
+# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.api.validation import parameter_types + + +create = { + 'type': 'object', + 'properties': { + 'cell': { + 'type': 'object', + 'properties': { + 'name': parameter_types.name, + 'type': { + 'type': 'string', + 'enum': ['parent', 'child'], + }, + + # NOTE: In unparse_transport_url(), a url consists of the + # following parameters: + # "qpid://:@:/" + # or + # "rabiit://:@:/" + # Then the url is stored into transport_url of cells table + # which is defined with String(255). + 'username': { + 'type': 'string', 'maxLength': 255, + 'pattern': '^[a-zA-Z0-9-_]*$' + }, + 'password': { + # Allow to specify any string for strong password. 
+ 'type': 'string', 'maxLength': 255, + }, + 'rpc_host': parameter_types.hostname_or_ip_address, + 'rpc_port': parameter_types.tcp_udp_port, + 'rpc_virtual_host': parameter_types.hostname_or_ip_address, + }, + 'required': ['name'], + 'additionalProperties': False, + }, + }, + 'required': ['cell'], + 'additionalProperties': False, +} + + +update = { + 'type': 'object', + 'properties': { + 'cell': { + 'type': 'object', + 'properties': { + 'name': parameter_types.name, + 'type': { + 'type': 'string', + 'enum': ['parent', 'child'], + }, + 'username': { + 'type': 'string', 'maxLength': 255, + 'pattern': '^[a-zA-Z0-9-_]*$' + }, + 'password': { + 'type': 'string', 'maxLength': 255, + }, + 'rpc_host': parameter_types.hostname_or_ip_address, + 'rpc_port': parameter_types.tcp_udp_port, + 'rpc_virtual_host': parameter_types.hostname_or_ip_address, + }, + 'additionalProperties': False, + }, + }, + 'required': ['cell'], + 'additionalProperties': False, +} + + +sync_instances = { + 'type': 'object', + 'properties': { + 'project_id': parameter_types.project_id, + 'deleted': parameter_types.boolean, + 'updated_since': { + 'type': 'string', + 'format': 'date-time', + }, + }, + 'additionalProperties': False, +} diff --git a/nova/api/validation/validators.py b/nova/api/validation/validators.py index a5090e18c8..ce74923756 100644 --- a/nova/api/validation/validators.py +++ b/nova/api/validation/validators.py @@ -21,9 +21,20 @@ from nova import exception from nova.i18n import _ +from nova.openstack.common import timeutils from nova.openstack.common import uuidutils +@jsonschema.FormatChecker.cls_checks('date-time') +def _validate_datetime_format(instance): + try: + timeutils.parse_isotime(instance) + except ValueError: + return False + else: + return True + + @jsonschema.FormatChecker.cls_checks('uuid') def _validate_uuid_format(instance): return uuidutils.is_uuid_like(instance) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_cells.py 
b/nova/tests/api/openstack/compute/plugins/v3/test_cells.py index 63cf2d96bd..404525ebb0 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_cells.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_cells.py @@ -180,13 +180,11 @@ def _cell_create_parent(self): 'username': 'fred', 'password': 'fubar', 'rpc_host': 'r3.example.org', - 'type': 'parent', - # Also test this is ignored/stripped - 'is_parent': False}} + 'type': 'parent'}} req = self._get_request("cells") req.environ['nova.context'] = self.context - res_dict = self.controller.create(req, body) + res_dict = self.controller.create(req, body=body) cell = res_dict['cell'] self.assertEqual(self.controller.create.wsgi_code, 201) self.assertEqual(cell['name'], 'meow') @@ -194,7 +192,6 @@ def _cell_create_parent(self): self.assertEqual(cell['rpc_host'], 'r3.example.org') self.assertEqual(cell['type'], 'parent') self.assertNotIn('password', cell) - self.assertNotIn('is_parent', cell) def test_cell_create_parent(self): # Test create with just cells policy @@ -215,7 +212,7 @@ def _cell_create_child(self): req = self._get_request("cells") req.environ['nova.context'] = self.context - res_dict = self.controller.create(req, body) + res_dict = self.controller.create(req, body=body) cell = res_dict['cell'] self.assertEqual(self.controller.create.wsgi_code, 201) self.assertEqual(cell['name'], 'meow') @@ -243,8 +240,8 @@ def test_cell_create_no_name_raises(self): req = self._get_request("cells") req.environ['nova.context'] = self.context - self.assertRaises(exc.HTTPBadRequest, - self.controller.create, req, body) + self.assertRaises(exception.ValidationError, + self.controller.create, req, body=body) def test_cell_create_name_empty_string_raises(self): body = {'cell': {'name': '', @@ -255,8 +252,8 @@ def test_cell_create_name_empty_string_raises(self): req = self._get_request("cells") req.environ['nova.context'] = self.context - self.assertRaises(exc.HTTPBadRequest, - self.controller.create, req, body) + 
self.assertRaises(exception.ValidationError, + self.controller.create, req, body=body) def test_cell_create_name_with_bang_raises(self): body = {'cell': {'name': 'moo!cow', @@ -267,20 +264,8 @@ def test_cell_create_name_with_bang_raises(self): req = self._get_request("cells") req.environ['nova.context'] = self.context - self.assertRaises(exc.HTTPBadRequest, - self.controller.create, req, body) - - def test_cell_create_name_with_dot_raises(self): - body = {'cell': {'name': 'moo.cow', - 'username': 'fred', - 'password': 'secret', - 'rpc_host': 'r3.example.org', - 'type': 'parent'}} - - req = self._get_request("cells") - req.environ['nova.context'] = self.context - self.assertRaises(exc.HTTPBadRequest, - self.controller.create, req, body) + self.assertRaises(exception.ValidationError, + self.controller.create, req, body=body) def test_cell_create_name_with_invalid_type_raises(self): body = {'cell': {'name': 'moocow', @@ -291,8 +276,8 @@ def test_cell_create_name_with_invalid_type_raises(self): req = self._get_request("cells") req.environ['nova.context'] = self.context - self.assertRaises(exc.HTTPBadRequest, - self.controller.create, req, body) + self.assertRaises(exception.ValidationError, + self.controller.create, req, body=body) def test_cell_create_fails_for_invalid_policy(self): body = {'cell': {'name': 'fake'}} @@ -300,7 +285,7 @@ def test_cell_create_fails_for_invalid_policy(self): req.environ['nova.context'] = self.context req.environ['nova.context'].is_admin = False self.assertRaises(exception.PolicyNotAuthorized, - self.controller.create, req, body) + self.controller.create, req, body=body) def _cell_update(self): body = {'cell': {'username': 'zeb', @@ -308,7 +293,7 @@ def _cell_update(self): req = self._get_request("cells/cell1") req.environ['nova.context'] = self.context - res_dict = self.controller.update(req, 'cell1', body) + res_dict = self.controller.update(req, 'cell1', body=body) cell = res_dict['cell'] self.assertEqual(cell['name'], 'cell1') @@ 
-332,7 +317,7 @@ def test_cell_update_fails_for_invalid_policy(self): req.environ['nova.context'] = self.context req.environ['nova.context'].is_admin = False self.assertRaises(exception.PolicyNotAuthorized, - self.controller.create, req, body) + self.controller.create, req, body=body) def test_cell_update_empty_name_raises(self): body = {'cell': {'name': '', @@ -341,8 +326,8 @@ def test_cell_update_empty_name_raises(self): req = self._get_request("cells/cell1") req.environ['nova.context'] = self.context - self.assertRaises(exc.HTTPBadRequest, - self.controller.update, req, 'cell1', body) + self.assertRaises(exception.ValidationError, + self.controller.update, req, 'cell1', body=body) def test_cell_update_invalid_type_raises(self): body = {'cell': {'username': 'zeb', @@ -351,15 +336,15 @@ def test_cell_update_invalid_type_raises(self): req = self._get_request("cells/cell1") req.environ['nova.context'] = self.context - self.assertRaises(exc.HTTPBadRequest, - self.controller.update, req, 'cell1', body) + self.assertRaises(exception.ValidationError, + self.controller.update, req, 'cell1', body=body) def test_cell_update_without_type_specified(self): body = {'cell': {'username': 'wingwj'}} req = self._get_request("cells/cell1") req.environ['nova.context'] = self.context - res_dict = self.controller.update(req, 'cell1', body) + res_dict = self.controller.update(req, 'cell1', body=body) cell = res_dict['cell'] self.assertEqual(cell['name'], 'cell1') @@ -373,12 +358,12 @@ def test_cell_update_with_type_specified(self): req1 = self._get_request("cells/cell1") req1.environ['nova.context'] = self.context - res_dict1 = self.controller.update(req1, 'cell1', body1) + res_dict1 = self.controller.update(req1, 'cell1', body=body1) cell1 = res_dict1['cell'] req2 = self._get_request("cells/cell2") req2.environ['nova.context'] = self.context - res_dict2 = self.controller.update(req2, 'cell2', body2) + res_dict2 = self.controller.update(req2, 'cell2', body=body2) cell2 = 
res_dict2['cell'] self.assertEqual(cell1['name'], 'cell1') @@ -500,7 +485,7 @@ def sync_instances(self, context, **kwargs): self.assertEqual(call_info['updated_since'], expected) body = {'updated_since': 'skjdfkjsdkf'} - self.assertRaises(exc.HTTPBadRequest, + self.assertRaises(exception.ValidationError, self.controller.sync_instances, req, body=body) body = {'deleted': False} @@ -522,11 +507,11 @@ def sync_instances(self, context, **kwargs): self.assertEqual(call_info['deleted'], True) body = {'deleted': 'foo'} - self.assertRaises(exc.HTTPBadRequest, + self.assertRaises(exception.ValidationError, self.controller.sync_instances, req, body=body) body = {'foo': 'meow'} - self.assertRaises(exc.HTTPBadRequest, + self.assertRaises(exception.ValidationError, self.controller.sync_instances, req, body=body) def test_sync_instances_fails_for_invalid_policy(self): @@ -541,7 +526,7 @@ def sync_instances(self, context, **kwargs): body = {} self.assertRaises(exception.PolicyNotAuthorized, - self.controller.sync_instances, req, body) + self.controller.sync_instances, req, body=body) def test_cells_disabled(self): self.flags(enable=False, group='cells') diff --git a/nova/tests/test_api_validation.py b/nova/tests/test_api_validation.py index 7544c8d7b3..9a9348839b 100644 --- a/nova/tests/test_api_validation.py +++ b/nova/tests/test_api_validation.py @@ -602,6 +602,51 @@ def test_validate_tcp_udp_port_fails(self): expected_detail=detail) +class DatetimeTestCase(APIValidationTestCase): + + def setUp(self): + super(DatetimeTestCase, self).setUp() + schema = { + 'type': 'object', + 'properties': { + 'foo': { + 'type': 'string', + 'format': 'date-time', + }, + }, + } + + @validation.schema(schema) + def post(body): + return 'Validation succeeded.' 
+ + self.post = post + + def test_validate_datetime(self): + self.assertEqual('Validation succeeded.', + self.post( + body={'foo': '2014-01-14T01:00:00Z'} + )) + + def test_validate_datetime_fails(self): + detail = ("Invalid input for field/attribute foo." + " Value: 2014-13-14T01:00:00Z." + " '2014-13-14T01:00:00Z' is not a 'date-time'") + self.check_validation_error(self.post, + body={'foo': '2014-13-14T01:00:00Z'}, + expected_detail=detail) + + detail = ("Invalid input for field/attribute foo." + " Value: bar. 'bar' is not a 'date-time'") + self.check_validation_error(self.post, body={'foo': 'bar'}, + expected_detail=detail) + + detail = ("Invalid input for field/attribute foo. Value: 1." + " '1' is not a 'date-time'") + self.check_validation_error(self.post, body={'foo': '1'}, + expected_detail=detail) + + class UuidTestCase(APIValidationTestCase): def setUp(self): From 99e6ec9131c2e3eeb5fe3e1a4dc5a26e6302ebd2 Mon Sep 17 00:00:00 2001 From: Christopher Yeoh Date: Tue, 8 Apr 2014 16:08:56 +0930 Subject: [PATCH 218/486] Fix _parse_datetime in simple tenant usage extension _parse_datetime in os-simple-tenant-usage incorrectly attempts to parse the datetime string even when it is None. As a result a 400 BadRequest is returned. This has the overall effect of making the start and end datetime parameters compulsory when they are meant to be optional. Note that this restores the behavior where start/end are optional which is in contradiction to the API docs, but is the long standing API behaviour. 
This was an API change which was accidentally applied in commit I8e0e870727d687da165c809ffb7a4456bff81122 as part of some nova internal changes but not picked up by the unitests Change-Id: I00427379dc7aa39770f9a16ff026addb6e311735 Closes-Bug: 1300972 --- .../compute/contrib/simple_tenant_usage.py | 21 ++++++++++--------- .../contrib/test_simple_tenant_usage.py | 19 +++++++++++++++++ 2 files changed, 30 insertions(+), 10 deletions(-) diff --git a/nova/api/openstack/compute/contrib/simple_tenant_usage.py b/nova/api/openstack/compute/contrib/simple_tenant_usage.py index 361df17493..f261428fef 100644 --- a/nova/api/openstack/compute/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/compute/contrib/simple_tenant_usage.py @@ -218,17 +218,18 @@ def _parse_datetime(self, dtstr): value = timeutils.utcnow() elif isinstance(dtstr, datetime.datetime): value = dtstr - for fmt in ["%Y-%m-%dT%H:%M:%S", - "%Y-%m-%dT%H:%M:%S.%f", - "%Y-%m-%d %H:%M:%S.%f"]: - try: - value = parse_strtime(dtstr, fmt) - break - except exception.InvalidStrTime: - pass else: - msg = _("Datetime is in invalid format") - raise exception.InvalidStrTime(reason=msg) + for fmt in ["%Y-%m-%dT%H:%M:%S", + "%Y-%m-%dT%H:%M:%S.%f", + "%Y-%m-%d %H:%M:%S.%f"]: + try: + value = parse_strtime(dtstr, fmt) + break + except exception.InvalidStrTime: + pass + else: + msg = _("Datetime is in invalid format") + raise exception.InvalidStrTime(reason=msg) # NOTE(mriedem): Instance object DateTime fields are timezone-aware # so we have to force UTC timezone for comparing this datetime against diff --git a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py index 9dc17edf5b..4a94be2241 100644 --- a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py +++ b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py @@ -271,6 +271,25 @@ def test_get_tenants_usage_with_invalid_start_date(self): 
init_only=('os-simple-tenant-usage',))) self.assertEqual(res.status_int, 400) + def _test_get_tenants_usage_with_one_date(self, date_url_param): + req = webob.Request.blank( + '/v2/faketenant_0/os-simple-tenant-usage/' + 'faketenant_0?%s' % date_url_param) + req.method = "GET" + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.user_context, + init_only=('os-simple-tenant-usage',))) + self.assertEqual(200, res.status_int) + + def test_get_tenants_usage_with_no_start_date(self): + self._test_get_tenants_usage_with_one_date( + 'end=%s' % (NOW + datetime.timedelta(5)).isoformat()) + + def test_get_tenants_usage_with_no_end_date(self): + self._test_get_tenants_usage_with_one_date( + 'start=%s' % (NOW - datetime.timedelta(5)).isoformat()) + class SimpleTenantUsageSerializerTest(test.TestCase): def _verify_server_usage(self, raw_usage, tree): From c363dae6a2b878db6801b502cced1fcc6aad2d0c Mon Sep 17 00:00:00 2001 From: Paul Murray Date: Fri, 25 Jul 2014 06:03:35 +0100 Subject: [PATCH 219/486] Fix Resource tracker should report virt driver stats If the virt driver provides any data for resource stats it is lost whenever the resource tracker updates its own view of stats. Moreover, if the resource tracker has not instances to track it only reports the driver's view, which might be nothing. This fix adds the driver's view of stats to the resource tracker stats to make sure they are correctly handled. 
Change-Id: Icb19148660bca542a8120ecab064551d67ac28af Closes-bug: #1348288 --- nova/compute/resource_tracker.py | 4 +- nova/compute/stats.py | 17 +++ nova/tests/compute/test_resource_tracker.py | 116 +++++++++++++++++++- 3 files changed, 133 insertions(+), 4 deletions(-) diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py index fb65f77c3a..37ea921f6e 100644 --- a/nova/compute/resource_tracker.py +++ b/nova/compute/resource_tracker.py @@ -609,8 +609,10 @@ def _update_usage_from_instances(self, resources, instances): """ self.tracked_instances.clear() - # purge old stats + # purge old stats and init with anything passed in by the driver self.stats.clear() + self.stats.digest_stats(resources.get('stats')) + resources['stats'] = jsonutils.dumps(self.stats) # set some initial values, reserve room for host/hypervisor: resources['local_gb_used'] = CONF.reserved_host_disk_mb / 1024 diff --git a/nova/compute/stats.py b/nova/compute/stats.py index bf183b012c..559d7cba7d 100644 --- a/nova/compute/stats.py +++ b/nova/compute/stats.py @@ -15,6 +15,8 @@ from nova.compute import task_states from nova.compute import vm_states +from nova.i18n import _ +from nova.openstack.common import jsonutils class Stats(dict): @@ -31,6 +33,21 @@ def clear(self): self.states.clear() + def digest_stats(self, stats): + """Apply stats provided as a dict or a json encoded string.""" + # NOTE(pmurray): allow json strings as some drivers pass in + # stats in that way - they shouldn't really do that. 
+ if stats is None: + return + if isinstance(stats, dict): + self.update(stats) + return + if isinstance(stats, str): + _stats_from_json = jsonutils.loads(stats) + self.update(_stats_from_json) + return + raise ValueError(_('Unexpected type adding stats')) + @property def io_workload(self): """Calculate an I/O based load by counting I/O heavy operations.""" diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py index 364cfd6e2d..119689519f 100644 --- a/nova/tests/compute/test_resource_tracker.py +++ b/nova/tests/compute/test_resource_tracker.py @@ -45,6 +45,8 @@ EPHEMERAL_GB = 1 FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB FAKE_VIRT_VCPUS = 1 +FAKE_VIRT_STATS = {'virt_stat': 10} +FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS) CONF = cfg.CONF @@ -64,7 +66,7 @@ def get_available_resource(self, nodename): class FakeVirtDriver(driver.ComputeDriver): - def __init__(self, pci_support=False): + def __init__(self, pci_support=False, stats=None): super(FakeVirtDriver, self).__init__(None) self.memory_mb = FAKE_VIRT_MEMORY_MB self.local_gb = FAKE_VIRT_LOCAL_GB @@ -87,6 +89,8 @@ def __init__(self, pci_support=False): 'vendor_id': 'v1', 'product_id': 'p1', 'extra_info': {'extra_k1': 'v1'}}] if self.pci_support else [] + if stats is not None: + self.stats = stats def get_host_ip_addr(self): return '127.0.0.1' @@ -106,7 +110,8 @@ def get_available_resource(self, nodename): } if self.pci_support: d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices) - + if hasattr(self, 'stats'): + d['stats'] = self.stats return d def estimate_instance_overhead(self, instance_info): @@ -441,7 +446,7 @@ def setUp(self): self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node', self._fake_migration_get_in_progress_by_host_and_node) - self.tracker.update_available_resource(self.context) + self._init_tracker() self.limits = self._limits() def _fake_service_get_by_compute_host(self, ctx, host): @@ -483,6 +488,9 @@ def 
_fake_migration_update(self, ctxt, migration_id, values): migration.update(values) return migration + def _init_tracker(self): + self.tracker.update_available_resource(self.context) + def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD, disk_gb=FAKE_VIRT_LOCAL_GB, vcpus=FAKE_VIRT_VCPUS): @@ -1161,3 +1169,105 @@ def test_periodic_status_update(self): driver.memory_mb += 1 self.tracker.update_available_resource(self.context) self.assertEqual(2, self.update_call_count) + + +class StatsDictTestCase(BaseTrackerTestCase): + """Test stats handling for a virt driver that provides + stats as a dictionary. + """ + def _driver(self): + return FakeVirtDriver(stats=FAKE_VIRT_STATS) + + def _get_stats(self): + return jsonutils.loads(self.tracker.compute_node['stats']) + + def test_virt_stats(self): + # start with virt driver stats + stats = self._get_stats() + self.assertEqual(FAKE_VIRT_STATS, stats) + + # adding an instance should keep virt driver stats + self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host) + self.tracker.update_available_resource(self.context) + + stats = self._get_stats() + expected_stats = {} + expected_stats.update(FAKE_VIRT_STATS) + expected_stats.update(self.tracker.stats) + self.assertEqual(expected_stats, stats) + + # removing the instances should keep only virt driver stats + self._instances = {} + self.tracker.update_available_resource(self.context) + + stats = self._get_stats() + self.assertEqual(FAKE_VIRT_STATS, stats) + + +class StatsJsonTestCase(BaseTrackerTestCase): + """Test stats handling for a virt driver that provides + stats as a json string. 
+ """ + def _driver(self): + return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON) + + def _get_stats(self): + return jsonutils.loads(self.tracker.compute_node['stats']) + + def test_virt_stats(self): + # start with virt driver stats + stats = self._get_stats() + self.assertEqual(FAKE_VIRT_STATS, stats) + + # adding an instance should keep virt driver stats + # and add rt stats + self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host) + self.tracker.update_available_resource(self.context) + + stats = self._get_stats() + expected_stats = {} + expected_stats.update(FAKE_VIRT_STATS) + expected_stats.update(self.tracker.stats) + self.assertEqual(expected_stats, stats) + + # removing the instances should keep only virt driver stats + self._instances = {} + self.tracker.update_available_resource(self.context) + stats = self._get_stats() + self.assertEqual(FAKE_VIRT_STATS, stats) + + +class StatsInvalidJsonTestCase(BaseTrackerTestCase): + """Test stats handling for a virt driver that provides + an invalid type for stats. + """ + def _driver(self): + return FakeVirtDriver(stats='this is not json') + + def _init_tracker(self): + # do not do initial update in setup + pass + + def test_virt_stats(self): + # should throw exception for string that does not parse as json + self.assertRaises(ValueError, + self.tracker.update_available_resource, + context=self.context) + + +class StatsInvalidTypeTestCase(BaseTrackerTestCase): + """Test stats handling for a virt driver that provides + an invalid type for stats. 
+ """ + def _driver(self): + return FakeVirtDriver(stats=10) + + def _init_tracker(self): + # do not do initial update in setup + pass + + def test_virt_stats(self): + # should throw exception for incorrect stats value type + self.assertRaises(ValueError, + self.tracker.update_available_resource, + context=self.context) From eeeb5830e3ddadcc3f662ef3512f16af0ec1cac4 Mon Sep 17 00:00:00 2001 From: Pawel Koniszewski Date: Wed, 30 Jul 2014 09:10:09 -0400 Subject: [PATCH 220/486] Parse unicode cpu_info as json before using it If the extra specs are some of this: - (capabilities:cpu_info:vendor, Intel) - (capabilities:cpu_info:topology:cores, 2) - (capabilities:cpu_info:features, rdtscp) It doesn't work because the cpu_info is loaded as unicode in HostState. The patch fix these cases by parsing the cpu_info to dict. Update: use six.string_types instead of unicode. Change-Id: Ieabf95b1e3e5893ced16c0571c29b13b252df5d1 Closes-Bug: #1331176 Co-Authored-By: Pawel Koniszewski --- .../filters/compute_capabilities_filter.py | 8 +++++++ nova/tests/scheduler/test_host_filters.py | 23 +++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/nova/scheduler/filters/compute_capabilities_filter.py b/nova/scheduler/filters/compute_capabilities_filter.py index 178ca49363..ac68d509fb 100644 --- a/nova/scheduler/filters/compute_capabilities_filter.py +++ b/nova/scheduler/filters/compute_capabilities_filter.py @@ -13,6 +13,9 @@ # License for the specific language governing permissions and limitations # under the License. 
+import six + +from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.scheduler import filters from nova.scheduler.filters import extra_specs_ops @@ -45,6 +48,11 @@ def _satisfies_extra_specs(self, host_state, instance_type): cap = host_state for index in range(0, len(scope)): try: + if isinstance(cap, six.string_types): + try: + cap = jsonutils.loads(cap) + except ValueError: + return False if not isinstance(cap, dict): if getattr(cap, scope[index], None) is None: # If can't find, check stats dict diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py index 634cf66927..eb0fdc71fa 100644 --- a/nova/tests/scheduler/test_host_filters.py +++ b/nova/tests/scheduler/test_host_filters.py @@ -18,6 +18,7 @@ import httplib from oslo.config import cfg +import six import stubout from nova import context @@ -822,6 +823,28 @@ def _do_test_compute_filter_extra_specs(self, ecaps, especs, passes): assertion = self.assertTrue if passes else self.assertFalse assertion(filt_cls.host_passes(host, filter_properties)) + def test_compute_filter_pass_cpu_info_as_text_type(self): + cpu_info = """ { "vendor": "Intel", "model": "core2duo", + "arch": "i686","features": ["lahf_lm", "rdtscp"], "topology": + {"cores": 1, "threads":1, "sockets": 1}} """ + + cpu_info = six.text_type(cpu_info) + + self._do_test_compute_filter_extra_specs( + ecaps={'cpu_info': cpu_info}, + especs={'capabilities:cpu_info:vendor': 'Intel'}, + passes=True) + + def test_compute_filter_fail_cpu_info_as_text_type_not_valid(self): + cpu_info = "cpu_info" + + cpu_info = six.text_type(cpu_info) + + self._do_test_compute_filter_extra_specs( + ecaps={'cpu_info': cpu_info}, + especs={'capabilities:cpu_info:vendor': 'Intel'}, + passes=False) + def test_compute_filter_passes_extra_specs_simple(self): self._do_test_compute_filter_extra_specs( ecaps={'stats': {'opt1': 1, 'opt2': 2}}, From 21bf0219f66ca9041bc0cf9c29e3a23c054d125c Mon Sep 17 
00:00:00 2001 From: Matt Riedemann Date: Wed, 30 Jul 2014 06:45:11 -0700 Subject: [PATCH 221/486] Update dev env docs on libvirt-dev(el) requirement Commit 8f505b85268adc226ec0a83826c2d13edcbe3d7c added a test requirement on libvirt-python which requires libvirt-dev(el) being installed, so update the docs to add that package for the distro. Change-Id: I7169d3c78a6babe6e20c0dc35874167d0bfee922 --- doc/source/devref/development.environment.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst index 255ece68a7..790b929fe1 100644 --- a/doc/source/devref/development.environment.rst +++ b/doc/source/devref/development.environment.rst @@ -60,7 +60,7 @@ Install the prerequisite packages. On Ubuntu:: - sudo apt-get install python-dev libssl-dev python-pip git-core libxml2-dev libxslt-dev pkg-config libffi-dev libpq-dev libmysqlclient-dev + sudo apt-get install python-dev libssl-dev python-pip git-core libxml2-dev libxslt-dev pkg-config libffi-dev libpq-dev libmysqlclient-dev libvirt-dev On Ubuntu Precise (12.04) you may also need to add the following packages:: @@ -68,7 +68,7 @@ On Ubuntu Precise (12.04) you may also need to add the following packages:: On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux):: - sudo yum install python-devel openssl-devel python-pip git gcc libxslt-devel mysql-devel postgresql-devel libffi-devel + sudo yum install python-devel openssl-devel python-pip git gcc libxslt-devel mysql-devel postgresql-devel libffi-devel libvirt-devel sudo pip-python install tox From 5be2e456c06bd35ff96066711ee4bbfd1cafe50a Mon Sep 17 00:00:00 2001 From: Jon Grimm Date: Wed, 30 Jul 2014 00:46:57 +0000 Subject: [PATCH 222/486] Improve logging when python-guestfs/libguestfs isn't working Bumped from debug to warn level, as most often refers to missing python-guestfs package or other libguestfs misconfiguration, and the fallback path is 
usually not desired. Closes-bug: 1240339 Change-Id: I5d90a3ea109e79fe17221c001f94d49d599048ba --- nova/virt/disk/vfs/api.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/virt/disk/vfs/api.py b/nova/virt/disk/vfs/api.py index a98f12b4f8..d272968c00 100644 --- a/nova/virt/disk/vfs/api.py +++ b/nova/virt/disk/vfs/api.py @@ -12,6 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. +from nova.i18n import _LW from nova.openstack.common import importutils from nova.openstack.common import log as logging @@ -40,7 +41,8 @@ def instance_for_image(imgfile, imgfmt, partition): "nova.virt.disk.vfs.guestfs.VFSGuestFS", imgfile, imgfmt, partition) else: - LOG.debug("Falling back to VFSLocalFS") + LOG.warn(_LW("Unable to import guestfs, " + "falling back to VFSLocalFS")) return importutils.import_object( "nova.virt.disk.vfs.localfs.VFSLocalFS", imgfile, imgfmt, partition) From 5c3f212343df997daa48f1f4a1cdd2a29099c288 Mon Sep 17 00:00:00 2001 From: lvdongbing Date: Tue, 29 Jul 2014 23:40:44 +0800 Subject: [PATCH 223/486] libvirt re-define guest with wrong XML document In the nova/virt/libvirt/driver.py file, the '_live_snapshot' and '_swap_volume' methods have the following code flow xml = dom.XMLDesc(0) dom.undefine() dom.blockRebase() dom.defineXML(xml) The reason for this is that 'blockRebase' requires the guest to be transient, so we must temporarily delete the persistent config and then re-create it later. Unfortunately this code is using the wrong XML document when re-creating the persistent config. 'dom.XMLDesc(0)' will return the guest XML document based on the current guest state. Since the guest is running in both these cases, it will be getting the *live* XML instead of the persistent XML. So these methods are deleting the persistent XML and replacing it with the live XML. These two different XML documents are not guaranteed to contain the same information.
As a second problem, it is not requesting inclusion of security information, so any SPICE/VNC password set in the persistent XML is getting lost. Change-Id: I4b4e0990ca6c07a9215766f994884a1fb18f3a41 Closes-Bug: #1346191 --- nova/tests/virt/libvirt/test_driver.py | 8 ++++++-- nova/virt/libvirt/driver.py | 12 ++++++++---- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 674fdabcb5..2cd581fea0 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -8037,7 +8037,9 @@ def test_swap_volume(self): drvr._swap_volume(mock_dom, srcfile, dstfile) - mock_dom.XMLDesc.assert_called_once_with(0) + mock_dom.XMLDesc.assert_called_once_with( + fakelibvirt.VIR_DOMAIN_XML_INACTIVE | + fakelibvirt.VIR_DOMAIN_XML_SECURE) mock_dom.blockRebase.assert_called_once_with( srcfile, dstfile, 0, libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | @@ -8073,7 +8075,9 @@ def test_live_snapshot(self): drvr._live_snapshot(mock_dom, srcfile, dstfile, "qcow2") - mock_dom.XMLDesc.assert_called_once_with(0) + mock_dom.XMLDesc.assert_called_once_with( + fakelibvirt.VIR_DOMAIN_XML_INACTIVE | + fakelibvirt.VIR_DOMAIN_XML_SECURE) mock_dom.blockRebase.assert_called_once_with( srcfile, dltfile, 0, libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 47d670b6a4..f75efc0a07 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -1292,8 +1292,10 @@ def attach_volume(self, context, connection_info, instance, mountpoint, def _swap_volume(self, domain, disk_path, new_path): """Swap existing disk with a new block device.""" - # Save a copy of the domain's running XML file - xml = domain.XMLDesc(0) + # Save a copy of the domain's persistent XML file + xml = domain.XMLDesc( + libvirt.VIR_DOMAIN_XML_INACTIVE | + libvirt.VIR_DOMAIN_XML_SECURE) # Abort is an idempotent operation, so make sure any block # 
jobs which may have failed are ended. @@ -1639,8 +1641,10 @@ def _wait_for_block_job(domain, disk_path, abort_on_error=False): def _live_snapshot(self, domain, disk_path, out_path, image_format): """Snapshot an instance without downtime.""" - # Save a copy of the domain's running XML file - xml = domain.XMLDesc(0) + # Save a copy of the domain's persistent XML file + xml = domain.XMLDesc( + libvirt.VIR_DOMAIN_XML_INACTIVE | + libvirt.VIR_DOMAIN_XML_SECURE) # Abort is an idempotent operation, so make sure any block # jobs which may have failed are ended. From 44ead0692774efe1d513af72ad037c67f7dcabae Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 30 Jul 2014 10:08:21 -0700 Subject: [PATCH 224/486] Update devref setup docs for latest libvirt on ubuntu Adds instructions on how to enable the ubuntu cloud-archive and update to the latest libvirt-dev from the icehouse repo. This is needed to run unit tests in Juno if you're still on ubuntu precise. Change-Id: I8c93afdd72256b2bdc487e1413bdd570a6ea59e8 --- doc/source/devref/development.environment.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst index 790b929fe1..db2ef029bc 100644 --- a/doc/source/devref/development.environment.rst +++ b/doc/source/devref/development.environment.rst @@ -65,6 +65,10 @@ On Ubuntu:: On Ubuntu Precise (12.04) you may also need to add the following packages:: sudo apt-get build-dep python-mysqldb + # enable cloud-archive to get the latest libvirt + sudo apt-get install python-software-properties + sudo add-apt-repository cloud-archive:icehouse + sudo apt-get install libvirt-dev On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux):: From a4c580ff03f4abb03970dd6de315ca0ba6849617 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 29 Jul 2014 10:18:13 -0700 Subject: [PATCH 225/486] Add trace logging to allocate_fixed_ip The address is being logged as None in 
some cases that are failing in grenade jobs so this adds more trace logging to the base network manager's allocate_fixed_ip method so we can see which paths are being taken in the code and what the outputs are. Change-Id: I37de4b3bbb9e51b57eb4d048e05fc00382eed23d Related-Bug: #1349617 --- nova/network/manager.py | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index e47d2adc25..08a4ad4525 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -881,13 +881,26 @@ def allocate_fixed_ip(self, context, instance_id, network, **kwargs): if network['cidr']: address = kwargs.get('address', None) if address: + LOG.debug('Associating instance with specified fixed IP ' + '%(address)s in network %(network)s on subnet ' + '%(cidr)s.' % + {'address': address, 'network': network['id'], + 'cidr': network['cidr']}, + instance=instance) fip = objects.FixedIP.associate(context, str(address), instance_id, network['id']) else: + LOG.debug('Associating instance with fixed IP from pool ' + 'in network %(network)s on subnet %(cidr)s.' 
% + {'network': network['id'], + 'cidr': network['cidr']}, + instance=instance) fip = objects.FixedIP.associate_pool( context.elevated(), network['id'], instance_id) + address = str(fip.address) + vif = objects.VirtualInterface.get_by_instance_and_network( context, instance_id, network['id']) fip.allocated = True @@ -895,6 +908,8 @@ def allocate_fixed_ip(self, context, instance_id, network, **kwargs): fip.save() cleanup.append(fip.disassociate) + LOG.debug('Refreshing security group members for instance.', + instance=instance) self._do_trigger_security_group_members_refresh_for_instance( instance_id) cleanup.append(functools.partial( @@ -917,14 +932,23 @@ def allocate_fixed_ip(self, context, instance_id, network, **kwargs): self.instance_dns_manager.delete_entry, instance_id, self.instance_dns_domain)) + LOG.debug('Setting up network %(network)s on host %(host)s.' % + {'network': network['id'], 'host': self.host}, + instance=instance) self._setup_network_on_host(context, network) cleanup.append(functools.partial( self._teardown_network_on_host, context, network)) quotas.commit(context) - LOG.debug('Allocated fixed ip %s on network %s', address, - network['uuid'], instance=instance) + if address is None: + # TODO(mriedem): should _setup_network_on_host return the addr? + LOG.debug('Fixed IP is setup on network %s but not returning ' + 'the specific IP from the base network manager.', + network['uuid'], instance=instance) + else: + LOG.debug('Allocated fixed ip %s on network %s', address, + network['uuid'], instance=instance) return address except Exception: From 9b559c31b781689fb66551f29a0cb8d10c7bac94 Mon Sep 17 00:00:00 2001 From: Dmitry Borodaenko Date: Mon, 23 Jun 2014 16:12:18 -0700 Subject: [PATCH 226/486] Use Ceph cluster stats to report disk info on RBD Local disk statistics on compute nodes are irrelevant when ephemeral disks are stored in RBD. 
With RBD, local disk space is not consumed when instances are started on a compute node, yet it is possible for scheduler to refuse to schedule an instance when combined disk usage of instances already running on the node exceeds total disk capacity reported by the hypervisor driver. Change-Id: I9718c727db205b6f2191f8435583391584e96e6e Closes-bug: #1332660 Signed-off-by: Dmitry Borodaenko --- nova/virt/libvirt/driver.py | 11 ++++++++--- nova/virt/libvirt/rbd.py | 8 ++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 47d670b6a4..d0177f8327 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -1099,12 +1099,15 @@ def cleanup(self, context, instance, network_info, block_device_info=None, if CONF.libvirt.images_type == 'rbd': self._cleanup_rbd(instance) - def _cleanup_rbd(self, instance): - driver = rbd.RBDDriver( + @staticmethod + def _get_rbd_driver(): + return rbd.RBDDriver( pool=CONF.libvirt.images_rbd_pool, ceph_conf=CONF.libvirt.images_rbd_ceph_conf, rbd_user=CONF.libvirt.rbd_user) - driver.cleanup_volumes(instance) + + def _cleanup_rbd(self, instance): + LibvirtDriver._get_rbd_driver().cleanup_volumes(instance) def _cleanup_lvm(self, instance): """Delete all LVM disks for given instance object.""" @@ -3855,6 +3858,8 @@ def _get_local_gb_info(): if CONF.libvirt.images_type == 'lvm': info = lvm.get_volume_group_info( CONF.libvirt.images_volume_group) + elif CONF.libvirt.images_type == 'rbd': + info = LibvirtDriver._get_rbd_driver().get_pool_info() else: info = libvirt_utils.get_fs_info(CONF.instances_path) diff --git a/nova/virt/libvirt/rbd.py b/nova/virt/libvirt/rbd.py index a7507f8bed..e638cf97c8 100644 --- a/nova/virt/libvirt/rbd.py +++ b/nova/virt/libvirt/rbd.py @@ -30,6 +30,7 @@ from nova.openstack.common import excutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging +from nova.openstack.common import units 
from nova import utils LOG = logging.getLogger(__name__) @@ -267,3 +268,10 @@ def belongs_to_instance(disk): LOG.warn(_LW('rbd remove %(volume)s in pool %(pool)s ' 'failed'), {'volume': volume, 'pool': self.pool}) + + def get_pool_info(self): + with RADOSClient(self) as client: + stats = client.cluster.get_cluster_stats() + return {'total': stats['kb'] * units.Ki, + 'free': stats['kb_avail'] * units.Ki, + 'used': stats['kb_used'] * units.Ki} From 5dea55785a52b7edb383cf61fab2f65f2ae79a72 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 30 Jul 2014 18:57:34 +0000 Subject: [PATCH 227/486] Revert "libvirt: add version cap tied to gate CI testing" This seems like it was really a policy change that should have had more community discussion. I think it's appropriate to revert this now and have this as a full community discussion about options here. This reverts commit 3bfc25a74e9dfbeb664eecf89d959428713eb178. Change-Id: Ie19b39e99b223862ca86b9a54c5372b56e1f5d74 --- nova/tests/virt/libvirt/test_driver.py | 15 --------------- nova/virt/libvirt/driver.py | 13 ------------- 2 files changed, 28 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 0280be9ff5..ccde7975ec 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -576,21 +576,6 @@ def test_public_api_signatures(self): inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertPublicAPISignatures(inst) - def test_min_version_cap(self): - drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) - - with mock.patch.object(drvr._conn, 'getLibVersion') as mock_ver: - mock_ver.return_value = utils.convert_version_to_int((1, 5, 0)) - - self.flags(version_cap="2.0.0", group="libvirt") - self.assertTrue(drvr._has_min_version((1, 4, 0))) - - self.flags(version_cap="1.3.0", group="libvirt") - self.assertFalse(drvr._has_min_version((1, 4, 0))) - - self.flags(version_cap="", group="libvirt") - 
self.assertTrue(drvr._has_min_version((1, 4, 0))) - def test_set_host_enabled_with_disable(self): # Tests disabling an enabled host. conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 68889ccfa4..31712fdfd8 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -106,11 +106,6 @@ LOG = logging.getLogger(__name__) libvirt_opts = [ - cfg.StrOpt('version_cap', - default='1.2.2', # Must always match the version in the gate - help='Limit use of features from newer libvirt versions. ' - 'Defaults to the version that is used for automated ' - 'testing of OpenStack.'), cfg.StrOpt('rescue_image_id', help='Rescue ami image. This will not be used if an image id ' 'is provided by the user.'), @@ -418,14 +413,6 @@ def _conn_has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None): try: if lv_ver is not None: libvirt_version = conn.getLibVersion() - - if CONF.libvirt.version_cap: - libvirt_version_cap = utils.convert_version_to_int( - utils.convert_version_to_tuple( - CONF.libvirt.version_cap)) - if libvirt_version > libvirt_version_cap: - libvirt_version = libvirt_version_cap - if libvirt_version < utils.convert_version_to_int(lv_ver): return False From cf024616111aab8c5f9dfdb350ab9cdff0633f91 Mon Sep 17 00:00:00 2001 From: Christopher Lefelhocz Date: Fri, 25 Jul 2014 15:47:32 -0500 Subject: [PATCH 228/486] Add unit tests to cells conductor link Adding unit tests to verify ConductorTaskRPCAPIRedirect and compute/api.py are properly hooked. If they become out of alignment then calls can be potentially ignored. We can't blindly error in the case of ignoring as this is used legitimately in other cases. 
Change-Id: Iec02d4a5906a5851c1e9c8258a2d77f908ec65bc Partial-Bug: 1348642 --- nova/tests/compute/test_compute_cells.py | 80 ++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py index 136282da43..69d611f86c 100644 --- a/nova/tests/compute/test_compute_cells.py +++ b/nova/tests/compute/test_compute_cells.py @@ -16,6 +16,7 @@ Tests For Compute w/ Cells """ import functools +import inspect import mock from oslo.config import cfg @@ -23,9 +24,16 @@ from nova.cells import manager from nova.compute import api as compute_api from nova.compute import cells_api as compute_cells_api +from nova.compute import flavors +from nova.compute import vm_states +from nova import context from nova import db +from nova import objects +from nova.openstack.common import timeutils from nova import quota +from nova import test from nova.tests.compute import test_compute +from nova.tests import fake_instance ORIG_COMPUTE_API = None @@ -204,6 +212,78 @@ def cast(context, method, *args, **kwargs): self.assertEqual(1, mock_msg.call_count) +class CellsConductorAPIRPCRedirect(test.NoDBTestCase): + def setUp(self): + super(CellsConductorAPIRPCRedirect, self).setUp() + + self.compute_api = compute_cells_api.ComputeCellsAPI() + self.cells_rpcapi = mock.MagicMock() + self.compute_api._compute_task_api.cells_rpcapi = self.cells_rpcapi + + self.context = context.RequestContext('fake', 'fake') + + @mock.patch.object(compute_api.API, '_record_action_start') + @mock.patch.object(compute_api.API, '_provision_instances') + @mock.patch.object(compute_api.API, '_check_and_transform_bdm') + @mock.patch.object(compute_api.API, '_get_image') + @mock.patch.object(compute_api.API, '_validate_and_build_base_options') + def test_build_instances(self, _validate, _get_image, _check_bdm, + _provision, _record_action_start): + _get_image.return_value = (None, 'fake-image') + _validate.return_value = (None, 1) + 
_check_bdm.return_value = 'bdms' + _provision.return_value = 'instances' + + self.compute_api.create(self.context, 'fake-flavor', 'fake-image') + + # Subsequent tests in class are verifying the hooking. We don't check + # args since this is verified in compute test code. + self.assertTrue(self.cells_rpcapi.build_instances.called) + + @mock.patch.object(compute_api.API, '_record_action_start') + @mock.patch.object(compute_api.API, '_resize_cells_support') + @mock.patch.object(compute_api.API, '_reserve_quota_delta') + @mock.patch.object(compute_api.API, '_upsize_quota_delta') + @mock.patch.object(objects.Instance, 'save') + @mock.patch.object(flavors, 'extract_flavor') + @mock.patch.object(compute_api.API, '_check_auto_disk_config') + def test_resize_instance(self, _check, _extract, _save, _upsize, _reserve, + _cells, _record): + _extract.return_value = {'name': 'fake', 'id': 'fake'} + orig_system_metadata = {} + instance = fake_instance.fake_instance_obj(self.context, + vm_state=vm_states.ACTIVE, cell_name='fake-cell', + launched_at=timeutils.utcnow(), + system_metadata=orig_system_metadata, + expected_attrs=['system_metadata']) + + self.compute_api.resize(self.context, instance) + self.assertTrue(self.cells_rpcapi.resize_instance.called) + + @mock.patch.object(objects.Instance, 'save') + def test_live_migrate_instance(self, instance_save): + orig_system_metadata = {} + instance = fake_instance.fake_instance_obj(self.context, + vm_state=vm_states.ACTIVE, cell_name='fake-cell', + launched_at=timeutils.utcnow(), + system_metadata=orig_system_metadata, + expected_attrs=['system_metadata']) + + self.compute_api.live_migrate(self.context, instance, + True, True, 'fake_dest_host') + + self.assertTrue(self.cells_rpcapi.live_migrate_instance.called) + + def test_check_equal(self): + task_api = self.compute_api.compute_task_api + tests = set() + for (name, value) in inspect.getmembers(self, inspect.ismethod): + if name.startswith('test_') and name != 'test_check_equal': + 
tests.add(name[5:]) + if tests != set(task_api.cells_compatible): + self.fail("Testcases not equivalent to cells_compatible list") + + class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase): def setUp(self): super(CellsComputePolicyTestCase, self).setUp() From dfb0239f4c20135270d0e278fecc1c8bdd6ffdd7 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Thu, 31 Jul 2014 10:16:02 +0900 Subject: [PATCH 229/486] Fix fake_update in test_update_missing_server In test_update_missing_server, fake_update should be used as update() method but it is used as create() instead. In addition, the test comment also is wrong. This patch fixes them. Change-Id: Ia4704413dd7355010546dba7096ed34225bb4b0f --- nova/tests/api/openstack/compute/plugins/v3/test_servers.py | 4 ++-- nova/tests/api/openstack/compute/test_servers.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index f1f08a7d6d..900b868439 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -3051,12 +3051,12 @@ def fake_create(*args, **kwargs): self.assertEqual(400, res.status_int) def test_update_missing_server(self): - # Test create with malformed body. + # Test update with malformed body. 
def fake_update(*args, **kwargs): raise test.TestingException("Should not reach the compute API.") - self.stubs.Set(compute_api.API, 'create', fake_update) + self.stubs.Set(compute_api.API, 'update', fake_update) req = fakes.HTTPRequestV3.blank('/servers/1') req.method = 'PUT' diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index fa67c8d3a4..c2a5d76a11 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -4828,12 +4828,12 @@ def fake_create(*args, **kwargs): self.assertEqual(422, res.status_int) def test_update_missing_server(self): - # Test create with malformed body. + # Test update with malformed body. def fake_update(*args, **kwargs): raise test.TestingException("Should not reach the compute API.") - self.stubs.Set(compute_api.API, 'create', fake_update) + self.stubs.Set(compute_api.API, 'update', fake_update) req = fakes.HTTPRequest.blank('/fake/servers/1') req.method = 'PUT' From 1d24e2cb63f6656f4300f9ad6182c5af7990fe73 Mon Sep 17 00:00:00 2001 From: Eli Qiao Date: Wed, 30 Jul 2014 22:17:00 +0800 Subject: [PATCH 230/486] Move check_image_exists out of try in _inject_data Move check_image_exists out of try in _inject_data and some typo fixes Change-Id: I9a6de0f993222c1921a0fc4d13ec14d875af2ed1 --- nova/virt/libvirt/driver.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 47d670b6a4..b499de59e3 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -2553,7 +2553,7 @@ def _is_booted_from_volume(instance, disk_mapping): or 'disk' not in disk_mapping) def _inject_data(self, instance, network_info, admin_pass, files, suffix): - """Injects data in an disk image + """Injects data in a disk image Helper used for injecting data in a disk image file system. 
@@ -2562,7 +2562,7 @@ def _inject_data(self, instance, network_info, admin_pass, files, suffix): network_info -- a dict that refers network speficications admin_pass -- a string used to set an admin password files -- a list of files needs to be injected - suffix -- a string used as a image name suffix + suffix -- a string used as an image name suffix """ # Handles the partition need to be used. target_partition = None @@ -2598,17 +2598,17 @@ def _inject_data(self, instance, network_info, admin_pass, files, suffix): image_type) img_id = instance['image_ref'] + if not injection_image.check_image_exists(): + LOG.warn(_LW('Image %s not found on disk storage. ' + 'Continue without injecting data'), + injection_image.path, instance=instance) + return try: - if injection_image.check_image_exists(): - disk.inject_data(injection_image.path, - key, net, metadata, admin_pass, files, - partition=target_partition, - use_cow=CONF.use_cow_images, - mandatory=('files',)) - else: - LOG.warn(_LW('Image %s not found on disk storage. ' - 'Continue without injecting data'), - injection_image.path, instance=instance) + disk.inject_data(injection_image.path, + key, net, metadata, admin_pass, files, + partition=target_partition, + use_cow=CONF.use_cow_images, + mandatory=('files',)) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE('Error injecting data into image ' From f3cc688405cbfa386f6ee4b40da33c13a415a92d Mon Sep 17 00:00:00 2001 From: Angus Lees Date: Thu, 31 Jul 2014 14:03:45 +1000 Subject: [PATCH 231/486] Issue multiple SQL statements in separate engine.execute() calls Some sqlalchemy drivers (eg: mysqlconnector) don't support engine.execute("sql stmt 1; sql stmt 2;") and require a separate execute() call for each SQL statement. After discussions with sqlalchemy author, he confirmed it would be better to fix callers rather than attempt to patch in support for multiple statements. With this patch, nova-manage db sync succeeds using mysqlconnector. 
Change-Id: I57e6ecdafe90110eaffe757f0ef0bf7b41b0f3e3 --- .../db/sqlalchemy/migrate_repo/versions/216_havana.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py b/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py index 1712784e4b..0469e5a265 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py @@ -1536,13 +1536,14 @@ def upgrade(migrate_engine): refcolumns=fkey_pair[1]) fkey.create() - if migrate_engine.name == "mysql": + if migrate_engine.name == 'mysql': # In Folsom we explicitly converted migrate_version to UTF8. - sql = "ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8;" + migrate_engine.execute( + 'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8') # Set default DB charset to UTF8. - sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" % \ - migrate_engine.url.database - migrate_engine.execute(sql) + migrate_engine.execute( + 'ALTER DATABASE %s DEFAULT CHARACTER SET utf8' % + migrate_engine.url.database) _create_shadow_tables(migrate_engine) From 8a7c29c4f5c11487af62af854020bb3d92a0f2d1 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Thu, 31 Jul 2014 13:12:08 +0900 Subject: [PATCH 232/486] Merge BadRequest tests of "attach interfaces" API In test_attach_interfaces, there are BadRequest tests of "attach interfaces". Most parts of them are duplicated. This patch merges them for the readability and clarifying their purposes. 
Change-Id: Ib9dd2840be46dec6b70fe9e3fda35de88f84b4a9 --- .../compute/contrib/test_attach_interfaces.py | 33 ++++++++++--------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py index c1e9d84ce8..c8c03417e2 100644 --- a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py +++ b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py @@ -253,34 +253,35 @@ def test_attach_interface_with_network_id(self): self.assertEqual(result['interfaceAttachment']['net_id'], FAKE_NET_ID2) - def test_attach_interface_with_port_and_network_id(self): + def _attach_interface_bad_request_case(self, body): self.stubs.Set(compute_api.API, 'attach_interface', fake_attach_interface) attachments = attach_interfaces.InterfaceAttachmentController() req = webob.Request.blank('/v2/fake/os-interfaces/attach') req.method = 'POST' - req.body = jsonutils.dumps({'interfaceAttachment': - {'port_id': FAKE_PORT_ID1, - 'net_id': FAKE_NET_ID2}}) + req.body = jsonutils.dumps(body) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, attachments.create, req, FAKE_UUID1, jsonutils.loads(req.body)) + def test_attach_interface_with_port_and_network_id(self): + body = { + 'interfaceAttachment': { + 'port_id': FAKE_PORT_ID1, + 'net_id': FAKE_NET_ID2 + } + } + self._attach_interface_bad_request_case(body) + def test_attach_interface_with_invalid_data(self): - self.stubs.Set(compute_api.API, 'attach_interface', - fake_attach_interface) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v2/fake/os-interfaces/attach') - req.method = 'POST' - req.body = jsonutils.dumps({'interfaceAttachment': - {'net_id': 'bad_id'}}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - 
self.assertRaises(exc.HTTPBadRequest, - attachments.create, req, FAKE_UUID1, - jsonutils.loads(req.body)) + body = { + 'interfaceAttachment': { + 'net_id': 'bad_id' + } + } + self._attach_interface_bad_request_case(body) def test_attach_interface_with_invalid_state(self): def fake_attach_interface_invalid_state(*args, **kwargs): From ab91f3be20756c046f90ce24a3d26f74c4333b97 Mon Sep 17 00:00:00 2001 From: Pawel Koniszewski Date: Thu, 31 Jul 2014 05:32:35 -0400 Subject: [PATCH 233/486] Improved error logging in nova-network for allocate_fixed_ip() When a Nova deployment runs out of fixed IP addresses, allocating another IP ends with stack trace without clear explanation. To prevent such behavior this patch introduces few try-except blocks that writes appropriate error message to the log file and return better error code via API instead of stack trace. Change-Id: I8111ae883719ad693113e1d3bfedc8685427c55f Closes-Bug: #1218572 --- nova/api/openstack/compute/contrib/multinic.py | 6 +++++- nova/api/openstack/compute/plugins/v3/multinic.py | 6 +++++- .../openstack/compute/contrib/test_multinic_xs.py | 15 +++++++++++++++ .../openstack/compute/plugins/v3/test_multinic.py | 15 +++++++++++++++ 4 files changed, 40 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/compute/contrib/multinic.py b/nova/api/openstack/compute/contrib/multinic.py index 6887c2ef8e..9ea526c818 100644 --- a/nova/api/openstack/compute/contrib/multinic.py +++ b/nova/api/openstack/compute/contrib/multinic.py @@ -56,7 +56,11 @@ def _add_fixed_ip(self, req, id, body): instance = self._get_instance(context, id, want_objects=True) network_id = body['addFixedIp']['networkId'] - self.compute_api.add_fixed_ip(context, instance, network_id) + try: + self.compute_api.add_fixed_ip(context, instance, network_id) + except exception.NoMoreFixedIps as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + return webob.Response(status_int=202) @wsgi.action('removeFixedIp') diff --git 
a/nova/api/openstack/compute/plugins/v3/multinic.py b/nova/api/openstack/compute/plugins/v3/multinic.py index cb6aee2224..b321862456 100644 --- a/nova/api/openstack/compute/plugins/v3/multinic.py +++ b/nova/api/openstack/compute/plugins/v3/multinic.py @@ -49,7 +49,11 @@ def _add_fixed_ip(self, req, id, body): instance = common.get_instance(self.compute_api, context, id, want_objects=True) network_id = body['add_fixed_ip']['network_id'] - self.compute_api.add_fixed_ip(context, instance, network_id) + try: + self.compute_api.add_fixed_ip(context, instance, network_id) + except exception.NoMoreFixedIps as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + return webob.Response(status_int=202) @wsgi.action('remove_fixed_ip') diff --git a/nova/tests/api/openstack/compute/contrib/test_multinic_xs.py b/nova/tests/api/openstack/compute/contrib/test_multinic_xs.py index f6786686c3..94ab69c821 100644 --- a/nova/tests/api/openstack/compute/contrib/test_multinic_xs.py +++ b/nova/tests/api/openstack/compute/contrib/test_multinic_xs.py @@ -13,9 +13,11 @@ # License for the specific language governing permissions and limitations # under the License. 
+import mock import webob from nova import compute +from nova import exception from nova import objects from nova.openstack.common import jsonutils from nova import test @@ -93,6 +95,19 @@ def test_add_fixed_ip_no_network(self): self.assertEqual(resp.status_int, 422) self.assertEqual(last_add_fixed_ip, (None, None)) + @mock.patch.object(compute.api.API, 'add_fixed_ip') + def test_add_fixed_ip_no_more_ips_available(self, mock_add_fixed_ip): + mock_add_fixed_ip.side_effect = exception.NoMoreFixedIps + + body = dict(addFixedIp=dict(networkId='test_net')) + req = webob.Request.blank('/v2/fake/servers/%s/action' % UUID) + req.method = 'POST' + req.body = jsonutils.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(self.app) + self.assertEqual(resp.status_int, 400) + def test_remove_fixed_ip(self): global last_remove_fixed_ip last_remove_fixed_ip = (None, None) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_multinic.py b/nova/tests/api/openstack/compute/plugins/v3/test_multinic.py index ae6ae283cd..6a0118ba25 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_multinic.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_multinic.py @@ -13,9 +13,11 @@ # License for the specific language governing permissions and limitations # under the License. 
+import mock import webob from nova import compute +from nova import exception from nova.openstack.common import jsonutils from nova import test from nova.tests.api.openstack import fakes @@ -101,6 +103,19 @@ def test_add_fixed_ip_no_network(self): self.assertEqual(resp.status_int, 400) self.assertEqual(last_add_fixed_ip, (None, None)) + @mock.patch.object(compute.api.API, 'add_fixed_ip') + def test_add_fixed_ip_no_more_ips_available(self, mock_add_fixed_ip): + mock_add_fixed_ip.side_effect = exception.NoMoreFixedIps + + body = dict(add_fixed_ip=dict()) + req = webob.Request.blank('/v3/servers/%s/action' % UUID) + req.method = 'POST' + req.body = jsonutils.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(self.app) + self.assertEqual(resp.status_int, 400) + def test_remove_fixed_ip(self): global last_remove_fixed_ip last_remove_fixed_ip = (None, None) From 0621b18773b139dd8029125a0ecebe244f20e6ea Mon Sep 17 00:00:00 2001 From: Nikola Dipanov Date: Thu, 3 Jul 2014 15:13:42 +0200 Subject: [PATCH 234/486] Make DriverBlockDevice save() context arg optional We do not need to pass it explicitly since we wrap BlockDeviceMapping object, which hold their own context. Also remove an ugly use of get_admin_context() to get around the limitation that existed previously. 
Change-Id: Ide4d2e4e3fd1b456dec9e71939b63e913849c6cf --- nova/tests/virt/test_block_device.py | 5 +++++ nova/virt/block_device.py | 10 +++++++--- nova/virt/libvirt/driver.py | 2 +- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/nova/tests/virt/test_block_device.py b/nova/tests/virt/test_block_device.py index f34d18b120..a7b1a5aa60 100644 --- a/nova/tests/virt/test_block_device.py +++ b/nova/tests/virt/test_block_device.py @@ -213,6 +213,11 @@ def _test_driver_device(self, name): save_mock.assert_called_once_with(self.context) + # Test the save method with no context passed + with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock: + test_bdm.save() + save_mock.assert_called_once_with() + def _test_driver_default_size(self, name): size = 'swap_size' if name == 'swap' else 'size' no_size_bdm = getattr(self, "%s_bdm" % name).copy() diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py index c900194b1e..ad857b3827 100644 --- a/nova/virt/block_device.py +++ b/nova/virt/block_device.py @@ -129,10 +129,14 @@ def attach(self, **kwargs): """ raise NotImplementedError() - def save(self, context): + def save(self, context=None): for attr_name, key_name in self._update_on_save.iteritems(): setattr(self._bdm_obj, attr_name, self[key_name or attr_name]) - self._bdm_obj.save(context) + + if context: + self._bdm_obj.save(context) + else: + self._bdm_obj.save() class DriverSwapBlockDevice(DriverBlockDevice): @@ -268,7 +272,7 @@ def refresh_connection_info(self, context, instance, connection_info['serial'] = self.volume_id self['connection_info'] = connection_info - def save(self, context): + def save(self, context=None): # NOTE(ndipanov): we might want to generalize this by adding it to the # _update_on_save and adding a transformation function. 
try: diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index f75efc0a07..9540d458b6 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -3104,7 +3104,7 @@ def _get_guest_storage_config(self, instance, image_meta, cfg = self._connect_volume(connection_info, info) devices.append(cfg) vol['connection_info'] = connection_info - vol.save(nova_context.get_admin_context()) + vol.save() if 'disk.config' in disk_mapping: diskconfig = self._get_guest_disk_config(instance, From 50c2d3d5d266e724c3f53d9a5323093a0a19b71a Mon Sep 17 00:00:00 2001 From: Paul Murray Date: Wed, 29 Jan 2014 18:05:52 +0000 Subject: [PATCH 235/486] Add extensible resources to resource tracker (2) A resource plugin extension point is added to the resource tracker to allow the types of resources allocated at the compute node to be extensible. Information maintained by these plug-ins is written to the compute_nodes table in the database. The scheduler uses the information in the compute_nodes table to determine scheduling decisions. A plugin that implements vcpu resource tracking is included and all other code for tracking vcpu has been removed. This example ensures the plugins are tested in gate jobs. This was previously merged and reverted due to a bug affecting ironic CI. The bug was pre-existing but was exposed by that patch. 
This change is based on the bug fix here: Icb19148660bca542a8120ecab064551d67ac28af and the previous version of this change is here: I64108338e3c958ba1276aaf113a68861cbe286f5 Co-Authored-By: Andrea Rosa Co-Authored-By: Paul Murray This is part of: blueprint extensible-resource-tracking Change-Id: If1381f99fd7db420380288faf7b2f57553f69136 --- nova/compute/claims.py | 39 +-- nova/compute/resource_tracker.py | 37 ++- nova/compute/resources/__init__.py | 133 ++++++++ nova/compute/resources/base.py | 93 ++++++ nova/compute/resources/vcpu.py | 83 +++++ nova/compute/stats.py | 20 +- nova/tests/compute/fake_resource_tracker.py | 2 + nova/tests/compute/test_claims.py | 48 ++- nova/tests/compute/test_resource_tracker.py | 42 ++- nova/tests/compute/test_resources.py | 344 ++++++++++++++++++++ nova/tests/compute/test_stats.py | 3 - setup.cfg | 2 + 12 files changed, 761 insertions(+), 85 deletions(-) create mode 100644 nova/compute/resources/__init__.py create mode 100644 nova/compute/resources/base.py create mode 100644 nova/compute/resources/vcpu.py create mode 100644 nova/tests/compute/test_resources.py diff --git a/nova/compute/claims.py b/nova/compute/claims.py index 046d171692..4f5356ce78 100644 --- a/nova/compute/claims.py +++ b/nova/compute/claims.py @@ -42,10 +42,6 @@ def disk_gb(self): def memory_mb(self): return 0 - @property - def vcpus(self): - return 0 - def __enter__(self): return self @@ -57,8 +53,8 @@ def abort(self): pass def __str__(self): - return "[Claim: %d MB memory, %d GB disk, %d VCPUS]" % (self.memory_mb, - self.disk_gb, self.vcpus) + return "[Claim: %d MB memory, %d GB disk]" % (self.memory_mb, + self.disk_gb) class Claim(NopClaim): @@ -102,10 +98,6 @@ def disk_gb(self): def memory_mb(self): return self.instance['memory_mb'] + self.overhead['memory_mb'] - @property - def vcpus(self): - return self.instance['vcpus'] - def abort(self): """Compute operation requiring claimed resources has failed or been aborted. 
@@ -130,18 +122,16 @@ def _claim_test(self, resources, limits=None): # unlimited: memory_mb_limit = limits.get('memory_mb') disk_gb_limit = limits.get('disk_gb') - vcpu_limit = limits.get('vcpu') msg = _("Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d " - "GB, VCPUs %(vcpus)d") - params = {'memory_mb': self.memory_mb, 'disk_gb': self.disk_gb, - 'vcpus': self.vcpus} + "GB") + params = {'memory_mb': self.memory_mb, 'disk_gb': self.disk_gb} LOG.audit(msg % params, instance=self.instance) reasons = [self._test_memory(resources, memory_mb_limit), self._test_disk(resources, disk_gb_limit), - self._test_cpu(resources, vcpu_limit), self._test_pci()] + reasons = reasons + self._test_ext_resources(limits) reasons = [r for r in reasons if r is not None] if len(reasons) > 0: raise exception.ComputeResourcesUnavailable(reason= @@ -176,14 +166,9 @@ def _test_pci(self): if not can_claim: return _('Claim pci failed.') - def _test_cpu(self, resources, limit): - type_ = _("CPUs") - unit = "VCPUs" - total = resources['vcpus'] - used = resources['vcpus_used'] - requested = self.vcpus - - return self._test(type_, unit, total, used, requested, limit) + def _test_ext_resources(self, limits): + return self.tracker.ext_resources_handler.test_resources( + self.instance, limits) def _test(self, type_, unit, total, used, requested, limit): """Test if the given type of resource needed for a claim can be safely @@ -235,10 +220,6 @@ def disk_gb(self): def memory_mb(self): return self.instance_type['memory_mb'] + self.overhead['memory_mb'] - @property - def vcpus(self): - return self.instance_type['vcpus'] - def _test_pci(self): pci_requests = pci_request.get_instance_pci_requests( self.instance, 'new_') @@ -248,6 +229,10 @@ def _test_pci(self): if not claim: return _('Claim pci failed.') + def _test_ext_resources(self, limits): + return self.tracker.ext_resources_handler.test_resources( + self.instance_type, limits) + def abort(self): """Compute operation requiring claimed resources 
has failed or been aborted. diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py index 37ea921f6e..b5d25348a3 100644 --- a/nova/compute/resource_tracker.py +++ b/nova/compute/resource_tracker.py @@ -24,6 +24,7 @@ from nova.compute import claims from nova.compute import flavors from nova.compute import monitors +from nova.compute import resources as ext_resources from nova.compute import task_states from nova.compute import vm_states from nova import conductor @@ -46,7 +47,10 @@ help='Amount of memory in MB to reserve for the host'), cfg.StrOpt('compute_stats_class', default='nova.compute.stats.Stats', - help='Class that will manage stats for the local compute host') + help='Class that will manage stats for the local compute host'), + cfg.ListOpt('compute_resources', + default=['vcpu'], + help='The names of the extra resources to track.'), ] CONF = cfg.CONF @@ -75,6 +79,8 @@ def __init__(self, host, driver, nodename): self.conductor_api = conductor.API() monitor_handler = monitors.ResourceMonitorHandler() self.monitors = monitor_handler.choose_monitors(self) + self.ext_resources_handler = \ + ext_resources.ResourceHandler(CONF.compute_resources) self.notifier = rpc.get_notifier() self.old_resources = {} @@ -229,12 +235,10 @@ def drop_resize_claim(self, instance, instance_type=None, prefix='new_'): instance_type = self._get_instance_type(ctxt, instance, prefix) if instance_type['id'] == itype['id']: - self.stats.update_stats_for_migration(itype, sign=-1) if self.pci_tracker: self.pci_tracker.update_pci_for_migration(instance, sign=-1) self._update_usage(self.compute_node, itype, sign=-1) - self.compute_node['stats'] = jsonutils.dumps(self.stats) ctxt = context.get_admin_context() self._update(ctxt, self.compute_node) @@ -377,9 +381,20 @@ def _sync_compute_node(self, context, resources): LOG.info(_('Compute_service record updated for %(host)s:%(node)s') % {'host': self.host, 'node': self.nodename}) + def _write_ext_resources(self, 
resources): + resources['stats'] = {} + resources['stats'].update(self.stats) + self.ext_resources_handler.write_resources(resources) + def _create(self, context, values): """Create the compute node in the DB.""" # initialize load stats from existing instances: + self._write_ext_resources(values) + # NOTE(pmurray): the stats field is stored as a json string. The + # json conversion will be done automatically by the ComputeNode object + # so this can be removed when using ComputeNode. + values['stats'] = jsonutils.dumps(values['stats']) + self.compute_node = self.conductor_api.compute_node_create(context, values) @@ -449,6 +464,12 @@ def _resource_change(self, resources): def _update(self, context, values): """Persist the compute node updates to the DB.""" + self._write_ext_resources(values) + # NOTE(pmurray): the stats field is stored as a json string. The + # json conversion will be done automatically by the ComputeNode object + # so this can be removed when using ComputeNode. + values['stats'] = jsonutils.dumps(values['stats']) + if not self._resource_change(values): return if "service" in self.compute_node: @@ -475,7 +496,7 @@ def _update_usage(self, resources, usage, sign=1): resources['local_gb_used']) resources['running_vms'] = self.stats.num_instances - resources['vcpus_used'] = self.stats.num_vcpus_used + self.ext_resources_handler.update_from_instance(usage, sign) def _update_usage_from_migration(self, context, instance, resources, migration): @@ -518,11 +539,9 @@ def _update_usage_from_migration(self, context, instance, resources, migration['old_instance_type_id']) if itype: - self.stats.update_stats_for_migration(itype) if self.pci_tracker: self.pci_tracker.update_pci_for_migration(instance) self._update_usage(resources, itype) - resources['stats'] = jsonutils.dumps(self.stats) if self.pci_tracker: resources['pci_stats'] = jsonutils.dumps( self.pci_tracker.stats) @@ -595,7 +614,6 @@ def _update_usage_from_instance(self, resources, instance): 
self._update_usage(resources, instance, sign=sign) resources['current_workload'] = self.stats.calculate_workload() - resources['stats'] = jsonutils.dumps(self.stats) if self.pci_tracker: resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats) else: @@ -612,12 +630,10 @@ def _update_usage_from_instances(self, resources, instances): # purge old stats and init with anything passed in by the driver self.stats.clear() self.stats.digest_stats(resources.get('stats')) - resources['stats'] = jsonutils.dumps(self.stats) # set some initial values, reserve room for host/hypervisor: resources['local_gb_used'] = CONF.reserved_host_disk_mb / 1024 resources['memory_mb_used'] = CONF.reserved_host_memory_mb - resources['vcpus_used'] = 0 resources['free_ram_mb'] = (resources['memory_mb'] - resources['memory_mb_used']) resources['free_disk_gb'] = (resources['local_gb'] - @@ -625,6 +641,9 @@ def _update_usage_from_instances(self, resources, instances): resources['current_workload'] = 0 resources['running_vms'] = 0 + # Reset values for extended resources + self.ext_resources_handler.reset_resources(resources, self.driver) + for instance in instances: if instance['vm_state'] == vm_states.DELETED: continue diff --git a/nova/compute/resources/__init__.py b/nova/compute/resources/__init__.py new file mode 100644 index 0000000000..cb023ea523 --- /dev/null +++ b/nova/compute/resources/__init__.py @@ -0,0 +1,133 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import stevedore + +from nova.i18n import _LW +from nova.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +RESOURCE_NAMESPACE = 'nova.compute.resources' + + +class ResourceHandler(): + + def _log_missing_plugins(self, names): + for name in names: + if name not in self._mgr.names(): + LOG.warn(_LW('Compute resource plugin %s was not loaded') % + name) + + def __init__(self, names, propagate_map_exceptions=False): + """Initialise the resource handler by loading the plugins. + + The ResourceHandler uses stevedore to load the resource plugins. + The handler can handle and report exceptions raised in the plugins + depending on the value of the propagate_map_exceptions parameter. + It is useful in testing to propagate exceptions so they are exposed + as part of the test. If exceptions are not propagated they are + logged at error level. + + Any named plugins that are not located are logged. + + :param names: the list of plugins to load by name + :param propagate_map_exceptions: True indicates exceptions in the + plugins should be raised, False indicates they should be handled and + logged. + """ + self._mgr = stevedore.NamedExtensionManager( + namespace=RESOURCE_NAMESPACE, + names=names, + propagate_map_exceptions=propagate_map_exceptions, + invoke_on_load=True) + self._log_missing_plugins(names) + + def reset_resources(self, resources, driver): + """Reset the resources to their initial state. + + Each plugin is called to reset its state. The resources data provided + is initial state gathered from the hypervisor. The driver is also + provided in case the plugin needs to obtain additional information + from the driver, for example, the memory calculation obtains + the memory overhead from the driver. 
+ + :param resources: the resources reported by the hypervisor + :param driver: the driver for the hypervisor + + :returns: None + """ + if self._mgr.extensions: + self._mgr.map_method('reset', resources, driver) + + def test_resources(self, usage, limits): + """Test the ability to support the given instance. + + Each resource plugin is called to determine if its resource is able + to support the additional requirements of a new instance. The + plugins either return None to indicate they have sufficient resource + available or a human readable string to indicate why they can not. + + :param usage: the additional resource usage + :param limits: limits used for the calculation + + :returns: a list of return values from the plugins + """ + if not self._mgr.extensions: + return [] + + reasons = self._mgr.map_method('test', usage, limits) + return reasons + + def update_from_instance(self, usage, sign=1): + """Update the resource information to reflect the allocation for + an instance with the given resource usage. + + :param usage: the resource usage of the instance + :param sign: has value 1 or -1. 1 indicates the instance is being + added to the current usage, -1 indicates the instance is being removed. + + :returns: None + """ + if not self._mgr.extensions: + return + + if sign == 1: + self._mgr.map_method('add_instance', usage) + else: + self._mgr.map_method('remove_instance', usage) + + def write_resources(self, resources): + """Write the resource data to populate the resources. + + Each resource plugin is called to write its resource data to + resources. + + :param resources: the compute node resources + + :returns: None + """ + if self._mgr.extensions: + self._mgr.map_method('write', resources) + + def report_free_resources(self): + """Each resource plugin is called to log free resource information. 
+ + :returns: None + """ + if not self._mgr.extensions: + return + + self._mgr.map_method('report_free') diff --git a/nova/compute/resources/base.py b/nova/compute/resources/base.py new file mode 100644 index 0000000000..aebd29fb40 --- /dev/null +++ b/nova/compute/resources/base.py @@ -0,0 +1,93 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class Resource(object): + """This base class defines the interface used for compute resource + plugins. It is not necessary to use this base class, but all compute + resource plugins must implement the abstract methods found here. + An instance of the plugin object is instantiated when it is loaded + by calling __init__() with no parameters. + """ + + @abc.abstractmethod + def reset(self, resources, driver): + """Set the resource to an initial state based on the resource + view discovered from the hypervisor. + """ + pass + + @abc.abstractmethod + def test(self, usage, limits): + """Test to see if we have sufficient resources to allocate for + an instance with the given resource usage. 
+ + :param usage: the resource usage of the instances + :param limits: limits to apply + + :returns: None if the test passes or a string describing the reason + why the test failed + """ + pass + + @abc.abstractmethod + def add_instance(self, usage): + """Update resource information adding allocation according to the + given resource usage. + + :param usage: the resource usage of the instance being added + + :returns: None + """ + pass + + @abc.abstractmethod + def remove_instance(self, usage): + """Update resource information removing allocation according to the + given resource usage. + + :param usage: the resource usage of the instance being removed + + :returns: None + + """ + pass + + @abc.abstractmethod + def write(self, resources): + """Write resource data to populate resources. + + :param resources: the resources data to be populated + + :returns: None + """ + pass + + @abc.abstractmethod + def report_free(self): + """Log free resources. + + This method logs how much free resource is held by + the resource plugin. + + :returns: None + """ + pass diff --git a/nova/compute/resources/vcpu.py b/nova/compute/resources/vcpu.py new file mode 100644 index 0000000000..e7290a3e1a --- /dev/null +++ b/nova/compute/resources/vcpu.py @@ -0,0 +1,83 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova.compute.resources import base +from nova.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class VCPU(base.Resource): + """VCPU compute resource plugin. + + This is effectively a simple counter based on the vcpu requirement of each + instance. + """ + def __init__(self): + # initialize to a 'zero' resource. + # reset will be called to set real resource values + self._total = 0 + self._used = 0 + + def reset(self, resources, driver): + # total vcpu is reset to the value taken from resources. + self._total = int(resources['vcpus']) + self._used = 0 + + def _get_requested(self, usage): + return int(usage.get('vcpus', 0)) + + def _get_limit(self, limits): + if limits and 'vcpu' in limits: + return int(limits.get('vcpu')) + + def test(self, usage, limits): + requested = self._get_requested(usage) + limit = self._get_limit(limits) + + LOG.debug('Total CPUs: %(total)d VCPUs, used: %(used).02f VCPUs' % + {'total': self._total, 'used': self._used}) + + if limit is None: + # treat resource as unlimited: + LOG.debug('CPUs limit not specified, defaulting to unlimited') + return + + free = limit - self._used + + # Oversubscribed resource policy info: + LOG.debug('CPUs limit: %(limit).02f VCPUs, free: %(free).02f VCPUs' % + {'limit': limit, 'free': free}) + + if requested > free: + return ('Free CPUs %(free).02f VCPUs < ' + 'requested %(requested)d VCPUs' % + {'free': free, 'requested': requested}) + + def add_instance(self, usage): + requested = int(usage.get('vcpus', 0)) + self._used += requested + + def remove_instance(self, usage): + requested = int(usage.get('vcpus', 0)) + self._used -= requested + + def write(self, resources): + resources['vcpus'] = self._total + resources['vcpus_used'] = self._used + + def report_free(self): + free_vcpus = self._total - self._used + LOG.debug('Free VCPUs: %s' % free_vcpus) diff --git a/nova/compute/stats.py b/nova/compute/stats.py index 559d7cba7d..550bb5355d 100644 --- a/nova/compute/stats.py 
+++ b/nova/compute/stats.py @@ -90,10 +90,6 @@ def num_os_type(self, os_type): key = "num_os_type_%s" % os_type return self.get(key, 0) - @property - def num_vcpus_used(self): - return self.get("num_vcpus_used", 0) - def update_stats_for_instance(self, instance): """Update stats after an instance is changed.""" @@ -108,14 +104,12 @@ def update_stats_for_instance(self, instance): self._decrement("num_task_%s" % old_state['task_state']) self._decrement("num_os_type_%s" % old_state['os_type']) self._decrement("num_proj_%s" % old_state['project_id']) - x = self.get("num_vcpus_used", 0) - self["num_vcpus_used"] = x - old_state['vcpus'] else: # new instance self._increment("num_instances") # Now update stats from the new instance state: - (vm_state, task_state, os_type, project_id, vcpus) = \ + (vm_state, task_state, os_type, project_id) = \ self._extract_state_from_instance(instance) if vm_state == vm_states.DELETED: @@ -127,16 +121,10 @@ def update_stats_for_instance(self, instance): self._increment("num_task_%s" % task_state) self._increment("num_os_type_%s" % os_type) self._increment("num_proj_%s" % project_id) - x = self.get("num_vcpus_used", 0) - self["num_vcpus_used"] = x + vcpus # save updated I/O workload in stats: self["io_workload"] = self.io_workload - def update_stats_for_migration(self, instance_type, sign=1): - x = self.get("num_vcpus_used", 0) - self["num_vcpus_used"] = x + (sign * instance_type['vcpus']) - def _decrement(self, key): x = self.get(key, 0) self[key] = x - 1 @@ -153,10 +141,8 @@ def _extract_state_from_instance(self, instance): task_state = instance['task_state'] os_type = instance['os_type'] project_id = instance['project_id'] - vcpus = instance['vcpus'] self.states[uuid] = dict(vm_state=vm_state, task_state=task_state, - os_type=os_type, project_id=project_id, - vcpus=vcpus) + os_type=os_type, project_id=project_id) - return (vm_state, task_state, os_type, project_id, vcpus) + return (vm_state, task_state, os_type, project_id) diff --git 
a/nova/tests/compute/fake_resource_tracker.py b/nova/tests/compute/fake_resource_tracker.py index c8f1e14647..b0fec2042b 100644 --- a/nova/tests/compute/fake_resource_tracker.py +++ b/nova/tests/compute/fake_resource_tracker.py @@ -20,10 +20,12 @@ class FakeResourceTracker(resource_tracker.ResourceTracker): """Version without a DB requirement.""" def _create(self, context, values): + self._write_ext_resources(values) self.compute_node = values self.compute_node['id'] = 1 def _update(self, context, values, prune_stats=False): + self._write_ext_resources(values) self.compute_node.update(values) def _get_service(self, context): diff --git a/nova/tests/compute/test_claims.py b/nova/tests/compute/test_claims.py index be60f54016..0df1875c17 100644 --- a/nova/tests/compute/test_claims.py +++ b/nova/tests/compute/test_claims.py @@ -25,10 +25,21 @@ from nova import test +class FakeResourceHandler(object): + test_called = False + usage_is_instance = False + + def test_resources(self, usage, limits): + self.test_called = True + self.usage_is_itype = usage.get('name') is 'fakeitype' + return [] + + class DummyTracker(object): icalled = False rcalled = False pci_tracker = pci_manager.PciDevTracker() + ext_resources_handler = FakeResourceHandler() def abort_instance_claim(self, *args, **kwargs): self.icalled = True @@ -101,9 +112,6 @@ def assertRaisesRegexp(self, re_obj, e, fn, *a, **kw): except e as ee: self.assertTrue(re.search(re_obj, str(ee))) - def test_cpu_unlimited(self): - self._claim(vcpus=100000) - def test_memory_unlimited(self): self._claim(memory_mb=99999999) @@ -113,10 +121,6 @@ def test_disk_unlimited_root(self): def test_disk_unlimited_ephemeral(self): self._claim(ephemeral_gb=999999) - def test_cpu_oversubscription(self): - limits = {'vcpu': 16} - self._claim(limits, vcpus=8) - def test_memory_with_overhead(self): overhead = {'memory_mb': 8} limits = {'memory_mb': 2048} @@ -131,11 +135,6 @@ def test_memory_with_overhead_insufficient(self): self._claim, 
limits=limits, overhead=overhead, memory_mb=2040) - def test_cpu_insufficient(self): - limits = {'vcpu': 16} - self.assertRaises(exception.ComputeResourcesUnavailable, - self._claim, limits=limits, vcpus=17) - def test_memory_oversubscription(self): self._claim(memory_mb=4096) @@ -162,21 +161,6 @@ def test_disk_and_memory_insufficient(self): self._claim, limits=limits, root_gb=10, ephemeral_gb=40, memory_mb=16384) - def test_disk_and_cpu_insufficient(self): - limits = {'disk_gb': 45, 'vcpu': 16} - self.assertRaisesRegexp(re.compile("disk.*vcpus", re.IGNORECASE), - exception.ComputeResourcesUnavailable, - self._claim, limits=limits, root_gb=10, ephemeral_gb=40, - vcpus=17) - - def test_disk_and_cpu_and_memory_insufficient(self): - limits = {'disk_gb': 45, 'vcpu': 16, 'memory_mb': 8192} - pat = "memory.*disk.*vcpus" - self.assertRaisesRegexp(re.compile(pat, re.IGNORECASE), - exception.ComputeResourcesUnavailable, - self._claim, limits=limits, root_gb=10, ephemeral_gb=40, - vcpus=17, memory_mb=16384) - def test_pci_pass(self): dev_dict = { 'compute_node_id': 1, @@ -224,6 +208,11 @@ def test_pci_pass_no_requests(self): self._set_pci_request(claim) claim._test_pci() + def test_ext_resources(self): + self._claim() + self.assertTrue(self.tracker.ext_resources_handler.test_called) + self.assertFalse(self.tracker.ext_resources_handler.usage_is_itype) + def test_abort(self): claim = self._abort() self.assertTrue(claim.tracker.icalled) @@ -260,6 +249,11 @@ def _set_pci_request(self, claim): claim.instance.update( system_metadata={'new_pci_requests': jsonutils.dumps(request)}) + def test_ext_resources(self): + self._claim() + self.assertTrue(self.tracker.ext_resources_handler.test_called) + self.assertTrue(self.tracker.ext_resources_handler.usage_is_itype) + def test_abort(self): claim = self._abort() self.assertTrue(claim.tracker.rcalled) diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py index 119689519f..0a5a1c0653 100644 
--- a/nova/tests/compute/test_resource_tracker.py +++ b/nova/tests/compute/test_resource_tracker.py @@ -22,6 +22,7 @@ from nova.compute import flavors from nova.compute import resource_tracker +from nova.compute import resources from nova.compute import task_states from nova.compute import vm_states from nova import context @@ -47,6 +48,7 @@ FAKE_VIRT_VCPUS = 1 FAKE_VIRT_STATS = {'virt_stat': 10} FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS) +RESOURCE_NAMES = ['vcpu'] CONF = cfg.CONF @@ -165,8 +167,10 @@ def _create_compute_node(self, values=None): "current_workload": 1, "running_vms": 0, "cpu_info": None, - "stats": [{"key": "num_instances", "value": "1"}], - "hypervisor_hostname": "fakenode", + "stats": { + "num_instances": "1", + }, + "hypervisor_hostname": "fakenode", } if values: compute.update(values) @@ -319,6 +323,8 @@ def _tracker(self, host=None): driver = self._driver() tracker = resource_tracker.ResourceTracker(host, driver, node) + tracker.ext_resources_handler = \ + resources.ResourceHandler(RESOURCE_NAMES, True) return tracker @@ -574,6 +580,38 @@ def _driver(self): return FakeVirtDriver(pci_support=True) +class TrackerExtraResourcesTestCase(BaseTrackerTestCase): + + def setUp(self): + super(TrackerExtraResourcesTestCase, self).setUp() + self.driver = self._driver() + + def _driver(self): + return FakeVirtDriver() + + def test_set_empty_ext_resources(self): + resources = self.driver.get_available_resource(self.tracker.nodename) + self.assertNotIn('stats', resources) + self.tracker._write_ext_resources(resources) + self.assertIn('stats', resources) + + def test_set_extra_resources(self): + def fake_write_resources(resources): + resources['stats']['resA'] = '123' + resources['stats']['resB'] = 12 + + self.stubs.Set(self.tracker.ext_resources_handler, + 'write_resources', + fake_write_resources) + + resources = self.driver.get_available_resource(self.tracker.nodename) + self.tracker._write_ext_resources(resources) + + expected = {"resA": 
"123", "resB": 12} + self.assertEqual(sorted(expected), + sorted(resources['stats'])) + + class InstanceClaimTestCase(BaseTrackerTestCase): def test_update_usage_only_for_tracked(self): diff --git a/nova/tests/compute/test_resources.py b/nova/tests/compute/test_resources.py new file mode 100644 index 0000000000..db2722ccb5 --- /dev/null +++ b/nova/tests/compute/test_resources.py @@ -0,0 +1,344 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Tests for the compute extra resources framework.""" + + +from oslo.config import cfg +from stevedore import extension +from stevedore import named + +from nova.compute import resources +from nova.compute.resources import base +from nova.compute.resources import vcpu +from nova import context +from nova.i18n import _ +from nova.objects import flavor as flavor_obj +from nova import test +from nova.tests.fake_instance import fake_instance_obj + +CONF = cfg.CONF + + +class FakeResourceHandler(resources.ResourceHandler): + def __init__(self, extensions): + self._mgr = \ + named.NamedExtensionManager.make_test_instance(extensions) + + +class FakeResource(base.Resource): + + def __init__(self): + self.total_res = 0 + self.used_res = 0 + + def _get_requested(self, usage): + if 'extra_specs' not in usage: + return + if self.resource_name not in usage['extra_specs']: + return + req = usage['extra_specs'][self.resource_name] + return int(req) + + def _get_limit(self, limits): + if self.resource_name not in limits: + return + limit = limits[self.resource_name] + return int(limit) + + def reset(self, resources, driver): + self.total_res = 0 + self.used_res = 0 + + def test(self, usage, limits): + requested = self._get_requested(usage) + if not requested: + return + + limit = self._get_limit(limits) + if not limit: + return + + free = limit - self.used_res + if requested <= free: + return + else: + return (_('Free %(free)d < requested %(requested)d ') % + {'free': free, 'requested': requested}) + + def add_instance(self, usage): + requested = self._get_requested(usage) + if requested: + self.used_res += requested + + def remove_instance(self, usage): + requested = self._get_requested(usage) + if requested: + self.used_res -= requested + + def write(self, resources): + pass + + def report_free(self): + return "Free %s" % (self.total_res - self.used_res) + + +class ResourceA(FakeResource): + + def reset(self, resources, driver): + # ResourceA uses a configuration option + 
self.total_res = int(CONF.resA) + self.used_res = 0 + self.resource_name = 'resource:resA' + + def write(self, resources): + resources['resA'] = self.total_res + resources['used_resA'] = self.used_res + + +class ResourceB(FakeResource): + + def reset(self, resources, driver): + # ResourceB uses resource details passed in parameter resources + self.total_res = resources['resB'] + self.used_res = 0 + self.resource_name = 'resource:resB' + + def write(self, resources): + resources['resB'] = self.total_res + resources['used_resB'] = self.used_res + + +def fake_flavor_obj(**updates): + flavor = flavor_obj.Flavor() + flavor.id = 1 + flavor.name = 'fakeflavor' + flavor.memory_mb = 8000 + flavor.vcpus = 3 + flavor.root_gb = 11 + flavor.ephemeral_gb = 4 + flavor.swap = 0 + flavor.rxtx_factor = 1.0 + flavor.vcpu_weight = 1 + if updates: + flavor.update(updates) + return flavor + + +class BaseTestCase(test.TestCase): + + def _initialize_used_res_counter(self): + # Initialize the value for the used resource + for ext in self.r_handler._mgr.extensions: + ext.obj.used_res = 0 + + def setUp(self): + super(BaseTestCase, self).setUp() + + # initialize flavors and stub get_by_id to + # get flavors from here + self._flavors = {} + self.ctxt = context.get_admin_context() + + # Create a flavor without extra_specs defined + _flavor_id = 1 + _flavor = fake_flavor_obj(id=_flavor_id) + self._flavors[_flavor_id] = _flavor + + # Create a flavor with extra_specs defined + _flavor_id = 2 + requested_resA = 5 + requested_resB = 7 + requested_resC = 7 + _extra_specs = {'resource:resA': requested_resA, + 'resource:resB': requested_resB, + 'resource:resC': requested_resC} + _flavor = fake_flavor_obj(id=_flavor_id, + extra_specs=_extra_specs) + self._flavors[_flavor_id] = _flavor + + # create fake resource extensions and resource handler + _extensions = [ + extension.Extension('resA', None, ResourceA, ResourceA()), + extension.Extension('resB', None, ResourceB, ResourceB()), + ] + self.r_handler = 
FakeResourceHandler(_extensions) + + # Resources details can be passed to each plugin or can be specified as + # configuration options + driver_resources = {'resB': 5} + CONF.resA = '10' + + # initialise the resources + self.r_handler.reset_resources(driver_resources, None) + + def test_update_from_instance_with_extra_specs(self): + # Flavor with extra_specs + _flavor_id = 2 + sign = 1 + self.r_handler.update_from_instance(self._flavors[_flavor_id], sign) + + expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA'] + expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB'] + self.assertEqual(int(expected_resA), + self.r_handler._mgr['resA'].obj.used_res) + self.assertEqual(int(expected_resB), + self.r_handler._mgr['resB'].obj.used_res) + + def test_update_from_instance_without_extra_specs(self): + # Flavor id without extra spec + _flavor_id = 1 + self._initialize_used_res_counter() + self.r_handler.resource_list = [] + sign = 1 + self.r_handler.update_from_instance(self._flavors[_flavor_id], sign) + self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res) + self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res) + + def test_write_resources(self): + self._initialize_used_res_counter() + extra_resources = {} + expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0} + self.r_handler.write_resources(extra_resources) + self.assertEqual(expected, extra_resources) + + def test_test_resources_without_extra_specs(self): + limits = {} + # Flavor id without extra_specs + flavor = self._flavors[1] + result = self.r_handler.test_resources(flavor, limits) + self.assertEqual([None, None], result) + + def test_test_resources_with_limits_for_different_resource(self): + limits = {'resource:resC': 20} + # Flavor id with extra_specs + flavor = self._flavors[2] + result = self.r_handler.test_resources(flavor, limits) + self.assertEqual([None, None], result) + + def test_passing_test_resources(self): + limits = {'resource:resA': 10, 
'resource:resB': 20} + # Flavor id with extra_specs + flavor = self._flavors[2] + self._initialize_used_res_counter() + result = self.r_handler.test_resources(flavor, limits) + self.assertEqual([None, None], result) + + def test_failing_test_resources_for_single_resource(self): + limits = {'resource:resA': 4, 'resource:resB': 20} + # Flavor id with extra_specs + flavor = self._flavors[2] + self._initialize_used_res_counter() + result = self.r_handler.test_resources(flavor, limits) + expected = ['Free 4 < requested 5 ', None] + self.assertEqual(sorted(expected), + sorted(result)) + + def test_empty_resource_handler(self): + """An empty resource handler has no resource extensions, + should have no effect, and should raise no exceptions. + """ + empty_r_handler = FakeResourceHandler([]) + + resources = {} + empty_r_handler.reset_resources(resources, None) + + flavor = self._flavors[1] + sign = 1 + empty_r_handler.update_from_instance(flavor, sign) + + limits = {} + test_result = empty_r_handler.test_resources(flavor, limits) + self.assertEqual([], test_result) + + sign = -1 + empty_r_handler.update_from_instance(flavor, sign) + + extra_resources = {} + expected_extra_resources = extra_resources + empty_r_handler.write_resources(extra_resources) + self.assertEqual(expected_extra_resources, extra_resources) + + empty_r_handler.report_free_resources() + + def test_vcpu_resource_load(self): + # load the vcpu example + names = ['vcpu'] + real_r_handler = resources.ResourceHandler(names) + ext_names = real_r_handler._mgr.names() + self.assertEqual(names, ext_names) + + # check the extension loaded is the one we expect + # and an instance of the object has been created + ext = real_r_handler._mgr['vcpu'] + self.assertIsInstance(ext.obj, vcpu.VCPU) + + +class TestVCPU(test.TestCase): + + def setUp(self): + super(TestVCPU, self).setUp() + self._vcpu = vcpu.VCPU() + self._vcpu._total = 10 + self._vcpu._used = 0 + self._flavor = fake_flavor_obj(vcpus=5) + self._big_flavor = 
fake_flavor_obj(vcpus=20) + self._instance = fake_instance_obj(None) + + def test_reset(self): + # set vcpu values to something different to test reset + self._vcpu._total = 10 + self._vcpu._used = 5 + + driver_resources = {'vcpus': 20} + self._vcpu.reset(driver_resources, None) + self.assertEqual(20, self._vcpu._total) + self.assertEqual(0, self._vcpu._used) + + def test_add_and_remove_instance(self): + self._vcpu.add_instance(self._flavor) + self.assertEqual(10, self._vcpu._total) + self.assertEqual(5, self._vcpu._used) + + self._vcpu.remove_instance(self._flavor) + self.assertEqual(10, self._vcpu._total) + self.assertEqual(0, self._vcpu._used) + + def test_test_pass_limited(self): + result = self._vcpu.test(self._flavor, {'vcpu': 10}) + self.assertIsNone(result, 'vcpu test failed when it should pass') + + def test_test_pass_unlimited(self): + result = self._vcpu.test(self._big_flavor, {}) + self.assertIsNone(result, 'vcpu test failed when it should pass') + + def test_test_fail(self): + result = self._vcpu.test(self._flavor, {'vcpu': 2}) + expected = _('Free CPUs 2.00 VCPUs < requested 5 VCPUs') + self.assertEqual(expected, result) + + def test_write(self): + resources = {'stats': {}} + self._vcpu.write(resources) + expected = { + 'vcpus': 10, + 'vcpus_used': 0, + 'stats': { + 'num_vcpus': 10, + 'num_vcpus_used': 0 + } + } + self.assertEqual(sorted(expected), + sorted(resources)) diff --git a/nova/tests/compute/test_stats.py b/nova/tests/compute/test_stats.py index 1864ac7950..c90314b0fc 100644 --- a/nova/tests/compute/test_stats.py +++ b/nova/tests/compute/test_stats.py @@ -136,8 +136,6 @@ def test_add_stats_for_instance(self): self.assertEqual(1, self.stats["num_vm_None"]) self.assertEqual(2, self.stats["num_vm_" + vm_states.BUILDING]) - self.assertEqual(10, self.stats.num_vcpus_used) - def test_calculate_workload(self): self.stats._increment("num_task_None") self.stats._increment("num_task_" + task_states.SCHEDULING) @@ -191,7 +189,6 @@ def 
test_update_stats_for_instance_deleted(self): self.assertEqual(0, self.stats.num_instances_for_project("1234")) self.assertEqual(0, self.stats.num_os_type("Linux")) self.assertEqual(0, self.stats["num_vm_" + vm_states.BUILDING]) - self.assertEqual(0, self.stats.num_vcpus_used) def test_io_workload(self): vms = [vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED] diff --git a/setup.cfg b/setup.cfg index e804077290..20c9399e1b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -27,6 +27,8 @@ packages = nova [entry_points] +nova.compute.resources = + vcpu = nova.compute.resources.vcpu:VCPU nova.image.download.modules = file = nova.image.download.file console_scripts = From 45b51d973d92a515982d3b715efb618daab7d538 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Thu, 31 Jul 2014 14:40:03 +0000 Subject: [PATCH 236/486] Merge BadRequest tests of "get console output" API In test_console_output, there are BadRequest tests of "get console output". Most parts of them are duplicated. This patch merges them for the readability and clarifying their purposes. 
Change-Id: Ia83cf1b77a7a185a195286234d7f943d0ca7537f --- .../compute/contrib/test_console_output.py | 34 +++++++------------ 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/nova/tests/api/openstack/compute/contrib/test_console_output.py b/nova/tests/api/openstack/compute/contrib/test_console_output.py index d3feafb819..67684da1fe 100644 --- a/nova/tests/api/openstack/compute/contrib/test_console_output.py +++ b/nova/tests/api/openstack/compute/contrib/test_console_output.py @@ -115,15 +115,6 @@ def test_get_console_output_filtered_characters(self): expect = string.digits + string.letters + string.punctuation + ' \t\n' self.assertEqual(output, {'output': expect}) - def test_get_console_output_with_non_integer_length(self): - body = {'os-getConsoleOutput': {'length': 'NaN'}} - req = webob.Request.blank('/v2/fake/servers/1/action') - req.method = "POST" - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" - res = req.get_response(self.app) - self.assertEqual(res.status_int, 400) - def test_get_text_console_no_instance(self): self.stubs.Set(compute_api.API, 'get', fake_get_not_found) body = {'os-getConsoleOutput': {}} @@ -148,16 +139,26 @@ def test_get_text_console_no_instance_on_get_output(self): res = req.get_response(self.app) self.assertEqual(res.status_int, 404) - def test_get_text_console_bad_body(self): - body = {} + def _get_console_output_bad_request_case(self, body): req = webob.Request.blank('/v2/fake/servers/1/action') req.method = "POST" req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" - res = req.get_response(self.app) self.assertEqual(res.status_int, 400) + def test_get_console_output_with_non_integer_length(self): + body = {'os-getConsoleOutput': {'length': 'NaN'}} + self._get_console_output_bad_request_case(body) + + def test_get_text_console_bad_body(self): + body = {} + self._get_console_output_bad_request_case(body) + + def 
test_get_console_output_with_length_as_float(self): + body = {'os-getConsoleOutput': {'length': 2.5}} + self._get_console_output_bad_request_case(body) + def test_get_console_output_not_ready(self): self.stubs.Set(compute_api.API, 'get_console_output', fake_get_console_output_not_ready) @@ -170,15 +171,6 @@ def test_get_console_output_not_ready(self): res = req.get_response(self.app) self.assertEqual(res.status_int, 409) - def test_get_console_output_with_length_as_float(self): - body = {'os-getConsoleOutput': {'length': 2.5}} - req = webob.Request.blank('/v2/fake/servers/1/action') - req.method = "POST" - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" - res = req.get_response(self.app) - self.assertEqual(res.status_int, 400) - def test_not_implemented(self): self.stubs.Set(compute_api.API, 'get_console_output', fakes.fake_not_implemented) From 52545b57d35486d104b11dc8981f688b86174115 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 23 Jul 2014 13:19:50 -0700 Subject: [PATCH 237/486] Convert network/api.py get calls to use Network object This makes the network api module use the Network objects for get() and get_all() operations. There were no tests for these methods, so this patch adds them in the process. 
Related to blueprint compute-manager-objects-juno Change-Id: Iea1104a111e50a796077cdf0ed5b0f4e75bfcbcb --- .../compute/contrib/os_tenant_networks.py | 4 +- nova/network/api.py | 16 ++--- nova/tests/network/test_api.py | 65 +++++++++++++++++++ 3 files changed, 75 insertions(+), 10 deletions(-) diff --git a/nova/api/openstack/compute/contrib/os_tenant_networks.py b/nova/api/openstack/compute/contrib/os_tenant_networks.py index 9e28b584ae..71556b7cf5 100644 --- a/nova/api/openstack/compute/contrib/os_tenant_networks.py +++ b/nova/api/openstack/compute/contrib/os_tenant_networks.py @@ -66,7 +66,7 @@ def network_dict(network): return {"id": network.get("uuid") or network.get("id"), - "cidr": network.get("cidr"), + "cidr": str(network.get("cidr")), "label": network.get("label")} @@ -95,7 +95,7 @@ def _get_default_networks(self): def index(self, req): context = req.environ['nova.context'] authorize(context) - networks = self.network_api.get_all(context) + networks = list(self.network_api.get_all(context)) if not self._default_networks: self._refresh_default_networks() networks.extend(self._default_networks) diff --git a/nova/network/api.py b/nova/network/api.py index 2aec7e8f8e..a4599e7277 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -78,13 +78,13 @@ def get_all(self, context): belong to the user's project. 
""" try: - return self.db.network_get_all(context, project_only=True) + return objects.NetworkList.get_all(context, project_only=True) except exception.NoNetworksFound: return [] @wrap_check_policy def get(self, context, network_uuid): - return self.db.network_get_by_uuid(context.elevated(), network_uuid) + return objects.Network.get_by_uuid(context.elevated(), network_uuid) @wrap_check_policy def create(self, context, **kwargs): @@ -146,9 +146,9 @@ def get_vifs_by_instance(self, context, instance): instance['uuid']) for vif in vifs: if vif.get('network_id') is not None: - network = self.db.network_get(context, vif['network_id'], - project_only="allow_none") - vif['net_uuid'] = network['uuid'] + network = objects.Network.get_by_id(context, vif['network_id'], + project_only='allow_none') + vif['net_uuid'] = network.uuid return vifs @wrap_check_policy @@ -156,9 +156,9 @@ def get_vif_by_mac_address(self, context, mac_address): vif = self.db.virtual_interface_get_by_address(context, mac_address) if vif.get('network_id') is not None: - network = self.db.network_get(context, vif['network_id'], - project_only="allow_none") - vif['net_uuid'] = network['uuid'] + network = objects.Network.get_by_id(context, vif['network_id'], + project_only='allow_none') + vif['net_uuid'] = network.uuid return vif @wrap_check_policy diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py index 36979e9679..dfef96781a 100644 --- a/nova/tests/network/test_api.py +++ b/nova/tests/network/test_api.py @@ -73,6 +73,71 @@ def setUp(self): self.context = context.RequestContext('fake-user', 'fake-project') + @mock.patch('nova.objects.NetworkList.get_all') + def test_get_all(self, mock_get_all): + mock_get_all.return_value = mock.sentinel.get_all + self.assertEqual(mock.sentinel.get_all, + self.network_api.get_all(self.context)) + mock_get_all.assert_called_once_with(self.context, + project_only=True) + + @mock.patch('nova.objects.NetworkList.get_all') + def 
test_get_all_no_networks(self, mock_get_all): + mock_get_all.side_effect = exception.NoNetworksFound + self.assertEqual([], self.network_api.get_all(self.context)) + mock_get_all.assert_called_once_with(self.context, + project_only=True) + + @mock.patch('nova.objects.Network.get_by_uuid') + def test_get(self, mock_get): + mock_get.return_value = mock.sentinel.get_by_uuid + with mock.patch.object(self.context, 'elevated') as elevated: + elevated.return_value = mock.sentinel.elevated_context + self.assertEqual(mock.sentinel.get_by_uuid, + self.network_api.get(self.context, 'fake-uuid')) + mock_get.assert_called_once_with(mock.sentinel.elevated_context, + 'fake-uuid') + + @mock.patch('nova.objects.Network.get_by_id') + @mock.patch('nova.db.virtual_interface_get_by_instance') + def test_get_vifs_by_instance(self, mock_get_by_instance, + mock_get_by_id): + mock_get_by_instance.return_value = [ + {'network_id': mock.sentinel.network_id}] + mock_get_by_id.return_value = objects.Network() + mock_get_by_id.return_value.uuid = mock.sentinel.network_uuid + instance = objects.Instance(uuid=mock.sentinel.inst_uuid) + vifs = self.network_api.get_vifs_by_instance(self.context, + instance) + self.assertEqual(1, len(vifs)) + self.assertEqual({'network_id': mock.sentinel.network_id, + 'net_uuid': str(mock.sentinel.network_uuid)}, + vifs[0]) + mock_get_by_instance.assert_called_once_with( + self.context, str(mock.sentinel.inst_uuid)) + mock_get_by_id.assert_called_once_with(self.context, + mock.sentinel.network_id, + project_only='allow_none') + + @mock.patch('nova.objects.Network.get_by_id') + @mock.patch('nova.db.virtual_interface_get_by_address') + def test_get_vif_by_mac_address(self, mock_get_by_address, + mock_get_by_id): + mock_get_by_address.return_value = { + 'network_id': mock.sentinel.network_id} + mock_get_by_id.return_value = objects.Network( + uuid=mock.sentinel.network_uuid) + vif = self.network_api.get_vif_by_mac_address(self.context, + mock.sentinel.mac) + 
self.assertEqual({'network_id': mock.sentinel.network_id, + 'net_uuid': str(mock.sentinel.network_uuid)}, + vif) + mock_get_by_address.assert_called_once_with(self.context, + mock.sentinel.mac) + mock_get_by_id.assert_called_once_with(self.context, + mock.sentinel.network_id, + project_only='allow_none') + def test_allocate_for_instance_handles_macs_passed(self): # If a macs argument is supplied to the 'nova-network' API, it is just # ignored. This test checks that the call down to the rpcapi layer From f5ccf26132d7f87a603749fb78aea8926a521367 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 23 Jul 2014 13:39:27 -0700 Subject: [PATCH 238/486] Convert network/api.py fixedip calls to use FixedIP object This makes the network/api.py module use the FixedIP object for fetching fixedip information instead of direct database access. There was not complete test coverage for these calls, so it is added here. Note that this includes a change to the FixedIP object, which allows for the case that fixed_ip_get_by_floating_address() can return None. This is technically an RPC-visible change that would normally require a version bump. However, I don't think it's worth bumping the version here since this bug would cause calls to explode in a non-useful way, thus preventing callers from being able to tolerate the case where the DB call returns None. 
Change-Id: Id8a9c8d2d10ddfaf0990258ac496da7c48a9b28c --- nova/network/api.py | 6 +++--- nova/objects/fixed_ip.py | 3 ++- nova/tests/network/test_api.py | 27 +++++++++++++++++++++++++++ nova/tests/objects/test_fixed_ip.py | 10 +++++++++- 4 files changed, 41 insertions(+), 5 deletions(-) diff --git a/nova/network/api.py b/nova/network/api.py index a4599e7277..01bbcdc4c3 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -101,7 +101,7 @@ def disassociate(self, context, network_uuid): @wrap_check_policy def get_fixed_ip(self, context, id): - return self.db.fixed_ip_get(context, id) + return objects.FixedIP.get_by_id(context, id) @wrap_check_policy def get_fixed_ip_by_address(self, context, address): @@ -134,11 +134,11 @@ def get_floating_ips_by_fixed_address(self, context, fixed_address): @wrap_check_policy def get_instance_id_by_floating_address(self, context, address): - fixed_ip = self.db.fixed_ip_get_by_floating_address(context, address) + fixed_ip = objects.FixedIP.get_by_floating_address(context, address) if fixed_ip is None: return None else: - return fixed_ip['instance_uuid'] + return fixed_ip.instance_uuid @wrap_check_policy def get_vifs_by_instance(self, context, instance): diff --git a/nova/objects/fixed_ip.py b/nova/objects/fixed_ip.py index 0ceaf38f67..2c815bfdb4 100644 --- a/nova/objects/fixed_ip.py +++ b/nova/objects/fixed_ip.py @@ -95,7 +95,8 @@ def get_by_address(cls, context, address, expected_attrs=None): @obj_base.remotable_classmethod def get_by_floating_address(cls, context, address): db_fixedip = db.fixed_ip_get_by_floating_address(context, address) - return cls._from_db_object(context, cls(context), db_fixedip) + if db_fixedip is not None: + return cls._from_db_object(context, cls(context), db_fixedip) @obj_base.remotable_classmethod def get_by_network_and_host(cls, context, network_id, host): diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py index dfef96781a..da1e96270f 100644 --- 
a/nova/tests/network/test_api.py +++ b/nova/tests/network/test_api.py @@ -404,6 +404,33 @@ def test_get_fixed_ip_by_address(self, fip_get): 'fake-addr') self.assertIsInstance(fip, objects.FixedIP) + @mock.patch('nova.objects.FixedIP.get_by_id') + def test_get_fixed_ip(self, mock_get_by_id): + mock_get_by_id.return_value = mock.sentinel.fixed_ip + self.assertEqual(mock.sentinel.fixed_ip, + self.network_api.get_fixed_ip(self.context, + mock.sentinel.id)) + mock_get_by_id.assert_called_once_with(self.context, mock.sentinel.id) + + @mock.patch('nova.objects.FixedIP.get_by_floating_address') + def test_get_instance_by_floating_address(self, mock_get_by_floating): + mock_get_by_floating.return_value = objects.FixedIP( + instance_uuid = mock.sentinel.instance_uuid) + self.assertEqual(str(mock.sentinel.instance_uuid), + self.network_api.get_instance_id_by_floating_address( + self.context, mock.sentinel.floating)) + mock_get_by_floating.assert_called_once_with(self.context, + mock.sentinel.floating) + + @mock.patch('nova.objects.FixedIP.get_by_floating_address') + def test_get_instance_by_floating_address_none(self, mock_get_by_floating): + mock_get_by_floating.return_value = None + self.assertEqual(None, + self.network_api.get_instance_id_by_floating_address( + self.context, mock.sentinel.floating)) + mock_get_by_floating.assert_called_once_with(self.context, + mock.sentinel.floating) + @mock.patch('nova.network.api.API') @mock.patch('nova.db.instance_info_cache_update') diff --git a/nova/tests/objects/test_fixed_ip.py b/nova/tests/objects/test_fixed_ip.py index 04dd4d2f49..d34cd3883c 100644 --- a/nova/tests/objects/test_fixed_ip.py +++ b/nova/tests/objects/test_fixed_ip.py @@ -131,13 +131,21 @@ def test_get_by_address_with_extras_deleted_instance(self, instance_get, self.assertFalse(instance_get.called) @mock.patch('nova.db.fixed_ip_get_by_floating_address') - def test_get_by_floating_ip(self, get): + def test_get_by_floating_address(self, get): get.return_value = 
fake_fixed_ip fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context, '1.2.3.4') get.assert_called_once_with(self.context, '1.2.3.4') self._compare(fixedip, fake_fixed_ip) + @mock.patch('nova.db.fixed_ip_get_by_floating_address') + def test_get_by_floating_address_none(self, get): + get.return_value = None + fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context, + '1.2.3.4') + get.assert_called_once_with(self.context, '1.2.3.4') + self.assertIsNone(fixedip) + @mock.patch('nova.db.fixed_ip_get_by_network_host') def test_get_by_network_and_host(self, get): get.return_value = fake_fixed_ip From e24f333cd5d3667a15c36ac502304bfb5a0e21df Mon Sep 17 00:00:00 2001 From: Daniel Genin Date: Mon, 28 Jul 2014 17:12:40 -0400 Subject: [PATCH 239/486] Raises NotImplementedError for LVM migration. Currently migration of an LVM backed instance results in a new instance being launched on the destination node, but the instance disk is neither copied to the destination nor deleted on the origin node. The problem is addressed by raising a NotImplementedError. Closes-Bug: #1270305 Closes-Bug: #1245595 Closes-Bug: #1241866 Change-Id: I8010230b1aa5ddc322d0c93dd916b7700c25ab81 DocImpact --- nova/compute/manager.py | 5 ++++ nova/tests/virt/libvirt/test_driver.py | 40 ++++++++++++++++++++++++++ nova/virt/libvirt/driver.py | 6 ++++ 3 files changed, 51 insertions(+) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 98f928111b..c074bd35c7 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -3463,6 +3463,11 @@ def prep_resize(self, context, image, instance, instance_type, instance_type, quotas, request_spec, filter_properties, node) + # NOTE(dgenin): This is thrown in LibvirtDriver when the + # instance to be migrated is backed by LVM. + # Remove when LVM migration is implemented. 
+ except exception.MigrationPreCheckError: + raise except Exception: # try to re-schedule the resize elsewhere: exc_info = sys.exc_info() diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index bb2c5266d1..27544adc28 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -9380,6 +9380,46 @@ def fake_execute(*args, **kwargs): self.assertFalse(self.copy_or_move_swap_called) self.assertEqual(disk_info_text, out) + def test_migrate_disk_and_power_off_lvm(self): + """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection + .migrate_disk_and_power_off. + """ + + self.flags(images_type='lvm', group='libvirt') + disk_info = [{'type': 'raw', 'path': '/dev/vg/disk', + 'disk_size': '83886080'}, + {'type': 'raw', 'path': '/dev/disk.local', + 'disk_size': '83886080'}] + disk_info_text = jsonutils.dumps(disk_info) + + def fake_get_instance_disk_info(instance, xml=None, + block_device_info=None): + return disk_info_text + + def fake_destroy(instance): + pass + + def fake_get_host_ip_addr(): + return '10.0.0.1' + + def fake_execute(*args, **kwargs): + pass + + self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info', + fake_get_instance_disk_info) + self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy) + self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr', + fake_get_host_ip_addr) + self.stubs.Set(utils, 'execute', fake_execute) + + ins_ref = self._create_instance() + flavor = {'root_gb': 10, 'ephemeral_gb': 20} + + # Migration is not implemented for LVM backed instances + self.assertRaises(exception.MigrationPreCheckError, + self.libvirtconnection.migrate_disk_and_power_off, + None, ins_ref, '10.0.0.1', flavor, None) + def test_migrate_disk_and_power_off_resize_error(self): instance = self._create_instance() flavor = {'root_gb': 5} diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 6c39391ac2..5154213a5f 100644 --- 
a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -5096,6 +5096,12 @@ def migrate_disk_and_power_off(self, context, instance, dest, block_device_info=block_device_info) disk_info = jsonutils.loads(disk_info_text) + # NOTE(dgenin): Migration is not implemented for LVM backed instances. + if (CONF.libvirt.images_type == 'lvm' and + not self._is_booted_from_volume(instance, disk_info_text)): + reason = "Migration is not supported for LVM backed instances" + raise exception.MigrationPreCheckError(reason) + # copy disks to destination # rename instance dir to +_resize at first for using # shared storage for instance dir (eg. NFS). From d8c19e7823096bcdbc714e81fafabffe8a70a22e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 17 Jun 2014 10:31:36 -0700 Subject: [PATCH 240/486] Fix ownership checking in get_networks_by_uuid The code was elevating context before requesting networks which means that the project_only code is skipped and all networks could be retrieved. This means that the default networks returned by FlatDhcpManager could include networks that belong to other projects. This fixes the issue by requesting the network without elevating the context, re-enabling the proper project checking. It includes tests to verify that the proper exception is raised when an illegal network is requested and that the context has not been elevated by the compute manager. 
Partially-implements blueprint better-support-for-multiple-networks Change-Id: Icd3bc521003725cc3da9875dfd6532d5c5524f43 Closes-Bug: 1331092 --- nova/compute/manager.py | 5 +-- nova/network/floating_ips.py | 1 + nova/network/manager.py | 15 ++++---- nova/tests/compute/test_compute.py | 7 +++- nova/tests/compute/test_compute_mgr.py | 47 ++++++++++++++++++++++++++ nova/tests/network/test_manager.py | 39 ++++++++++++++++++--- 6 files changed, 100 insertions(+), 14 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f1e744f340..e8c859f2e7 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1290,6 +1290,7 @@ def _do_validation(context, instance, group_hint): def _build_instance(self, context, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, instance, image_meta, legacy_bdm_in_spec): + original_context = context context = context.elevated() # If neutron security groups pass requested security @@ -1323,8 +1324,8 @@ def _build_instance(self, context, request_spec, filter_properties, macs = self.driver.macs_for_instance(instance) dhcp_options = self.driver.dhcp_options_for_instance(instance) - network_info = self._allocate_network(context, instance, - requested_networks, macs, security_groups, + network_info = self._allocate_network(original_context, + instance, requested_networks, macs, security_groups, dhcp_options) self._instance_update( diff --git a/nova/network/floating_ips.py b/nova/network/floating_ips.py index 9782f673f2..02beaf122b 100644 --- a/nova/network/floating_ips.py +++ b/nova/network/floating_ips.py @@ -112,6 +112,7 @@ def allocate_for_instance(self, context, **kwargs): nw_info = super(FloatingIP, self).allocate_for_instance(context, **kwargs) if CONF.auto_assign_floating_ip: + context = context.elevated() # allocate a floating ip floating_address = self.allocate_floating_ip(context, project_id, True) diff --git a/nova/network/manager.py 
b/nova/network/manager.py index cf6622efa4..0f32ca7d39 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -479,7 +479,7 @@ def allocate_for_instance(self, context, **kwargs): admin_context = context.elevated() LOG.debug("Allocate network for instance", instance_uuid=instance_uuid, context=context) - networks = self._get_networks_for_instance(admin_context, + networks = self._get_networks_for_instance(context, instance_uuid, project_id, requested_networks=requested_networks) networks_list = [self._get_network_dict(network) @@ -488,8 +488,8 @@ def allocate_for_instance(self, context, **kwargs): networks_list, context=context, instance_uuid=instance_uuid) try: - self._allocate_mac_addresses(context, instance_uuid, networks, - macs) + self._allocate_mac_addresses(admin_context, instance_uuid, + networks, macs) except Exception: with excutils.save_and_reraise_exception(): # If we fail to allocate any one mac address, clean up all @@ -505,8 +505,8 @@ def allocate_for_instance(self, context, **kwargs): network_ids = [network['id'] for network in networks] self.network_rpcapi.update_dns(context, network_ids) - return self.get_instance_nw_info(context, instance_uuid, rxtx_factor, - host) + return self.get_instance_nw_info(admin_context, instance_uuid, + rxtx_factor, host) def deallocate_for_instance(self, context, **kwargs): """Handles deallocating various network resources for an instance. @@ -1934,8 +1934,9 @@ def _get_networks_for_instance(self, context, instance_id, project_id, network_uuids = [uuid for (uuid, fixed_ip) in requested_networks] networks = self._get_networks_by_uuids(context, network_uuids) else: - networks = objects.NetworkList.get_by_project(context, - project_id) + # NOTE(vish): Allocates network on demand so requires admin. 
+ networks = objects.NetworkList.get_by_project( + context.elevated(), project_id) return networks def create_networks(self, context, **kwargs): diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index c5e17014eb..706a1ad7ee 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -251,8 +251,13 @@ def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs): self.stubs.Set(network_api.API, 'get_instance_nw_info', fake_get_nw_info) + + def fake_allocate_for_instance(cls, ctxt, instance, *args, **kwargs): + self.assertFalse(ctxt.is_admin) + return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1) + self.stubs.Set(network_api.API, 'allocate_for_instance', - fake_get_nw_info) + fake_allocate_for_instance) self.compute_api = compute.API() # Just to make long lines short diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 7d7a4e367e..fc54830d85 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -98,6 +98,53 @@ def test_allocate_network_succeeds_after_retries(self): dhcp_options) self.assertEqual(final_result, res) + def test_allocate_network_maintains_context(self): + # override tracker with a version that doesn't need the database: + class FakeResourceTracker(object): + def instance_claim(self, context, instance, limits): + return mox.MockAnything() + + self.mox.StubOutWithMock(self.compute, '_get_resource_tracker') + self.mox.StubOutWithMock(self.compute, '_allocate_network') + self.mox.StubOutWithMock(self.compute, '_instance_update') + self.mox.StubOutWithMock(objects.BlockDeviceMappingList, + 'get_by_instance_uuid') + + instance = fake_instance.fake_db_instance(system_metadata={}) + + objects.BlockDeviceMappingList.get_by_instance_uuid( + mox.IgnoreArg(), instance['uuid']).AndReturn([]) + + node = 'fake_node' + self.compute._get_resource_tracker(node).AndReturn( + FakeResourceTracker()) + + 
self.admin_context = False + + def fake_allocate(context, *args, **kwargs): + if context.is_admin: + self.admin_context = True + + # NOTE(vish): The nice mox parameter matchers here don't work well + # because they raise an exception that gets wrapped by + # the retry exception handling, so use a side effect + # to keep track of whether allocate was called with admin + # context. + self.compute._allocate_network(mox.IgnoreArg(), instance, + mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), + mox.IgnoreArg()).WithSideEffects(fake_allocate) + self.compute._instance_update(self.context, instance['uuid'], + system_metadata={'network_allocated': 'True'}) + + self.mox.ReplayAll() + + self.compute._build_instance(self.context, {}, {}, + None, None, None, True, + node, instance, + {}, False) + self.assertFalse(self.admin_context, + "_allocate_network called with admin context") + def test_allocate_network_fails(self): self.flags(network_allocate_retries=0) diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index f36a4ebb28..2146c3dc9e 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -2390,6 +2390,8 @@ class TestFloatingIPManager(floating_ips.FloatingIP, class AllocateTestCase(test.TestCase): def setUp(self): super(AllocateTestCase, self).setUp() + dns = 'nova.network.noop_dns_driver.NoopDNSDriver' + self.flags(instance_dns_manager=dns) self.useFixture(test.SampleNetworks()) self.conductor = self.start_service( 'conductor', manager=CONF.conductor.manager) @@ -2401,6 +2403,8 @@ def setUp(self): self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True) + self.user_context = context.RequestContext('testuser', + 'testproject') def test_allocate_for_instance(self): address = "10.10.10.10" @@ -2419,8 +2423,8 @@ def test_allocate_for_instance(self): for network in networks: db.network_update(self.context, network['id'], {'host': self.network.host}) - project_id = 
self.context.project_id - nw_info = self.network.allocate_for_instance(self.context, + project_id = self.user_context.project_id + nw_info = self.network.allocate_for_instance(self.user_context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, project_id=project_id, macs=None) @@ -2430,6 +2434,32 @@ def test_allocate_for_instance(self): self.network.deallocate_for_instance(self.context, instance=inst) + def test_allocate_for_instance_illegal_network(self): + networks = db.network_get_all(self.context) + requested_networks = [] + for network in networks: + # set all networks to other projects + db.network_update(self.context, network['id'], + {'host': self.network.host, + 'project_id': 'otherid'}) + requested_networks.append((network['uuid'], None)) + # set the first network to our project + db.network_update(self.context, networks[0]['id'], + {'project_id': self.user_context.project_id}) + + inst = objects.Instance() + inst.host = self.compute.host + inst.display_name = HOST + inst.instance_type_id = 1 + inst.uuid = FAKEUUID + inst.create(self.context) + self.assertRaises(exception.NetworkNotFoundForProject, + self.network.allocate_for_instance, self.user_context, + instance_id=inst['id'], instance_uuid=inst['uuid'], + host=inst['host'], vpn=None, rxtx_factor=3, + project_id=self.context.project_id, macs=None, + requested_networks=requested_networks) + def test_allocate_for_instance_with_mac(self): available_macs = set(['ca:fe:de:ad:be:ef']) inst = db.instance_create(self.context, {'host': self.compute.host, @@ -2440,7 +2470,7 @@ def test_allocate_for_instance_with_mac(self): db.network_update(self.context, network['id'], {'host': self.network.host}) project_id = self.context.project_id - nw_info = self.network.allocate_for_instance(self.context, + nw_info = self.network.allocate_for_instance(self.user_context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, 
project_id=project_id, macs=available_macs) @@ -2463,7 +2493,8 @@ def test_allocate_for_instance_not_enough_macs(self): {'host': self.network.host}) project_id = self.context.project_id self.assertRaises(exception.VirtualInterfaceCreateException, - self.network.allocate_for_instance, self.context, + self.network.allocate_for_instance, + self.user_context, instance_id=inst['id'], instance_uuid=inst['uuid'], host=inst['host'], vpn=None, rxtx_factor=3, project_id=project_id, macs=available_macs) From 138242a0b30480ae42f7160d5c3d98dddfe52d8d Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Wed, 23 Jul 2014 13:07:20 -0700 Subject: [PATCH 241/486] Remove duplicate index from block_device_mapping table Migration 186 (doesn't exist anymore) deleted the column virtual_name from the block_device_mapping table. This ended up modifying an index (block_device_mapping_instance_uuid_virtual_name_device_name_idx) to be identical to (block_device_mapping_instance_uuid_device_name_idx) Closes-Bug: 1265839 Change-Id: Ib293220eb764e9810212d121ce7889e6aefc266e --- .../versions/249_remove_duplicate_index.py | 36 +++++++++++++++++++ nova/tests/db/test_migrations.py | 17 +++++++++ 2 files changed, 53 insertions(+) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/249_remove_duplicate_index.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/249_remove_duplicate_index.py b/nova/db/sqlalchemy/migrate_repo/versions/249_remove_duplicate_index.py new file mode 100644 index 0000000000..9d2f797c9c --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/249_remove_duplicate_index.py @@ -0,0 +1,36 @@ +# Copyright 2014 OpenStack Foundation +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from sqlalchemy import MetaData, Table + + +INDEX_NAME = 'block_device_mapping_instance_uuid_virtual_name_device_name_idx' + + +def upgrade(migrate_engine): + """Remove duplicate index from block_device_mapping table.""" + + meta = MetaData(bind=migrate_engine) + + bdm = Table('block_device_mapping', meta, autoload=True) + for index in bdm.indexes: + if index.name == INDEX_NAME: + index.drop() + + +def downgrade(migrate_engine): + # Unnecessary to re-add duplicate index when downgrading + pass diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py index c7acc97ef6..f0cf594b60 100644 --- a/nova/tests/db/test_migrations.py +++ b/nova/tests/db/test_migrations.py @@ -747,6 +747,23 @@ def _post_downgrade_248(self, engine): index_names = [idx.name for idx in reservations.indexes] self.assertNotIn('reservations_deleted_expire_idx', index_names) + def _check_249(self, engine, data): + # Assert that only one index exists that covers columns + # instance_uuid and device_name + bdm = oslodbutils.get_table(engine, 'block_device_mapping') + self.assertEqual(1, len([i for i in bdm.indexes + if [c.name for c in i.columns] == + ['instance_uuid', 'device_name']])) + + def _post_downgrade_249(self, engine): + # The duplicate index is not created on downgrade, so this + # asserts that only one index exists that covers columns + # instance_uuid and device_name + bdm = oslodbutils.get_table(engine, 'block_device_mapping') + self.assertEqual(1, len([i for i in bdm.indexes + if [c.name for c in i.columns] == + ['instance_uuid', 'device_name']])) + 
class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn): """Test sqlalchemy-migrate migrations.""" From 3085aa183d9602c3725ca699b7f1a92a4a4a5226 Mon Sep 17 00:00:00 2001 From: oleksii Date: Tue, 8 Jul 2014 13:44:25 -0700 Subject: [PATCH 242/486] Update scheduler after instance delete Previously deleting an instance would update the scheduler resources fairly quickly. There is now a delay when deleting an instance until the scheduler makes the resources available again. This appears to be due to the fact that the delete code path used to call resource tracker to update the compute_node record but this no longer happens. Closes-Bug: 1336080 Change-Id: I837b70ce06843ff83481d4985c9c08b74d20cab0 --- nova/compute/manager.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 93a752c9c2..3c7c1671c0 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -2290,6 +2290,7 @@ def _delete_instance(self, context, instance, bdms, quotas): instance.task_state = None instance.terminated_at = timeutils.utcnow() instance.save() + self._update_resource_tracker(context, instance) system_meta = instance.system_metadata instance.destroy() except Exception: From 5c85ee80b47560b1a6ffcb48872700da96fba9f2 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 24 Jul 2014 21:50:11 -0400 Subject: [PATCH 243/486] docs - Fix docstring issues fix indentation, add line before block comment ends, remove extra colon (:) to remove ERROR(s) and WARNING(s) Change-Id: Iabb09867bf6068da35251f35f9f689a623420692 --- nova/compute/manager.py | 14 ++++++++------ nova/utils.py | 2 +- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f1e744f340..05acaecf94 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -529,13 +529,14 @@ def wait_for_instance_event(self, instance, event_names, deadline=300, waiting for the rest of the events, False
to stop processing, or raise an exception which will bubble up to the waiter. - :param:instance: The instance for which an event is expected - :param:event_names: A list of event names. Each element can be a + :param instance: The instance for which an event is expected + :param event_names: A list of event names. Each element can be a string event name or tuple of strings to indicate (name, tag). - :param:deadline: Maximum number of seconds we should wait for all + :param deadline: Maximum number of seconds we should wait for all of the specified events to arrive. - :param:error_callback: A function to be called if an event arrives + :param error_callback: A function to be called if an event arrives + """ if error_callback is None: @@ -4631,8 +4632,9 @@ def pre_live_migration(self, context, instance, block_migration, disk, :param context: security context :param instance: dict of instance data :param block_migration: if true, prepare for block migration - :param migrate_data : if not None, it is a dict which holds data - required for live migration without shared storage. + :param migrate_data: if not None, it is a dict which holds data + required for live migration without shared + storage. """ block_device_info = self._get_instance_block_device_info( diff --git a/nova/utils.py b/nova/utils.py index 2f41914a3c..09928eec45 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -574,7 +574,7 @@ def monkey_patch(): using CONF.monkey_patch_modules. The format is "Module path:Decorator function". Example: - 'nova.api.ec2.cloud:nova.notifications.notify_decorator' + 'nova.api.ec2.cloud:nova.notifications.notify_decorator' Parameters of the decorator is as follows. 
(See nova.notifications.notify_decorator) From e5c3ff67b51f7c466d89778cc2c4999e10c7e08e Mon Sep 17 00:00:00 2001 From: liyingjun Date: Thu, 26 Jun 2014 21:48:52 +0800 Subject: [PATCH 244/486] EndpointNotFound deleting volume backend instance When there is some error in volume creating on booting from volume, we may get the EndpointNotFound exception when deleting the error instance. Then we can never delete the instance successfully. The exception should be ignored. Change-Id: Ia92b648eb18c70996747d361e227de771f33ee9c Closes-bug: 1346866 --- nova/compute/manager.py | 4 ++++ nova/tests/compute/test_compute_mgr.py | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index b03cf79e48..3a61af3bd8 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -34,6 +34,7 @@ import traceback import uuid +from cinderclient import exceptions as cinder_exception import eventlet.event from eventlet import greenthread import eventlet.timeout @@ -2246,6 +2247,9 @@ def _shutdown_instance(self, context, instance, except exception.VolumeNotFound as exc: LOG.warn(_('Ignoring VolumeNotFound: %s') % exc, instance=instance) + except cinder_exception.EndpointNotFound as exc: + LOG.warn(_LW('Ignoring EndpointNotFound: %s'), exc, + instance=instance) if notify: self._notify_about_instance_usage(context, instance, diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 1d7f5171c2..aee3c9e2b5 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -15,6 +15,7 @@ import contextlib import time +from cinderclient import exceptions as cinder_exception from eventlet import event as eventlet_event import mock import mox @@ -504,6 +505,26 @@ def test_init_instance_deletes_error_deleting_instance(self): self.compute._init_instance(self.context, instance) self.mox.VerifyAll() + @mock.patch('nova.context.RequestContext.elevated') + 
@mock.patch('nova.compute.utils.get_nw_info_for_instance') + @mock.patch( + 'nova.compute.manager.ComputeManager._get_instance_block_device_info') + @mock.patch('nova.virt.driver.ComputeDriver.destroy') + @mock.patch('nova.virt.driver.ComputeDriver.get_volume_connector') + def test_shutdown_instance_endpoint_not_found(self, mock_connector, + mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated): + mock_connector.side_effect = cinder_exception.EndpointNotFound + mock_elevated.return_value = self.context + instance = fake_instance.fake_instance_obj( + self.context, + uuid='fake', + vm_state=vm_states.ERROR, + task_state=task_states.DELETING) + bdms = [mock.Mock(id=1, is_volume=True)] + + self.compute._shutdown_instance(self.context, instance, bdms, + notify=False, try_deallocate_networks=False) + def _test_init_instance_retries_reboot(self, instance, reboot_type, return_power_state): with contextlib.nested( From 53c1794b59d8ae050241bbfe5a9a6bea63b87b4c Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Thu, 26 Jun 2014 15:34:06 -0700 Subject: [PATCH 245/486] Import Ironic scheduler filters and host manager This is an import of the Ironic scheduler changes as of commit da967d77894be6f23d81fb5cc948f9d13898ba84 implements bp: add-ironic-driver Co-authored-by: Adam Gandelman Co-authored-by: ChangBo Guo(gcb) Co-authored-by: Chris Behrens Co-authored-by: Chris Krelle Co-authored-by: Devananda van der Veen Co-authored-by: Fengqian Gao Co-authored-by: Hans Lindgren Co-authored-by: Jenkins Co-authored-by: Lucas Alvares Gomes Co-authored-by: Michael Davies Co-authored-by: Rohan Kanade Co-authored-by: Zhongyue Luo Change-Id: I358d9c0485c5dcf81498871faa9150e3bf167c6b --- nova/scheduler/baremetal_host_manager.py | 54 +-- nova/scheduler/base_baremetal_host_manager.py | 57 +++ nova/scheduler/filters/exact_core_filter.py | 51 +++ nova/scheduler/filters/exact_disk_filter.py | 41 ++ nova/scheduler/filters/exact_ram_filter.py | 38 ++ 
nova/scheduler/ironic_host_manager.py | 93 ++++ nova/tests/scheduler/ironic_fakes.py | 75 ++++ .../scheduler/test_ironic_host_manager.py | 412 ++++++++++++++++++ 8 files changed, 778 insertions(+), 43 deletions(-) create mode 100644 nova/scheduler/base_baremetal_host_manager.py create mode 100644 nova/scheduler/filters/exact_core_filter.py create mode 100644 nova/scheduler/filters/exact_disk_filter.py create mode 100644 nova/scheduler/filters/exact_ram_filter.py create mode 100644 nova/scheduler/ironic_host_manager.py create mode 100644 nova/tests/scheduler/ironic_fakes.py create mode 100644 nova/tests/scheduler/test_ironic_host_manager.py diff --git a/nova/scheduler/baremetal_host_manager.py b/nova/scheduler/baremetal_host_manager.py index be59575d59..2869af506b 100644 --- a/nova/scheduler/baremetal_host_manager.py +++ b/nova/scheduler/baremetal_host_manager.py @@ -18,57 +18,25 @@ Manage hosts in the current zone. """ -from nova.openstack.common import jsonutils +import nova.scheduler.base_baremetal_host_manager as bbhm from nova.scheduler import host_manager -class BaremetalNodeState(host_manager.HostState): +class BaremetalNodeState(bbhm.BaseBaremetalNodeState): """Mutable and immutable information tracked for a host. This is an attempt to remove the ad-hoc data structures previously used and lock down access. 
""" + pass - def update_from_compute_node(self, compute): - """Update information about a host from its compute_node info.""" - all_ram_mb = compute['memory_mb'] - free_disk_mb = compute['free_disk_gb'] * 1024 - free_ram_mb = compute['free_ram_mb'] - - self.free_ram_mb = free_ram_mb - self.total_usable_ram_mb = all_ram_mb - self.free_disk_mb = free_disk_mb - self.vcpus_total = compute['vcpus'] - self.vcpus_used = compute['vcpus_used'] - - stats = compute.get('stats', '{}') - self.stats = jsonutils.loads(stats) - - def consume_from_instance(self, instance): - self.free_ram_mb = 0 - self.free_disk_mb = 0 - self.vcpus_used = self.vcpus_total - - -def new_host_state(self, host, node, **kwargs): - """Returns an instance of BaremetalNodeState or HostState according to - compute['cpu_info']. If 'cpu_info' equals 'baremetal cpu', it returns an - instance of BaremetalNodeState. If not, returns an instance of HostState. - """ - compute = kwargs.get('compute') - - if compute and compute.get('cpu_info') == 'baremetal cpu': - return BaremetalNodeState(host, node, **kwargs) - else: - return host_manager.HostState(host, node, **kwargs) - - -class BaremetalHostManager(host_manager.HostManager): +class BaremetalHostManager(bbhm.BaseBaremetalHostManager): """Bare-Metal HostManager class.""" - # Override. 
- # Yes, this is not a class, and it is OK - host_state_cls = new_host_state - - def __init__(self): - super(BaremetalHostManager, self).__init__() + def host_state_cls(self, host, node, **kwargs): + """Factory function/property to create a new HostState.""" + compute = kwargs.get('compute') + if compute and compute.get('cpu_info') == 'baremetal cpu': + return BaremetalNodeState(host, node, **kwargs) + else: + return host_manager.HostState(host, node, **kwargs) diff --git a/nova/scheduler/base_baremetal_host_manager.py b/nova/scheduler/base_baremetal_host_manager.py new file mode 100644 index 0000000000..99baba117b --- /dev/null +++ b/nova/scheduler/base_baremetal_host_manager.py @@ -0,0 +1,57 @@ +# Copyright (c) 2012 NTT DOCOMO, INC. +# Copyright (c) 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Manage hosts in the current zone. +""" + +from nova.openstack.common import jsonutils +from nova.scheduler import host_manager + + +class BaseBaremetalNodeState(host_manager.HostState): + """Mutable and immutable information tracked for a host. + This is an attempt to remove the ad-hoc data structures + previously used and lock down access. 
+ """ + + def update_from_compute_node(self, compute): + """Update information about a host from its compute_node info.""" + self.vcpus_total = compute['vcpus'] + self.vcpus_used = compute['vcpus_used'] + + self.free_ram_mb = compute['free_ram_mb'] + self.total_usable_ram_mb = compute['memory_mb'] + self.free_disk_mb = compute['free_disk_gb'] * 1024 + + stats = compute.get('stats', '{}') + self.stats = jsonutils.loads(stats) + + def consume_from_instance(self, instance): + """Consume nodes entire resources regardless of instance request.""" + self.free_ram_mb = 0 + self.free_disk_mb = 0 + self.vcpus_used = self.vcpus_total + + +class BaseBaremetalHostManager(host_manager.HostManager): + """Base class for Baremetal and Ironic HostManager classes.""" + + def host_state_cls(self, host, node, **kwargs): + """Factory function to create a new HostState. May be overridden + in subclasses to extend functionality. + """ + return BaseBaremetalNodeState(host, node, **kwargs) diff --git a/nova/scheduler/filters/exact_core_filter.py b/nova/scheduler/filters/exact_core_filter.py new file mode 100644 index 0000000000..fbe718ebf8 --- /dev/null +++ b/nova/scheduler/filters/exact_core_filter.py @@ -0,0 +1,51 @@ +# Copyright (c) 2014 OpenStack Foundation +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from nova.i18n import _ +from nova.openstack.common import log as logging +from nova.scheduler import filters + +LOG = logging.getLogger(__name__) + + +class ExactCoreFilter(filters.BaseHostFilter): + """Exact Core Filter.""" + + def host_passes(self, host_state, filter_properties): + """Return True if host has the exact number of CPU cores.""" + instance_type = filter_properties.get('instance_type') + if not instance_type: + return True + + if not host_state.vcpus_total: + # Fail safe + LOG.warning(_("VCPUs not set; assuming CPU collection broken")) + return False + + required_vcpus = instance_type['vcpus'] + usable_vcpus = host_state.vcpus_total - host_state.vcpus_used + + if required_vcpus != usable_vcpus: + LOG.debug("%(host_state)s does not have exactly " + "%(requested_vcpus)s cores of usable vcpu, it has " + "%(usable_vcpus)s.", + {'host_state': host_state, + 'requested_vcpus': required_vcpus, + 'usable_vcpus': usable_vcpus}) + return False + + return True diff --git a/nova/scheduler/filters/exact_disk_filter.py b/nova/scheduler/filters/exact_disk_filter.py new file mode 100644 index 0000000000..543eb4c75e --- /dev/null +++ b/nova/scheduler/filters/exact_disk_filter.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova.openstack.common import log as logging +from nova.scheduler import filters + +LOG = logging.getLogger(__name__) + + +class ExactDiskFilter(filters.BaseHostFilter): + """Exact Disk Filter.""" + + def host_passes(self, host_state, filter_properties): + """Return True if host has the exact amount of disk available.""" + instance_type = filter_properties.get('instance_type') + requested_disk = (1024 * (instance_type['root_gb'] + + instance_type['ephemeral_gb']) + + instance_type['swap']) + + if requested_disk != host_state.free_disk_mb: + LOG.debug("%(host_state)s does not have exactly " + "%(requested_disk)s MB usable disk, it " + "has %(usable_disk_mb)s.", + {'host_state': host_state, + 'requested_disk': requested_disk, + 'usable_disk_mb': host_state.free_disk_mb}) + return False + + return True diff --git a/nova/scheduler/filters/exact_ram_filter.py b/nova/scheduler/filters/exact_ram_filter.py new file mode 100644 index 0000000000..efd845aa6b --- /dev/null +++ b/nova/scheduler/filters/exact_ram_filter.py @@ -0,0 +1,38 @@ +# Copyright (c) 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova.openstack.common import log as logging +from nova.scheduler import filters + +LOG = logging.getLogger(__name__) + + +class ExactRamFilter(filters.BaseHostFilter): + """Exact RAM Filter.""" + + def host_passes(self, host_state, filter_properties): + """Return True if host has the exact amount of RAM available.""" + instance_type = filter_properties.get('instance_type') + requested_ram = instance_type['memory_mb'] + if requested_ram != host_state.free_ram_mb: + LOG.debug("%(host_state)s does not have exactly " + "%(requested_ram)s MB usable RAM, it has " + "%(usable_ram)s.", + {'host_state': host_state, + 'requested_ram': requested_ram, + 'usable_ram': host_state.free_ram_mb}) + return False + + return True diff --git a/nova/scheduler/ironic_host_manager.py b/nova/scheduler/ironic_host_manager.py new file mode 100644 index 0000000000..409c6dd1cc --- /dev/null +++ b/nova/scheduler/ironic_host_manager.py @@ -0,0 +1,93 @@ +# Copyright (c) 2012 NTT DOCOMO, INC. +# Copyright (c) 2011-2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Ironic host manager. + +This host manager will consume all cpu's, disk space, and +ram from a host / node as it is supporting Baremetal hosts, which can not be +subdivided into multiple instances. 
+""" +from oslo.config import cfg + +from nova.openstack.common import log as logging +from nova.openstack.common import timeutils +import nova.scheduler.base_baremetal_host_manager as bbhm +from nova.scheduler import host_manager + +host_manager_opts = [ + cfg.ListOpt('baremetal_scheduler_default_filters', + default=[ + 'RetryFilter', + 'AvailabilityZoneFilter', + 'ComputeFilter', + 'ComputeCapabilitiesFilter', + 'ImagePropertiesFilter', + 'ExactRamFilter', + 'ExactDiskFilter', + 'ExactCoreFilter', + ], + help='Which filter class names to use for filtering ' + 'baremetal hosts when not specified in the request.'), + cfg.BoolOpt('scheduler_use_baremetal_filters', + default=False, + help='Flag to decide whether to use ' + 'baremetal_scheduler_default_filters or not.'), + + ] + +CONF = cfg.CONF +CONF.register_opts(host_manager_opts) + +LOG = logging.getLogger(__name__) + + +class IronicNodeState(bbhm.BaseBaremetalNodeState): + """Mutable and immutable information tracked for a host. + This is an attempt to remove the ad-hoc data structures + previously used and lock down access. 
+ """ + + def update_from_compute_node(self, compute): + """Update information about a host from its compute_node info.""" + super(IronicNodeState, self).update_from_compute_node(compute) + + self.total_usable_disk_gb = compute['local_gb'] + self.updated = compute['updated_at'] + + def consume_from_instance(self, instance): + """Consume nodes entire resources regardless of instance request.""" + super(IronicNodeState, self).consume_from_instance(instance) + + self.updated = timeutils.utcnow() + + +class IronicHostManager(bbhm.BaseBaremetalHostManager): + """Ironic HostManager class.""" + + def __init__(self): + super(IronicHostManager, self).__init__() + if CONF.scheduler_use_baremetal_filters: + baremetal_default = CONF.baremetal_scheduler_default_filters + CONF.scheduler_default_filters = baremetal_default + + def host_state_cls(self, host, node, **kwargs): + """Factory function/property to create a new HostState.""" + compute = kwargs.get('compute') + if compute and compute.get('cpu_info') == 'baremetal cpu': + return IronicNodeState(host, node, **kwargs) + else: + return host_manager.HostState(host, node, **kwargs) diff --git a/nova/tests/scheduler/ironic_fakes.py b/nova/tests/scheduler/ironic_fakes.py new file mode 100644 index 0000000000..266fab08d8 --- /dev/null +++ b/nova/tests/scheduler/ironic_fakes.py @@ -0,0 +1,75 @@ +# Copyright 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Fake nodes for Ironic host manager tests. +""" + +from nova.openstack.common import jsonutils + + +COMPUTE_NODES = [ + dict(id=1, local_gb=10, memory_mb=1024, vcpus=1, + vcpus_used=0, local_gb_used=0, memory_mb_used=0, + updated_at=None, cpu_info='baremetal cpu', + service=dict(host='host1', disabled=False), + hypervisor_hostname='node1uuid', host_ip='127.0.0.1', + hypervisor_version=1, hypervisor_type='ironic', + stats=jsonutils.dumps(dict(ironic_driver= + "nova.virt.ironic.driver.IronicDriver", + cpu_arch='i386')), + supported_instances='[["i386", "baremetal", "baremetal"]]', + free_disk_gb=10, free_ram_mb=1024), + dict(id=2, local_gb=20, memory_mb=2048, vcpus=1, + vcpus_used=0, local_gb_used=0, memory_mb_used=0, + updated_at=None, cpu_info='baremetal cpu', + service=dict(host='host2', disabled=True), + hypervisor_hostname='node2uuid', host_ip='127.0.0.1', + hypervisor_version=1, hypervisor_type='ironic', + stats=jsonutils.dumps(dict(ironic_driver= + "nova.virt.ironic.driver.IronicDriver", + cpu_arch='i386')), + supported_instances='[["i386", "baremetal", "baremetal"]]', + free_disk_gb=20, free_ram_mb=2048), + dict(id=3, local_gb=30, memory_mb=3072, vcpus=1, + vcpus_used=0, local_gb_used=0, memory_mb_used=0, + updated_at=None, cpu_info='baremetal cpu', + service=dict(host='host3', disabled=False), + hypervisor_hostname='node3uuid', host_ip='127.0.0.1', + hypervisor_version=1, hypervisor_type='ironic', + stats=jsonutils.dumps(dict(ironic_driver= + "nova.virt.ironic.driver.IronicDriver", + cpu_arch='i386')), + supported_instances='[["i386", "baremetal", "baremetal"]]', + free_disk_gb=30, free_ram_mb=3072), + dict(id=4, local_gb=40, memory_mb=4096, vcpus=1, + vcpus_used=0, local_gb_used=0, memory_mb_used=0, + updated_at=None, cpu_info='baremetal cpu', + service=dict(host='host4', disabled=False), + hypervisor_hostname='node4uuid', host_ip='127.0.0.1', + hypervisor_version=1, hypervisor_type='ironic', + stats=jsonutils.dumps(dict(ironic_driver= + 
"nova.virt.ironic.driver.IronicDriver", + cpu_arch='i386')), + supported_instances='[["i386", "baremetal", "baremetal"]]', + free_disk_gb=40, free_ram_mb=4096), + # Broken entry + dict(id=5, local_gb=50, memory_mb=5120, vcpus=1, service=None, + cpu_info='baremetal cpu', + stats=jsonutils.dumps(dict(ironic_driver= + "nova.virt.ironic.driver.IronicDriver", + cpu_arch='i386')), + supported_instances='[["i386", "baremetal", "baremetal"]]', + free_disk_gb=50, free_ram_mb=5120), +] diff --git a/nova/tests/scheduler/test_ironic_host_manager.py b/nova/tests/scheduler/test_ironic_host_manager.py new file mode 100644 index 0000000000..761dbf893a --- /dev/null +++ b/nova/tests/scheduler/test_ironic_host_manager.py @@ -0,0 +1,412 @@ +# Copyright (c) 2014 OpenStack Foundation +# Copyright (c) 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Tests For IronicHostManager +""" + +import mock + +from nova import db +from nova import exception +from nova.openstack.common import jsonutils +from nova.scheduler import filters +from nova.scheduler import host_manager +from nova.scheduler import ironic_host_manager +from nova import test +from nova.tests.scheduler import ironic_fakes + + +class FakeFilterClass1(filters.BaseHostFilter): + def host_passes(self, host_state, filter_properties): + pass + + +class FakeFilterClass2(filters.BaseHostFilter): + def host_passes(self, host_state, filter_properties): + pass + + +class IronicHostManagerTestCase(test.NoDBTestCase): + """Test case for IronicHostManager class.""" + + def setUp(self): + super(IronicHostManagerTestCase, self).setUp() + self.host_manager = ironic_host_manager.IronicHostManager() + + def test_get_all_host_states(self): + # Ensure .service is set and we have the values we expect to. + context = 'fake_context' + + self.mox.StubOutWithMock(db, 'compute_node_get_all') + db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES) + self.mox.ReplayAll() + + self.host_manager.get_all_host_states(context) + host_states_map = self.host_manager.host_state_map + + self.assertEqual(len(host_states_map), 4) + for i in range(4): + compute_node = ironic_fakes.COMPUTE_NODES[i] + host = compute_node['service']['host'] + node = compute_node['hypervisor_hostname'] + state_key = (host, node) + self.assertEqual(compute_node['service'], + host_states_map[state_key].service) + self.assertEqual(jsonutils.loads(compute_node['stats']), + host_states_map[state_key].stats) + self.assertEqual(compute_node['free_ram_mb'], + host_states_map[state_key].free_ram_mb) + self.assertEqual(compute_node['free_disk_gb'] * 1024, + host_states_map[state_key].free_disk_mb) + + +class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase): + """Test case for IronicHostManager class.""" + + def setUp(self): + super(IronicHostManagerChangedNodesTestCase, self).setUp() + 
self.host_manager = ironic_host_manager.IronicHostManager() + ironic_driver = "nova.virt.ironic.driver.IronicDriver" + supported_instances = '[["i386", "baremetal", "baremetal"]]' + self.compute_node = dict(id=1, local_gb=10, memory_mb=1024, vcpus=1, + vcpus_used=0, local_gb_used=0, memory_mb_used=0, + updated_at=None, cpu_info='baremetal cpu', + stats=jsonutils.dumps(dict( + ironic_driver=ironic_driver, + cpu_arch='i386')), + supported_instances=supported_instances, + free_disk_gb=10, free_ram_mb=1024) + + @mock.patch.object(ironic_host_manager.IronicNodeState, '__init__') + def test_create_ironic_node_state(self, init_mock): + init_mock.return_value = None + compute = {'cpu_info': 'baremetal cpu'} + host_state = self.host_manager.host_state_cls('fake-host', 'fake-node', + compute=compute) + self.assertIs(ironic_host_manager.IronicNodeState, type(host_state)) + + @mock.patch.object(host_manager.HostState, '__init__') + def test_create_non_ironic_host_state(self, init_mock): + init_mock.return_value = None + compute = {'cpu_info': 'other cpu'} + host_state = self.host_manager.host_state_cls('fake-host', 'fake-node', + compute=compute) + self.assertIs(host_manager.HostState, type(host_state)) + + def test_get_all_host_states_after_delete_one(self): + context = 'fake_context' + + self.mox.StubOutWithMock(db, 'compute_node_get_all') + # all nodes active for first call + db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES) + # remove node4 for second call + running_nodes = [n for n in ironic_fakes.COMPUTE_NODES + if n.get('hypervisor_hostname') != 'node4uuid'] + db.compute_node_get_all(context).AndReturn(running_nodes) + self.mox.ReplayAll() + + self.host_manager.get_all_host_states(context) + self.host_manager.get_all_host_states(context) + host_states_map = self.host_manager.host_state_map + self.assertEqual(3, len(host_states_map)) + + def test_get_all_host_states_after_delete_all(self): + context = 'fake_context' + + self.mox.StubOutWithMock(db, 
'compute_node_get_all') + # all nodes active for first call + db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES) + # remove all nodes for second call + db.compute_node_get_all(context).AndReturn([]) + self.mox.ReplayAll() + + self.host_manager.get_all_host_states(context) + self.host_manager.get_all_host_states(context) + host_states_map = self.host_manager.host_state_map + self.assertEqual(0, len(host_states_map)) + + def test_update_from_compute_node(self): + host = ironic_host_manager.IronicNodeState("fakehost", "fakenode") + host.update_from_compute_node(self.compute_node) + + self.assertEqual(1024, host.free_ram_mb) + self.assertEqual(1024, host.total_usable_ram_mb) + self.assertEqual(10240, host.free_disk_mb) + self.assertEqual(1, host.vcpus_total) + self.assertEqual(0, host.vcpus_used) + self.assertEqual(jsonutils.loads(self.compute_node['stats']), + host.stats) + + def test_consume_identical_instance_from_compute(self): + host = ironic_host_manager.IronicNodeState("fakehost", "fakenode") + host.update_from_compute_node(self.compute_node) + + instance = dict(root_gb=10, ephemeral_gb=0, memory_mb=1024, vcpus=1) + host.consume_from_instance(instance) + + self.assertEqual(1, host.vcpus_used) + self.assertEqual(0, host.free_ram_mb) + self.assertEqual(0, host.free_disk_mb) + + def test_consume_larger_instance_from_compute(self): + host = ironic_host_manager.IronicNodeState("fakehost", "fakenode") + host.update_from_compute_node(self.compute_node) + + instance = dict(root_gb=20, ephemeral_gb=0, memory_mb=2048, vcpus=2) + host.consume_from_instance(instance) + + self.assertEqual(1, host.vcpus_used) + self.assertEqual(0, host.free_ram_mb) + self.assertEqual(0, host.free_disk_mb) + + def test_consume_smaller_instance_from_compute(self): + host = ironic_host_manager.IronicNodeState("fakehost", "fakenode") + host.update_from_compute_node(self.compute_node) + + instance = dict(root_gb=5, ephemeral_gb=0, memory_mb=512, vcpus=1) + 
host.consume_from_instance(instance) + + self.assertEqual(1, host.vcpus_used) + self.assertEqual(0, host.free_ram_mb) + self.assertEqual(0, host.free_disk_mb) + + +class IronicHostManagerTestFilters(test.NoDBTestCase): + """Test filters work for IronicHostManager.""" + + def setUp(self): + super(IronicHostManagerTestFilters, self).setUp() + self.host_manager = ironic_host_manager.IronicHostManager() + self.fake_hosts = [ironic_host_manager.IronicNodeState( + 'fake_host%s' % x, 'fake-node') for x in range(1, 5)] + self.fake_hosts += [ironic_host_manager.IronicNodeState( + 'fake_multihost', 'fake-node%s' % x) for x in range(1, 5)] + + def test_choose_host_filters_not_found(self): + self.flags(scheduler_default_filters='FakeFilterClass3') + self.host_manager.filter_classes = [FakeFilterClass1, + FakeFilterClass2] + self.assertRaises(exception.SchedulerHostFilterNotFound, + self.host_manager._choose_host_filters, None) + + def test_choose_host_filters(self): + self.flags(scheduler_default_filters=['FakeFilterClass2']) + self.host_manager.filter_classes = [FakeFilterClass1, + FakeFilterClass2] + + # Test we returns 1 correct function + filter_classes = self.host_manager._choose_host_filters(None) + self.assertEqual(1, len(filter_classes)) + self.assertEqual('FakeFilterClass2', filter_classes[0].__name__) + + def _mock_get_filtered_hosts(self, info, specified_filters=None): + self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters') + + info['got_objs'] = [] + info['got_fprops'] = [] + + def fake_filter_one(_self, obj, filter_props): + info['got_objs'].append(obj) + info['got_fprops'].append(filter_props) + return True + + self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one) + self.host_manager._choose_host_filters(specified_filters).AndReturn( + [FakeFilterClass1]) + + def _verify_result(self, info, result, filters=True): + for x in info['got_fprops']: + self.assertEqual(x, info['expected_fprops']) + if filters: + 
self.assertEqual(set(info['expected_objs']), set(info['got_objs'])) + self.assertEqual(set(info['expected_objs']), set(result)) + + def test_get_filtered_hosts(self): + fake_properties = {'moo': 1, 'cow': 2} + + info = {'expected_objs': self.fake_hosts, + 'expected_fprops': fake_properties} + + self._mock_get_filtered_hosts(info) + + self.mox.ReplayAll() + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties) + self._verify_result(info, result) + + def test_get_filtered_hosts_with_specified_filters(self): + fake_properties = {'moo': 1, 'cow': 2} + + specified_filters = ['FakeFilterClass1', 'FakeFilterClass2'] + info = {'expected_objs': self.fake_hosts, + 'expected_fprops': fake_properties} + self._mock_get_filtered_hosts(info, specified_filters) + + self.mox.ReplayAll() + + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties, filter_class_names=specified_filters) + self._verify_result(info, result) + + def test_get_filtered_hosts_with_ignore(self): + fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3', + 'fake_host5', 'fake_multihost']} + + # [1] and [3] are host2 and host4 + info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]], + 'expected_fprops': fake_properties} + self._mock_get_filtered_hosts(info) + + self.mox.ReplayAll() + + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties) + self._verify_result(info, result) + + def test_get_filtered_hosts_with_force_hosts(self): + fake_properties = {'force_hosts': ['fake_host1', 'fake_host3', + 'fake_host5']} + + # [0] and [2] are host1 and host3 + info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]], + 'expected_fprops': fake_properties} + self._mock_get_filtered_hosts(info) + + self.mox.ReplayAll() + + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties) + self._verify_result(info, result, False) + + def test_get_filtered_hosts_with_no_matching_force_hosts(self): + 
fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']} + + info = {'expected_objs': [], + 'expected_fprops': fake_properties} + self._mock_get_filtered_hosts(info) + + self.mox.ReplayAll() + + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties) + self._verify_result(info, result, False) + + def test_get_filtered_hosts_with_ignore_and_force_hosts(self): + # Ensure ignore_hosts processed before force_hosts in host filters. + fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'], + 'ignore_hosts': ['fake_host1']} + + # only fake_host3 should be left. + info = {'expected_objs': [self.fake_hosts[2]], + 'expected_fprops': fake_properties} + self._mock_get_filtered_hosts(info) + + self.mox.ReplayAll() + + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties) + self._verify_result(info, result, False) + + def test_get_filtered_hosts_with_force_host_and_many_nodes(self): + # Ensure all nodes returned for a host with many nodes + fake_properties = {'force_hosts': ['fake_multihost']} + + info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5], + self.fake_hosts[6], self.fake_hosts[7]], + 'expected_fprops': fake_properties} + self._mock_get_filtered_hosts(info) + + self.mox.ReplayAll() + + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties) + self._verify_result(info, result, False) + + def test_get_filtered_hosts_with_force_nodes(self): + fake_properties = {'force_nodes': ['fake-node2', 'fake-node4', + 'fake-node9']} + + # [5] is fake-node2, [7] is fake-node4 + info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]], + 'expected_fprops': fake_properties} + self._mock_get_filtered_hosts(info) + + self.mox.ReplayAll() + + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties) + self._verify_result(info, result, False) + + def test_get_filtered_hosts_with_force_hosts_and_nodes(self): + # Ensure only overlapping results if both 
force host and node + fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'], + 'force_nodes': ['fake-node2', 'fake-node9']} + + # [5] is fake-node2 + info = {'expected_objs': [self.fake_hosts[5]], + 'expected_fprops': fake_properties} + self._mock_get_filtered_hosts(info) + + self.mox.ReplayAll() + + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties) + self._verify_result(info, result, False) + + def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self): + # Ensure non-overlapping force_node and force_host yield no result + fake_properties = {'force_hosts': ['fake_multihost'], + 'force_nodes': ['fake-node']} + + info = {'expected_objs': [], + 'expected_fprops': fake_properties} + self._mock_get_filtered_hosts(info) + + self.mox.ReplayAll() + + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties) + self._verify_result(info, result, False) + + def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self): + # Ensure ignore_hosts can coexist with force_nodes + fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'], + 'ignore_hosts': ['fake_host1', 'fake_host2']} + + info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]], + 'expected_fprops': fake_properties} + self._mock_get_filtered_hosts(info) + + self.mox.ReplayAll() + + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties) + self._verify_result(info, result, False) + + def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self): + # Ensure ignore_hosts is processed before force_nodes + fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'], + 'ignore_hosts': ['fake_multihost']} + + info = {'expected_objs': [], + 'expected_fprops': fake_properties} + self._mock_get_filtered_hosts(info) + + self.mox.ReplayAll() + + result = self.host_manager.get_filtered_hosts(self.fake_hosts, + fake_properties) + self._verify_result(info, result, False) From 
904fad326b249672908c94daf03c8c35cd4c7365 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Wed, 28 May 2014 14:00:14 +0200 Subject: [PATCH 246/486] EC2: fixed AttributeError when metadata is not found Change-Id: I91e0141bd89cd18596ccae2ea213f641ba4d7f74 Closes-Bug: 1324091 --- nova/api/metadata/handler.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py index 92ae37c017..ec0f31125b 100644 --- a/nova/api/metadata/handler.py +++ b/nova/api/metadata/handler.py @@ -206,8 +206,7 @@ def _handle_instance_id_request(self, req): if meta_data is None: LOG.error(_LE('Failed to get metadata for instance id: %s'), instance_id) - - if meta_data.instance['project_id'] != tenant_id: + elif meta_data.instance['project_id'] != tenant_id: LOG.warn(_LW("Tenant_id %(tenant_id)s does not match tenant_id " "of instance %(instance_id)s."), {'tenant_id': tenant_id, 'instance_id': instance_id}) From fadb91e99c0f66e7b7ec25f17bdfad440f670668 Mon Sep 17 00:00:00 2001 From: liyingjun Date: Fri, 1 Aug 2014 18:22:39 +0800 Subject: [PATCH 247/486] Change LOG.warn to LOG.debug in _shutdown_instance Change LOG.warn to LOG.debug for detaching volume related exceptions in _shutdown_instance in compute/manager.py, since as the log message said the exceptions should be ignored. It's probably not worth a warning in the log. 
Change-Id: Ibe216b9bccf590e64aa1b966753bd1ec67c8dbdf --- nova/compute/manager.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 45874a3876..5fa8ee4c56 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -2244,11 +2244,11 @@ def _shutdown_instance(self, context, instance, connector) self.volume_api.detach(context, bdm.volume_id) except exception.DiskNotFound as exc: - LOG.warn(_('Ignoring DiskNotFound: %s') % exc, - instance=instance) + LOG.debug('Ignoring DiskNotFound: %s', exc, + instance=instance) except exception.VolumeNotFound as exc: - LOG.warn(_('Ignoring VolumeNotFound: %s') % exc, - instance=instance) + LOG.debug('Ignoring VolumeNotFound: %s', exc, + instance=instance) except cinder_exception.EndpointNotFound as exc: LOG.warn(_LW('Ignoring EndpointNotFound: %s'), exc, instance=instance) From 5830aed6295664b42cd773fb4bb64629f57c496b Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 24 Jul 2014 22:31:01 -0400 Subject: [PATCH 248/486] docs - fix missing references Change-Id: Ia33fc8a3495ef6d4b929aa88efccc002616475fc --- doc/source/devref/filter_scheduler.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst index a5e20d83b7..872f3f6337 100644 --- a/doc/source/devref/filter_scheduler.rst +++ b/doc/source/devref/filter_scheduler.rst @@ -366,8 +366,11 @@ in :mod:``nova.tests.scheduler``. .. |TrustedFilter| replace:: :class:`TrustedFilter ` .. |TypeAffinityFilter| replace:: :class:`TypeAffinityFilter ` .. |AggregateTypeAffinityFilter| replace:: :class:`AggregateTypeAffinityFilter ` +.. |ServerGroupAntiAffinityFilter| replace:: :class:`ServerGroupAntiAffinityFilter ` +.. |ServerGroupAffinityFilter| replace:: :class:`ServerGroupAntiAffinityFilter ` .. |AggregateInstanceExtraSpecsFilter| replace:: :class:`AggregateInstanceExtraSpecsFilter ` .. 
|AggregateMultiTenancyIsolation| replace:: :class:`AggregateMultiTenancyIsolation ` .. |RamWeigher| replace:: :class:`RamWeigher ` .. |AggregateImagePropertiesIsolation| replace:: :class:`AggregateImagePropertiesIsolation ` .. |MetricsFilter| replace:: :class:`MetricsFilter ` +.. |MetricsWeigher| replace:: :class:`MetricsWeigher ` From 24a24fcf52bf22a464d8fe2a652050fd0016d6c9 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 24 Jul 2014 22:44:07 -0400 Subject: [PATCH 249/486] docs - Add an index for the command line utilities Change-Id: I03376a9159c3ef6c2b65cce417a217db68e8c246 --- doc/source/index.rst | 1 + doc/source/man/index.rst | 49 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 doc/source/man/index.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index 077fc2920d..37fcb48b88 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -47,6 +47,7 @@ Developer Docs :maxdepth: 1 devref/index + man/index API Extensions ============== diff --git a/doc/source/man/index.rst b/doc/source/man/index.rst new file mode 100644 index 0000000000..af0e4b83c7 --- /dev/null +++ b/doc/source/man/index.rst @@ -0,0 +1,49 @@ +.. + Copyright 2010-2011 United States Government as represented by the + Administrator of the National Aeronautics and Space Administration. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. 
+ +Command-line Utilities +====================== + +In this section you will find information on Nova's command line utilities. + +Reference +--------- +.. toctree:: + :maxdepth: 3 + + nova-all + nova-api-ec2 + nova-api-metadata + nova-api-os-compute + nova-api + nova-baremetal-deploy-helper + nova-baremetal-manage + nova-cert + nova-compute + nova-conductor + nova-console + nova-consoleauth + nova-dhcpbridge + nova-manage + nova-network + nova-novncproxy + nova-objectstore + nova-rootwrap + nova-scheduler + nova-spicehtml5proxy + nova-xvpvncproxy + From 365aae94ccb3a37ab7b01181722c4faf95c34b4c Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Sun, 27 Jul 2014 21:37:30 -0400 Subject: [PATCH 250/486] docs - Prevent eventlet exception during docs generation build_sphinx has a lot of spurious tracebacks, We don't really need this check for document generation Change-Id: Id9442c5902918592c4c242cc66975062cf9f2461 --- nova/cmd/__init__.py | 7 ++++++- nova/tests/__init__.py | 8 +++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/nova/cmd/__init__.py b/nova/cmd/__init__.py index cbe5ae351e..5f1129d9c1 100644 --- a/nova/cmd/__init__.py +++ b/nova/cmd/__init__.py @@ -16,13 +16,18 @@ # TODO(mikal): move eventlet imports to nova.__init__ once we move to PBR import os import sys +import traceback # NOTE(mikal): All of this is because if dnspython is present in your # environment then eventlet monkeypatches socket.getaddrinfo() with an # implementation which doesn't work for IPv6. What we're checking here is # that the magic environment variable was set when the import happened. +# NOTE(dims): Prevent this code from kicking in under docs generation +# as it leads to spurious errors/warning. 
+stack = traceback.extract_stack() if ('eventlet' in sys.modules and - os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'): + os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes' and + (len(stack) < 2 or 'sphinx' not in stack[-2][0])): raise ImportError('eventlet imported before nova/cmd/__init__ ' '(env var set to %s)' % os.environ.get('EVENTLET_NO_GREENDNS')) diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py index 566fabba1d..a40a666484 100644 --- a/nova/tests/__init__.py +++ b/nova/tests/__init__.py @@ -25,13 +25,19 @@ # TODO(mikal): move eventlet imports to nova.__init__ once we move to PBR import os import sys +import traceback + # NOTE(mikal): All of this is because if dnspython is present in your # environment then eventlet monkeypatches socket.getaddrinfo() with an # implementation which doesn't work for IPv6. What we're checking here is # that the magic environment variable was set when the import happened. +# NOTE(dims): Prevent this code from kicking in under docs generation +# as it leads to spurious errors/warning. +stack = traceback.extract_stack() if ('eventlet' in sys.modules and - os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'): + os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes' and + (len(stack) < 2 or 'sphinx' not in stack[-2][0])): raise ImportError('eventlet imported before nova/cmd/__init__ ' '(env var set to %s)' % os.environ.get('EVENTLET_NO_GREENDNS')) From 4fed8feb09e512d3eb76f6b8d9e80969e58f9398 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 28 Jul 2014 12:45:41 -0700 Subject: [PATCH 251/486] docs - Fix indentation for RPC API's Fix ERROR(s) and WARNING(s) during docs generation because of bad indentation, spaces etc. 
Change-Id: I450e929fbc8e148491b8b0a96a8c6d2aff605ab4 --- nova/cells/rpcapi.py | 66 +++---- nova/compute/rpcapi.py | 364 +++++++++++++++++++------------------ nova/conductor/rpcapi.py | 233 ++++++++++++------------ nova/consoleauth/rpcapi.py | 10 +- nova/network/rpcapi.py | 40 ++-- nova/scheduler/rpcapi.py | 53 +++--- 6 files changed, 390 insertions(+), 376 deletions(-) diff --git a/nova/cells/rpcapi.py b/nova/cells/rpcapi.py index 87cf8f3c46..769552e294 100644 --- a/nova/cells/rpcapi.py +++ b/nova/cells/rpcapi.py @@ -48,54 +48,54 @@ class CellsAPI(object): API version history: - 1.0 - Initial version. - 1.1 - Adds get_cell_info_for_neighbors() and sync_instances() - 1.2 - Adds service_get_all(), service_get_by_compute_host(), - and proxy_rpc_to_compute_manager() - 1.3 - Adds task_log_get_all() - 1.4 - Adds compute_node_get(), compute_node_get_all(), and - compute_node_stats() - 1.5 - Adds actions_get(), action_get_by_request_id(), and - action_events_get() - 1.6 - Adds consoleauth_delete_tokens() and validate_console_port() + * 1.0 - Initial version. + * 1.1 - Adds get_cell_info_for_neighbors() and sync_instances() + * 1.2 - Adds service_get_all(), service_get_by_compute_host(), + and proxy_rpc_to_compute_manager() + * 1.3 - Adds task_log_get_all() + * 1.4 - Adds compute_node_get(), compute_node_get_all(), and + compute_node_stats() + * 1.5 - Adds actions_get(), action_get_by_request_id(), and + action_events_get() + * 1.6 - Adds consoleauth_delete_tokens() and validate_console_port() ... Grizzly supports message version 1.6. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 1.6. 
- 1.7 - Adds service_update() - 1.8 - Adds build_instances(), deprecates schedule_run_instance() - 1.9 - Adds get_capacities() - 1.10 - Adds bdm_update_or_create_at_top(), and bdm_destroy_at_top() - 1.11 - Adds get_migrations() - 1.12 - Adds instance_start() and instance_stop() - 1.13 - Adds cell_create(), cell_update(), cell_delete(), and - cell_get() - 1.14 - Adds reboot_instance() - 1.15 - Adds suspend_instance() and resume_instance() - 1.16 - Adds instance_update_from_api() - 1.17 - Adds get_host_uptime() - 1.18 - Adds terminate_instance() and soft_delete_instance() - 1.19 - Adds pause_instance() and unpause_instance() - 1.20 - Adds resize_instance() and live_migrate_instance() - 1.21 - Adds revert_resize() and confirm_resize() - 1.22 - Adds reset_network() - 1.23 - Adds inject_network_info() - 1.24 - Adds backup_instance() and snapshot_instance() + * 1.7 - Adds service_update() + * 1.8 - Adds build_instances(), deprecates schedule_run_instance() + * 1.9 - Adds get_capacities() + * 1.10 - Adds bdm_update_or_create_at_top(), and bdm_destroy_at_top() + * 1.11 - Adds get_migrations() + * 1.12 - Adds instance_start() and instance_stop() + * 1.13 - Adds cell_create(), cell_update(), cell_delete(), and + cell_get() + * 1.14 - Adds reboot_instance() + * 1.15 - Adds suspend_instance() and resume_instance() + * 1.16 - Adds instance_update_from_api() + * 1.17 - Adds get_host_uptime() + * 1.18 - Adds terminate_instance() and soft_delete_instance() + * 1.19 - Adds pause_instance() and unpause_instance() + * 1.20 - Adds resize_instance() and live_migrate_instance() + * 1.21 - Adds revert_resize() and confirm_resize() + * 1.22 - Adds reset_network() + * 1.23 - Adds inject_network_info() + * 1.24 - Adds backup_instance() and snapshot_instance() ... Havana supports message version 1.24. So, any changes to existing methods in 1.x after that point should be done such that they can handle the version_cap being set to 1.24. 
- 1.25 - Adds rebuild_instance() - 1.26 - Adds service_delete() - 1.27 - Updates instance_delete_everywhere() for instance objects + * 1.25 - Adds rebuild_instance() + * 1.26 - Adds service_delete() + * 1.27 - Updates instance_delete_everywhere() for instance objects ... Icehouse supports message version 1.27. So, any changes to existing methods in 1.x after that point should be done such that they can handle the version_cap being set to 1.27. - 1.28 - Make bdm_update_or_create_at_top and use bdm objects + * 1.28 - Make bdm_update_or_create_at_top and use bdm objects ''' VERSION_ALIASES = { diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py index 5de33f0729..7aeed520e7 100644 --- a/nova/compute/rpcapi.py +++ b/nova/compute/rpcapi.py @@ -67,199 +67,207 @@ class ComputeAPI(object): API version history: - 1.0 - Initial version. - 1.1 - Adds get_host_uptime() - 1.2 - Adds check_can_live_migrate_[destination|source] - 1.3 - Adds change_instance_metadata() - 1.4 - Remove instance_uuid, add instance argument to reboot_instance() - 1.5 - Remove instance_uuid, add instance argument to pause_instance(), - unpause_instance() - 1.6 - Remove instance_uuid, add instance argument to suspend_instance() - 1.7 - Remove instance_uuid, add instance argument to - get_console_output() - 1.8 - Remove instance_uuid, add instance argument to - add_fixed_ip_to_instance() - 1.9 - Remove instance_uuid, add instance argument to attach_volume() - 1.10 - Remove instance_id, add instance argument to - check_can_live_migrate_destination() - 1.11 - Remove instance_id, add instance argument to - check_can_live_migrate_source() - 1.12 - Remove instance_uuid, add instance argument to confirm_resize() - 1.13 - Remove instance_uuid, add instance argument to detach_volume() - 1.14 - Remove instance_uuid, add instance argument to finish_resize() - 1.15 - Remove instance_uuid, add instance argument to - finish_revert_resize() - 1.16 - Remove instance_uuid, add instance argument to 
get_diagnostics() - 1.17 - Remove instance_uuid, add instance argument to get_vnc_console() - 1.18 - Remove instance_uuid, add instance argument to inject_file() - 1.19 - Remove instance_uuid, add instance argument to - inject_network_info() - 1.20 - Remove instance_id, add instance argument to - post_live_migration_at_destination() - 1.21 - Remove instance_uuid, add instance argument to - power_off_instance() and stop_instance() - 1.22 - Remove instance_uuid, add instance argument to - power_on_instance() and start_instance() - 1.23 - Remove instance_id, add instance argument to - pre_live_migration() - 1.24 - Remove instance_uuid, add instance argument to - rebuild_instance() - 1.25 - Remove instance_uuid, add instance argument to - remove_fixed_ip_from_instance() - 1.26 - Remove instance_id, add instance argument to - remove_volume_connection() - 1.27 - Remove instance_uuid, add instance argument to - rescue_instance() - 1.28 - Remove instance_uuid, add instance argument to reset_network() - 1.29 - Remove instance_uuid, add instance argument to resize_instance() - 1.30 - Remove instance_uuid, add instance argument to resume_instance() - 1.31 - Remove instance_uuid, add instance argument to revert_resize() - 1.32 - Remove instance_id, add instance argument to - rollback_live_migration_at_destination() - 1.33 - Remove instance_uuid, add instance argument to - set_admin_password() - 1.34 - Remove instance_uuid, add instance argument to - snapshot_instance() - 1.35 - Remove instance_uuid, add instance argument to - unrescue_instance() - 1.36 - Remove instance_uuid, add instance argument to - change_instance_metadata() - 1.37 - Remove instance_uuid, add instance argument to - terminate_instance() - 1.38 - Changes to prep_resize(): - - remove instance_uuid, add instance - - remove instance_type_id, add instance_type - - remove topic, it was unused - 1.39 - Remove instance_uuid, add instance argument to run_instance() - 1.40 - Remove instance_id, add instance argument 
to live_migration() - 1.41 - Adds refresh_instance_security_rules() - 1.42 - Add reservations arg to prep_resize(), resize_instance(), - finish_resize(), confirm_resize(), revert_resize() and - finish_revert_resize() - 1.43 - Add migrate_data to live_migration() - 1.44 - Adds reserve_block_device_name() - - 2.0 - Remove 1.x backwards compat - 2.1 - Adds orig_sys_metadata to rebuild_instance() - 2.2 - Adds slave_info parameter to add_aggregate_host() and - remove_aggregate_host() - 2.3 - Adds volume_id to reserve_block_device_name() - 2.4 - Add bdms to terminate_instance - 2.5 - Add block device and network info to reboot_instance - 2.6 - Remove migration_id, add migration to resize_instance - 2.7 - Remove migration_id, add migration to confirm_resize - 2.8 - Remove migration_id, add migration to finish_resize - 2.9 - Add publish_service_capabilities() - 2.10 - Adds filter_properties and request_spec to prep_resize() - 2.11 - Adds soft_delete_instance() and restore_instance() - 2.12 - Remove migration_id, add migration to revert_resize - 2.13 - Remove migration_id, add migration to finish_revert_resize - 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host - 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host - 2.16 - Add instance_type to resize_instance - 2.17 - Add get_backdoor_port() - 2.18 - Add bdms to rebuild_instance - 2.19 - Add node to run_instance - 2.20 - Add node to prep_resize - 2.21 - Add migrate_data dict param to pre_live_migration() - 2.22 - Add recreate, on_shared_storage and host arguments to - rebuild_instance() - 2.23 - Remove network_info from reboot_instance - 2.24 - Added get_spice_console method - 2.25 - Add attach_interface() and detach_interface() - 2.26 - Add validate_console_port to ensure the service connects to - vnc on the correct port - 2.27 - Adds 'reservations' to terminate_instance() and - soft_delete_instance() + * 1.0 - Initial version. 
+ * 1.1 - Adds get_host_uptime() + * 1.2 - Adds check_can_live_migrate_[destination|source] + * 1.3 - Adds change_instance_metadata() + * 1.4 - Remove instance_uuid, add instance argument to + reboot_instance() + * 1.5 - Remove instance_uuid, add instance argument to + pause_instance(), unpause_instance() + * 1.6 - Remove instance_uuid, add instance argument to + suspend_instance() + * 1.7 - Remove instance_uuid, add instance argument to + get_console_output() + * 1.8 - Remove instance_uuid, add instance argument to + add_fixed_ip_to_instance() + * 1.9 - Remove instance_uuid, add instance argument to attach_volume() + * 1.10 - Remove instance_id, add instance argument to + check_can_live_migrate_destination() + * 1.11 - Remove instance_id, add instance argument to + check_can_live_migrate_source() + * 1.12 - Remove instance_uuid, add instance argument to + confirm_resize() + * 1.13 - Remove instance_uuid, add instance argument to detach_volume() + * 1.14 - Remove instance_uuid, add instance argument to finish_resize() + * 1.15 - Remove instance_uuid, add instance argument to + finish_revert_resize() + * 1.16 - Remove instance_uuid, add instance argument to + get_diagnostics() + * 1.17 - Remove instance_uuid, add instance argument to + get_vnc_console() + * 1.18 - Remove instance_uuid, add instance argument to inject_file() + * 1.19 - Remove instance_uuid, add instance argument to + inject_network_info() + * 1.20 - Remove instance_id, add instance argument to + post_live_migration_at_destination() + * 1.21 - Remove instance_uuid, add instance argument to + power_off_instance() and stop_instance() + * 1.22 - Remove instance_uuid, add instance argument to + power_on_instance() and start_instance() + * 1.23 - Remove instance_id, add instance argument to + pre_live_migration() + * 1.24 - Remove instance_uuid, add instance argument to + rebuild_instance() + * 1.25 - Remove instance_uuid, add instance argument to + remove_fixed_ip_from_instance() + * 1.26 - Remove 
instance_id, add instance argument to + remove_volume_connection() + * 1.27 - Remove instance_uuid, add instance argument to + rescue_instance() + * 1.28 - Remove instance_uuid, add instance argument to reset_network() + * 1.29 - Remove instance_uuid, add instance argument to + resize_instance() + * 1.30 - Remove instance_uuid, add instance argument to + resume_instance() + * 1.31 - Remove instance_uuid, add instance argument to revert_resize() + * 1.32 - Remove instance_id, add instance argument to + rollback_live_migration_at_destination() + * 1.33 - Remove instance_uuid, add instance argument to + set_admin_password() + * 1.34 - Remove instance_uuid, add instance argument to + snapshot_instance() + * 1.35 - Remove instance_uuid, add instance argument to + unrescue_instance() + * 1.36 - Remove instance_uuid, add instance argument to + change_instance_metadata() + * 1.37 - Remove instance_uuid, add instance argument to + terminate_instance() + * 1.38 - Changes to prep_resize(): + * remove instance_uuid, add instance + * remove instance_type_id, add instance_type + * remove topic, it was unused + * 1.39 - Remove instance_uuid, add instance argument to run_instance() + * 1.40 - Remove instance_id, add instance argument to live_migration() + * 1.41 - Adds refresh_instance_security_rules() + * 1.42 - Add reservations arg to prep_resize(), resize_instance(), + finish_resize(), confirm_resize(), revert_resize() and + finish_revert_resize() + * 1.43 - Add migrate_data to live_migration() + * 1.44 - Adds reserve_block_device_name() + + * 2.0 - Remove 1.x backwards compat + * 2.1 - Adds orig_sys_metadata to rebuild_instance() + * 2.2 - Adds slave_info parameter to add_aggregate_host() and + remove_aggregate_host() + * 2.3 - Adds volume_id to reserve_block_device_name() + * 2.4 - Add bdms to terminate_instance + * 2.5 - Add block device and network info to reboot_instance + * 2.6 - Remove migration_id, add migration to resize_instance + * 2.7 - Remove migration_id, add 
migration to confirm_resize + * 2.8 - Remove migration_id, add migration to finish_resize + * 2.9 - Add publish_service_capabilities() + * 2.10 - Adds filter_properties and request_spec to prep_resize() + * 2.11 - Adds soft_delete_instance() and restore_instance() + * 2.12 - Remove migration_id, add migration to revert_resize + * 2.13 - Remove migration_id, add migration to finish_revert_resize + * 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host + * 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host + * 2.16 - Add instance_type to resize_instance + * 2.17 - Add get_backdoor_port() + * 2.18 - Add bdms to rebuild_instance + * 2.19 - Add node to run_instance + * 2.20 - Add node to prep_resize + * 2.21 - Add migrate_data dict param to pre_live_migration() + * 2.22 - Add recreate, on_shared_storage and host arguments to + rebuild_instance() + * 2.23 - Remove network_info from reboot_instance + * 2.24 - Added get_spice_console method + * 2.25 - Add attach_interface() and detach_interface() + * 2.26 - Add validate_console_port to ensure the service connects to + vnc on the correct port + * 2.27 - Adds 'reservations' to terminate_instance() and + soft_delete_instance() ... Grizzly supports message version 2.27. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.27. 
- 2.28 - Adds check_instance_shared_storage() - 2.29 - Made start_instance() and stop_instance() take new-world - instance objects - 2.30 - Adds live_snapshot_instance() - 2.31 - Adds shelve_instance(), shelve_offload_instance, and - unshelve_instance() - 2.32 - Make reboot_instance take a new world instance object - 2.33 - Made suspend_instance() and resume_instance() take new-world - instance objects - 2.34 - Added swap_volume() - 2.35 - Made terminate_instance() and soft_delete_instance() take - new-world instance objects - 2.36 - Made pause_instance() and unpause_instance() take new-world - instance objects - 2.37 - Added the legacy_bdm_in_spec parameter to run_instance - 2.38 - Made check_can_live_migrate_[destination|source] take - new-world instance objects - 2.39 - Made revert_resize() and confirm_resize() take new-world - instance objects - 2.40 - Made reset_network() take new-world instance object - 2.41 - Make inject_network_info take new-world instance object - 2.42 - Splits snapshot_instance() into snapshot_instance() and - backup_instance() and makes them take new-world instance - objects. 
- 2.43 - Made prep_resize() take new-world instance object - 2.44 - Add volume_snapshot_create(), volume_snapshot_delete() - 2.45 - Made resize_instance() take new-world objects - 2.46 - Made finish_resize() take new-world objects - 2.47 - Made finish_revert_resize() take new-world objects + * 2.28 - Adds check_instance_shared_storage() + * 2.29 - Made start_instance() and stop_instance() take new-world + instance objects + * 2.30 - Adds live_snapshot_instance() + * 2.31 - Adds shelve_instance(), shelve_offload_instance, and + unshelve_instance() + * 2.32 - Make reboot_instance take a new world instance object + * 2.33 - Made suspend_instance() and resume_instance() take new-world + instance objects + * 2.34 - Added swap_volume() + * 2.35 - Made terminate_instance() and soft_delete_instance() take + new-world instance objects + * 2.36 - Made pause_instance() and unpause_instance() take new-world + instance objects + * 2.37 - Added the legacy_bdm_in_spec parameter to run_instance + * 2.38 - Made check_can_live_migrate_[destination|source] take + new-world instance objects + * 2.39 - Made revert_resize() and confirm_resize() take new-world + instance objects + * 2.40 - Made reset_network() take new-world instance object + * 2.41 - Make inject_network_info take new-world instance object + * 2.42 - Splits snapshot_instance() into snapshot_instance() and + backup_instance() and makes them take new-world instance + objects. + * 2.43 - Made prep_resize() take new-world instance object + * 2.44 - Add volume_snapshot_create(), volume_snapshot_delete() + * 2.45 - Made resize_instance() take new-world objects + * 2.46 - Made finish_resize() take new-world objects + * 2.47 - Made finish_revert_resize() take new-world objects ... Havana supports message version 2.47. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.47. 
- 2.48 - Make add_aggregate_host() and remove_aggregate_host() take - new-world objects - ... - Remove live_snapshot() that was never actually used - - 3.0 - Remove 2.x compatibility - 3.1 - Update get_spice_console() to take an instance object - 3.2 - Update get_vnc_console() to take an instance object - 3.3 - Update validate_console_port() to take an instance object - 3.4 - Update rebuild_instance() to take an instance object - 3.5 - Pass preserve_ephemeral flag to rebuild_instance() - 3.6 - Make volume_snapshot_{create,delete} use new-world objects - 3.7 - Update change_instance_metadata() to take an instance object - 3.8 - Update set_admin_password() to take an instance object - 3.9 - Update rescue_instance() to take an instance object - 3.10 - Added get_rdp_console method - 3.11 - Update unrescue_instance() to take an object - 3.12 - Update add_fixed_ip_to_instance() to take an object - 3.13 - Update remove_fixed_ip_from_instance() to take an object - 3.14 - Update post_live_migration_at_destination() to take an object - 3.15 - Adds filter_properties and node to unshelve_instance() - 3.16 - Make reserve_block_device_name and attach_volume use new-world - objects, and add disk_bus and device_type params to - reserve_block_device_name, and bdm param to attach_volume - 3.17 - Update attach_interface and detach_interface to take an object - 3.18 - Update get_diagnostics() to take an instance object - ... - Removed inject_file(), as it was unused. - 3.19 - Update pre_live_migration to take instance object - 3.20 - Make restore_instance take an instance object - 3.21 - Made rebuild take new-world BDM objects - 3.22 - Made terminate_instance take new-world BDM objects - 3.23 - Added external_instance_event() - - build_and_run_instance was added in Havana and not used or - documented. + * 2.48 - Make add_aggregate_host() and remove_aggregate_host() take + new-world objects + * ... 
- Remove live_snapshot() that was never actually used + + * 3.0 - Remove 2.x compatibility + * 3.1 - Update get_spice_console() to take an instance object + * 3.2 - Update get_vnc_console() to take an instance object + * 3.3 - Update validate_console_port() to take an instance object + * 3.4 - Update rebuild_instance() to take an instance object + * 3.5 - Pass preserve_ephemeral flag to rebuild_instance() + * 3.6 - Make volume_snapshot_{create,delete} use new-world objects + * 3.7 - Update change_instance_metadata() to take an instance object + * 3.8 - Update set_admin_password() to take an instance object + * 3.9 - Update rescue_instance() to take an instance object + * 3.10 - Added get_rdp_console method + * 3.11 - Update unrescue_instance() to take an object + * 3.12 - Update add_fixed_ip_to_instance() to take an object + * 3.13 - Update remove_fixed_ip_from_instance() to take an object + * 3.14 - Update post_live_migration_at_destination() to take an object + * 3.15 - Adds filter_properties and node to unshelve_instance() + * 3.16 - Make reserve_block_device_name and attach_volume use new-world + objects, and add disk_bus and device_type params to + reserve_block_device_name, and bdm param to attach_volume + * 3.17 - Update attach_interface and detach_interface to take an object + * 3.18 - Update get_diagnostics() to take an instance object + * Removed inject_file(), as it was unused. + * 3.19 - Update pre_live_migration to take instance object + * 3.20 - Make restore_instance take an instance object + * 3.21 - Made rebuild take new-world BDM objects + * 3.22 - Made terminate_instance take new-world BDM objects + * 3.23 - Added external_instance_event() + * build_and_run_instance was added in Havana and not used or + documented. ... Icehouse supports message version 3.23. So, any changes to existing methods in 3.x after that point should be done such that they can handle the version_cap being set to 3.23. 
- 3.24 - Update rescue_instance() to take optional rescue_image_ref - 3.25 - Make detach_volume take an object - 3.26 - Make live_migration() and - rollback_live_migration_at_destination() take an object - ... - Removed run_instance() - 3.27 - Make run_instance() accept a new-world object - 3.28 - Update get_console_output() to accept a new-world object - 3.29 - Make check_instance_shared_storage accept a new-world object - 3.30 - Make remove_volume_connection() accept a new-world object - 3.31 - Add get_instance_diagnostics - 3.32 - Add destroy_disks and migrate_data optional parameters to - rollback_live_migration_at_destination() + * 3.24 - Update rescue_instance() to take optional rescue_image_ref + * 3.25 - Make detach_volume take an object + * 3.26 - Make live_migration() and + rollback_live_migration_at_destination() take an object + * ... Removed run_instance() + * 3.27 - Make run_instance() accept a new-world object + * 3.28 - Update get_console_output() to accept a new-world object + * 3.29 - Make check_instance_shared_storage accept a new-world object + * 3.30 - Make remove_volume_connection() accept a new-world object + * 3.31 - Add get_instance_diagnostics + * 3.32 - Add destroy_disks and migrate_data optional parameters to + rollback_live_migration_at_destination() + ''' VERSION_ALIASES = { diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py index 0972f89e9e..2e2cfb9576 100644 --- a/nova/conductor/rpcapi.py +++ b/nova/conductor/rpcapi.py @@ -34,121 +34,123 @@ class ConductorAPI(object): API version history: - 1.0 - Initial version. 
- 1.1 - Added migration_update - 1.2 - Added instance_get_by_uuid and instance_get_all_by_host - 1.3 - Added aggregate_host_add and aggregate_host_delete - 1.4 - Added migration_get - 1.5 - Added bw_usage_update - 1.6 - Added get_backdoor_port() - 1.7 - Added aggregate_get_by_host, aggregate_metadata_add, - and aggregate_metadata_delete - 1.8 - Added security_group_get_by_instance and - security_group_rule_get_by_security_group - 1.9 - Added provider_fw_rule_get_all - 1.10 - Added agent_build_get_by_triple - 1.11 - Added aggregate_get - 1.12 - Added block_device_mapping_update_or_create - 1.13 - Added block_device_mapping_get_all_by_instance - 1.14 - Added block_device_mapping_destroy - 1.15 - Added instance_get_all_by_filters and - instance_get_all_hung_in_rebooting and - instance_get_active_by_window - Deprecated instance_get_all_by_host - 1.16 - Added instance_destroy - 1.17 - Added instance_info_cache_delete - 1.18 - Added instance_type_get - 1.19 - Added vol_get_usage_by_time and vol_usage_update - 1.20 - Added migration_get_unconfirmed_by_dest_compute - 1.21 - Added service_get_all_by - 1.22 - Added ping - 1.23 - Added instance_get_all - Un-Deprecate instance_get_all_by_host - 1.24 - Added instance_get - 1.25 - Added action_event_start and action_event_finish - 1.26 - Added instance_info_cache_update - 1.27 - Added service_create - 1.28 - Added binary arg to service_get_all_by - 1.29 - Added service_destroy - 1.30 - Added migration_create - 1.31 - Added migration_get_in_progress_by_host_and_node - 1.32 - Added optional node to instance_get_all_by_host - 1.33 - Added compute_node_create and compute_node_update - 1.34 - Added service_update - 1.35 - Added instance_get_active_by_window_joined - 1.36 - Added instance_fault_create - 1.37 - Added task_log_get, task_log_begin_task, task_log_end_task - 1.38 - Added service name to instance_update - 1.39 - Added notify_usage_exists - 1.40 - Added security_groups_trigger_handler and - 
security_groups_trigger_members_refresh - Remove instance_get_active_by_window - 1.41 - Added fixed_ip_get_by_instance, network_get, - instance_floating_address_get_all, quota_commit, - quota_rollback - 1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host - 1.43 - Added compute_stop - 1.44 - Added compute_node_delete - 1.45 - Added project_id to quota_commit and quota_rollback - 1.46 - Added compute_confirm_resize - 1.47 - Added columns_to_join to instance_get_all_by_host and - instance_get_all_by_filters - 1.48 - Added compute_unrescue - - ... Grizzly supports message version 1.48. So, any changes to existing - methods in 2.x after that point should be done such that they can - handle the version_cap being set to 1.48. - - 1.49 - Added columns_to_join to instance_get_by_uuid - 1.50 - Added object_action() and object_class_action() - 1.51 - Added the 'legacy' argument to - block_device_mapping_get_all_by_instance - 1.52 - Pass instance objects for compute_confirm_resize - 1.53 - Added compute_reboot - 1.54 - Added 'update_cells' argument to bw_usage_update - 1.55 - Pass instance objects for compute_stop - 1.56 - Remove compute_confirm_resize and - migration_get_unconfirmed_by_dest_compute - 1.57 - Remove migration_create() - 1.58 - Remove migration_get() - - ... Havana supports message version 1.58. So, any changes to existing - methods in 1.x after that point should be done such that they can - handle the version_cap being set to 1.58. - - 1.59 - Remove instance_info_cache_update() - 1.60 - Remove aggregate_metadata_add() and aggregate_metadata_delete() - ... - Remove security_group_get_by_instance() and - security_group_rule_get_by_security_group() - 1.61 - Return deleted instance from instance_destroy() - 1.62 - Added object_backport() - 1.63 - Changed the format of values['stats'] from a dict to a JSON string - in compute_node_update() - 1.64 - Added use_slave to instance_get_all_filters() - ... - Remove instance_type_get() - ... 
- Remove aggregate_get() - ... - Remove aggregate_get_by_host() - ... - Remove instance_get() - ... - Remove migration_update() - ... - Remove block_device_mapping_destroy() - - 2.0 - Drop backwards compatibility - ... - Remove quota_rollback() and quota_commit() - ... - Remove aggregate_host_add() and aggregate_host_delete() - ... - Remove network_migrate_instance_start() and - network_migrate_instance_finish() - - ... Icehouse supports message version 2.0. So, any changes to - existing methods in 2.x after that point should be done such that they - can handle the version_cap being set to 2.0. - ... - Remove instance_destroy() - ... - Remove compute_unrescue() - ... - Remove instance_get_all_by_filters() - ... - Remove instance_get_active_by_window_joined() - ... - Remove instance_fault_create() - ... - Remove action_event_start() and action_event_finish() - ... - Remove instance_get_by_uuid() - ... - Remove agent_build_get_by_triple() + * 1.0 - Initial version. + * 1.1 - Added migration_update + * 1.2 - Added instance_get_by_uuid and instance_get_all_by_host + * 1.3 - Added aggregate_host_add and aggregate_host_delete + * 1.4 - Added migration_get + * 1.5 - Added bw_usage_update + * 1.6 - Added get_backdoor_port() + * 1.7 - Added aggregate_get_by_host, aggregate_metadata_add, + and aggregate_metadata_delete + * 1.8 - Added security_group_get_by_instance and + security_group_rule_get_by_security_group + * 1.9 - Added provider_fw_rule_get_all + * 1.10 - Added agent_build_get_by_triple + * 1.11 - Added aggregate_get + * 1.12 - Added block_device_mapping_update_or_create + * 1.13 - Added block_device_mapping_get_all_by_instance + * 1.14 - Added block_device_mapping_destroy + * 1.15 - Added instance_get_all_by_filters and + instance_get_all_hung_in_rebooting and + instance_get_active_by_window + Deprecated instance_get_all_by_host + * 1.16 - Added instance_destroy + * 1.17 - Added instance_info_cache_delete + * 1.18 - Added instance_type_get + * 1.19 - Added 
vol_get_usage_by_time and vol_usage_update + * 1.20 - Added migration_get_unconfirmed_by_dest_compute + * 1.21 - Added service_get_all_by + * 1.22 - Added ping + * 1.23 - Added instance_get_all + Un-Deprecate instance_get_all_by_host + * 1.24 - Added instance_get + * 1.25 - Added action_event_start and action_event_finish + * 1.26 - Added instance_info_cache_update + * 1.27 - Added service_create + * 1.28 - Added binary arg to service_get_all_by + * 1.29 - Added service_destroy + * 1.30 - Added migration_create + * 1.31 - Added migration_get_in_progress_by_host_and_node + * 1.32 - Added optional node to instance_get_all_by_host + * 1.33 - Added compute_node_create and compute_node_update + * 1.34 - Added service_update + * 1.35 - Added instance_get_active_by_window_joined + * 1.36 - Added instance_fault_create + * 1.37 - Added task_log_get, task_log_begin_task, task_log_end_task + * 1.38 - Added service name to instance_update + * 1.39 - Added notify_usage_exists + * 1.40 - Added security_groups_trigger_handler and + security_groups_trigger_members_refresh + Remove instance_get_active_by_window + * 1.41 - Added fixed_ip_get_by_instance, network_get, + instance_floating_address_get_all, quota_commit, + quota_rollback + * 1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host + * 1.43 - Added compute_stop + * 1.44 - Added compute_node_delete + * 1.45 - Added project_id to quota_commit and quota_rollback + * 1.46 - Added compute_confirm_resize + * 1.47 - Added columns_to_join to instance_get_all_by_host and + instance_get_all_by_filters + * 1.48 - Added compute_unrescue + + ... Grizzly supports message version 1.48. So, any changes to existing + methods in 2.x after that point should be done such that they can + handle the version_cap being set to 1.48. 
+ + * 1.49 - Added columns_to_join to instance_get_by_uuid + * 1.50 - Added object_action() and object_class_action() + * 1.51 - Added the 'legacy' argument to + block_device_mapping_get_all_by_instance + * 1.52 - Pass instance objects for compute_confirm_resize + * 1.53 - Added compute_reboot + * 1.54 - Added 'update_cells' argument to bw_usage_update + * 1.55 - Pass instance objects for compute_stop + * 1.56 - Remove compute_confirm_resize and + migration_get_unconfirmed_by_dest_compute + * 1.57 - Remove migration_create() + * 1.58 - Remove migration_get() + + ... Havana supports message version 1.58. So, any changes to existing + methods in 1.x after that point should be done such that they can + handle the version_cap being set to 1.58. + + * 1.59 - Remove instance_info_cache_update() + * 1.60 - Remove aggregate_metadata_add() and aggregate_metadata_delete() + * ... - Remove security_group_get_by_instance() and + security_group_rule_get_by_security_group() + * 1.61 - Return deleted instance from instance_destroy() + * 1.62 - Added object_backport() + * 1.63 - Changed the format of values['stats'] from a dict to a JSON string + in compute_node_update() + * 1.64 - Added use_slave to instance_get_all_filters() + - Remove instance_type_get() + - Remove aggregate_get() + - Remove aggregate_get_by_host() + - Remove instance_get() + - Remove migration_update() + - Remove block_device_mapping_destroy() + + * 2.0 - Drop backwards compatibility + - Remove quota_rollback() and quota_commit() + - Remove aggregate_host_add() and aggregate_host_delete() + - Remove network_migrate_instance_start() and + network_migrate_instance_finish() + + ... Icehouse supports message version 2.0. So, any changes to + existing methods in 2.x after that point should be done such + that they can handle the version_cap being set to 2.0. 
+ + * Remove instance_destroy() + * Remove compute_unrescue() + * Remove instance_get_all_by_filters() + * Remove instance_get_active_by_window_joined() + * Remove instance_fault_create() + * Remove action_event_start() and action_event_finish() + * Remove instance_get_by_uuid() + * Remove agent_build_get_by_triple() + """ VERSION_ALIASES = { @@ -368,6 +370,7 @@ class ComputeTaskAPI(object): 1.6 - Made migrate_server use instance objects 1.7 - Do not send block_device_mapping and legacy_bdm to build_instances 1.8 - Add rebuild_instance + """ def __init__(self): diff --git a/nova/consoleauth/rpcapi.py b/nova/consoleauth/rpcapi.py index fa130a2541..2bba0f8c96 100644 --- a/nova/consoleauth/rpcapi.py +++ b/nova/consoleauth/rpcapi.py @@ -33,16 +33,16 @@ class ConsoleAuthAPI(object): API version history: - 1.0 - Initial version. - 1.1 - Added get_backdoor_port() - 1.2 - Added instance_uuid to authorize_console, and - delete_tokens_for_instance + * 1.0 - Initial version. + * 1.1 - Added get_backdoor_port() + * 1.2 - Added instance_uuid to authorize_console, and + delete_tokens_for_instance ... Grizzly and Havana support message version 1.2. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 1.2. - 2.0 - Major API rev for Icehouse + * 2.0 - Major API rev for Icehouse ... Icehouse supports message version 2.0. So, any changes to existing methods in 2.x after that point should be done such that they diff --git a/nova/network/rpcapi.py b/nova/network/rpcapi.py index d8c0392e59..99034aeb90 100644 --- a/nova/network/rpcapi.py +++ b/nova/network/rpcapi.py @@ -46,39 +46,41 @@ class NetworkAPI(object): API version history: - 1.0 - Initial version. 
- 1.1 - Adds migrate_instance_[start|finish] - 1.2 - Make migrate_instance_[start|finish] a little more flexible - 1.3 - Adds fanout cast update_dns for multi_host networks - 1.4 - Add get_backdoor_port() - 1.5 - Adds associate - 1.6 - Adds instance_uuid to _{dis,}associate_floating_ip - 1.7 - Adds method get_floating_ip_pools to replace get_floating_pools - 1.8 - Adds macs to allocate_for_instance - 1.9 - Adds rxtx_factor to [add|remove]_fixed_ip, removes instance_uuid - from allocate_for_instance and instance_get_nw_info + * 1.0 - Initial version. + * 1.1 - Adds migrate_instance_[start|finish] + * 1.2 - Make migrate_instance_[start|finish] a little more flexible + * 1.3 - Adds fanout cast update_dns for multi_host networks + * 1.4 - Add get_backdoor_port() + * 1.5 - Adds associate + * 1.6 - Adds instance_uuid to _{dis,}associate_floating_ip + * 1.7 - Adds method get_floating_ip_pools to replace get_floating_pools + * 1.8 - Adds macs to allocate_for_instance + * 1.9 - Adds rxtx_factor to [add|remove]_fixed_ip, removes + instance_uuid from allocate_for_instance and + instance_get_nw_info ... Grizzly supports message version 1.9. So, any changes to existing methods in 1.x after that point should be done such that they can handle the version_cap being set to 1.9. - 1.10- Adds (optional) requested_networks to deallocate_for_instance + * 1.10- Adds (optional) requested_networks to deallocate_for_instance ... Havana supports message version 1.10. So, any changes to existing methods in 1.x after that point should be done such that they can handle the version_cap being set to 1.10. - NOTE: remove unused method get_vifs_by_instance() - NOTE: remove unused method get_vif_by_mac_address() - NOTE: remove unused method get_network() - NOTE: remove unused method get_all_networks() - 1.11 - Add instance to deallocate_for_instance(). Remove instance_id, - project_id, and host. 
- 1.12 - Add instance to deallocate_fixed_ip() + * NOTE: remove unused method get_vifs_by_instance() + * NOTE: remove unused method get_vif_by_mac_address() + * NOTE: remove unused method get_network() + * NOTE: remove unused method get_all_networks() + * 1.11 - Add instance to deallocate_for_instance(). + Remove instance_id, project_id, and host. + * 1.12 - Add instance to deallocate_fixed_ip() ... Icehouse supports message version 1.12. So, any changes to existing methods in 1.x after that point should be done such that they can handle the version_cap being set to 1.12. + ''' VERSION_ALIASES = { diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py index 3ff86f0ebf..dd3c342b5a 100644 --- a/nova/scheduler/rpcapi.py +++ b/nova/scheduler/rpcapi.py @@ -42,48 +42,49 @@ class SchedulerAPI(object): API version history: - 1.0 - Initial version. - 1.1 - Changes to prep_resize(): - - remove instance_uuid, add instance - - remove instance_type_id, add instance_type - - remove topic, it was unused - 1.2 - Remove topic from run_instance, it was unused - 1.3 - Remove instance_id, add instance to live_migration - 1.4 - Remove update_db from prep_resize - 1.5 - Add reservations argument to prep_resize() - 1.6 - Remove reservations argument to run_instance() - 1.7 - Add create_volume() method, remove topic from live_migration() - - 2.0 - Remove 1.x backwards compat - 2.1 - Add image_id to create_volume() - 2.2 - Remove reservations argument to create_volume() - 2.3 - Remove create_volume() - 2.4 - Change update_service_capabilities() - - accepts a list of capabilities - 2.5 - Add get_backdoor_port() - 2.6 - Add select_hosts() + * 1.0 - Initial version. 
+ * 1.1 - Changes to prep_resize(): + * remove instance_uuid, add instance + * remove instance_type_id, add instance_type + * remove topic, it was unused + * 1.2 - Remove topic from run_instance, it was unused + * 1.3 - Remove instance_id, add instance to live_migration + * 1.4 - Remove update_db from prep_resize + * 1.5 - Add reservations argument to prep_resize() + * 1.6 - Remove reservations argument to run_instance() + * 1.7 - Add create_volume() method, remove topic from live_migration() + + * 2.0 - Remove 1.x backwards compat + * 2.1 - Add image_id to create_volume() + * 2.2 - Remove reservations argument to create_volume() + * 2.3 - Remove create_volume() + * 2.4 - Change update_service_capabilities() + * accepts a list of capabilities + * 2.5 - Add get_backdoor_port() + * 2.6 - Add select_hosts() ... Grizzly supports message version 2.6. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.6. - 2.7 - Add select_destinations() - 2.8 - Deprecate prep_resize() -- JUST KIDDING. It is still used - by the compute manager for retries. - 2.9 - Added the legacy_bdm_in_spec parameter to run_instance() + * 2.7 - Add select_destinations() + * 2.8 - Deprecate prep_resize() -- JUST KIDDING. It is still used + by the compute manager for retries. + * 2.9 - Added the legacy_bdm_in_spec parameter to run_instance() ... Havana supports message version 2.9. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.9. - ... - Deprecated live_migration() call, moved to conductor - ... - Deprecated select_hosts() + * Deprecated live_migration() call, moved to conductor + * Deprecated select_hosts() 3.0 - Removed backwards compat ... Icehouse supports message version 3.0. So, any changes to existing methods in 3.x after that point should be done such that they can handle the version_cap being set to 3.0. 
+ ''' VERSION_ALIASES = { From a76470f980c4d4d5ee6a8ccabf2e6f803f28648e Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 28 Jul 2014 15:16:03 -0700 Subject: [PATCH 252/486] docs - Fix doc build errors with SQLAlchemy 0.9 The Nova docs failed to build when using SQLAlchemy 0.9. Among the errors that are reported is this one: nova/db/sqlalchemy/api.py:docstring of nova.db.sqlalchemy.api.select:12: WARNING: undefined label: coretutorial_selecting (if the link has no caption the label must precede a section header) nova/openstack/common/db/sqlalchemy/utils.py:docstring of nova.openstack.common.db.sqlalchemy.utils.or_:26: WARNING: more than one target found for cross-reference u'and_': nova.db.sqlalchemy.api.and_, nova.virt.baremetal.db.sqlalchemy.migrate_repo.versions.006_move_prov_mac_address.and_ nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/006_move_prov_mac_address.py: docstring of nova.virt.baremetal.db.sqlalchemy.migrate_repo.versions. 006_move_prov_mac_address.and_:35: WARNING: more than one target found for cross-reference u'or_': nova.db.sqlalchemy.api.or_, nova.openstack.common.db.sqlalchemy.utils.or_ nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/006_move_prov_mac_address.py: docstring of nova.virt.baremetal.db.sqlalchemy.migrate_repo.versions. 
006_move_prov_mac_address.select:12: WARNING: undefined label: coretutorial_selecting (if the link has no caption the label must precede a section header) To fix this, the imports causing breaks were removed and changed to more specific/better imports Change-Id: I83cf38c03cef8bb3186160a660d30979cc303839 --- nova/db/sqlalchemy/api.py | 10 ++++----- nova/openstack/common/db/sqlalchemy/utils.py | 6 ++--- nova/tests/db/test_db_api.py | 22 +++++++++---------- nova/tests/db/test_migration_utils.py | 6 ++--- .../versions/006_move_prov_mac_address.py | 10 +++++---- 5 files changed, 28 insertions(+), 26 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ee8f60f459..839195dc5f 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -40,9 +40,9 @@ from sqlalchemy.orm import joinedload_all from sqlalchemy.orm import noload from sqlalchemy.schema import Table +from sqlalchemy import sql from sqlalchemy.sql.expression import asc from sqlalchemy.sql.expression import desc -from sqlalchemy.sql.expression import select from sqlalchemy.sql import false from sqlalchemy.sql import func from sqlalchemy.sql import null @@ -589,12 +589,12 @@ def compute_node_get_all(context, no_date_fields): def filter_columns(table): return [c for c in table.c if c.name not in redundant_columns] - compute_node_query = select(filter_columns(compute_node)).\ + compute_node_query = sql.select(filter_columns(compute_node)).\ where(compute_node.c.deleted == 0).\ order_by(compute_node.c.service_id) compute_node_rows = conn.execute(compute_node_query).fetchall() - service_query = select(filter_columns(service)).\ + service_query = sql.select(filter_columns(service)).\ where((service.c.deleted == 0) & (service.c.binary == 'nova-compute')).\ order_by(service.c.id) @@ -5635,10 +5635,10 @@ def archive_deleted_rows_for_table(context, tablename, max_rows): column = table.c.id # NOTE(guochbo): Use InsertFromSelect and DeleteFromSelect to avoid # database's limit of 
maximum parameter in one SQL statement. - query_insert = select([table], + query_insert = sql.select([table], table.c.deleted != default_deleted_value).\ order_by(column).limit(max_rows) - query_delete = select([column], + query_delete = sql.select([column], table.c.deleted != default_deleted_value).\ order_by(column).limit(max_rows) diff --git a/nova/openstack/common/db/sqlalchemy/utils.py b/nova/openstack/common/db/sqlalchemy/utils.py index 9b7008fb39..02d8cf4848 100644 --- a/nova/openstack/common/db/sqlalchemy/utils.py +++ b/nova/openstack/common/db/sqlalchemy/utils.py @@ -29,7 +29,6 @@ from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy import MetaData -from sqlalchemy import or_ from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql.expression import UpdateBase from sqlalchemy import String @@ -184,8 +183,9 @@ def _project_filter(query, db_model, context, project_only): if request_context.is_user_context(context) and project_only: if project_only == 'allow_none': is_none = None - query = query.filter(or_(db_model.project_id == context.project_id, - db_model.project_id == is_none)) + query = query.filter(sqlalchemy.sql.or_( + db_model.project_id == context.project_id, + db_model.project_id == is_none)) else: query = query.filter(db_model.project_id == context.project_id) diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py index 0784bd5852..9ffa0241e5 100644 --- a/nova/tests/db/test_db_api.py +++ b/nova/tests/db/test_db_api.py @@ -37,7 +37,7 @@ from sqlalchemy import MetaData from sqlalchemy.orm import exc as sqlalchemy_orm_exc from sqlalchemy.orm import query -from sqlalchemy.sql.expression import select +from sqlalchemy import sql from sqlalchemy import Table from nova import block_device @@ -6422,12 +6422,12 @@ def test_archive_deleted_rows(self): where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\ .values(deleted=1) self.conn.execute(update_statement) - qiim = 
select([self.instance_id_mappings]).where(self. + qiim = sql.select([self.instance_id_mappings]).where(self. instance_id_mappings.c.uuid.in_(self.uuidstrs)) rows = self.conn.execute(qiim).fetchall() # Verify we have 6 in main self.assertEqual(len(rows), 6) - qsiim = select([self.shadow_instance_id_mappings]).\ + qsiim = sql.select([self.shadow_instance_id_mappings]).\ where(self.shadow_instance_id_mappings.c.uuid.in_( self.uuidstrs)) rows = self.conn.execute(qsiim).fetchall() @@ -6491,12 +6491,12 @@ def _test_archive_deleted_rows_for_one_uuid_table(self, tablename): where(main_table.c.uuid.in_(self.uuidstrs[:4]))\ .values(deleted=1) self.conn.execute(update_statement) - qmt = select([main_table]).where(main_table.c.uuid.in_( + qmt = sql.select([main_table]).where(main_table.c.uuid.in_( self.uuidstrs)) rows = self.conn.execute(qmt).fetchall() # Verify we have 6 in main self.assertEqual(len(rows), 6) - qst = select([shadow_table]).\ + qst = sql.select([shadow_table]).\ where(shadow_table.c.uuid.in_(self.uuidstrs)) rows = self.conn.execute(qst).fetchall() # Verify we have 0 in shadow @@ -6535,11 +6535,11 @@ def test_archive_deleted_rows_no_id_column(self): where(self.dns_domains.c.domain == uuidstr0).\ values(deleted=True) self.conn.execute(update_statement) - qdd = select([self.dns_domains], self.dns_domains.c.domain == + qdd = sql.select([self.dns_domains], self.dns_domains.c.domain == uuidstr0) rows = self.conn.execute(qdd).fetchall() self.assertEqual(len(rows), 1) - qsdd = select([self.shadow_dns_domains], + qsdd = sql.select([self.shadow_dns_domains], self.shadow_dns_domains.c.domain == uuidstr0) rows = self.conn.execute(qsdd).fetchall() self.assertEqual(len(rows), 0) @@ -6600,21 +6600,21 @@ def test_archive_deleted_rows_2_tables(self): .values(deleted=1) self.conn.execute(update_statement2) # Verify we have 6 in each main table - qiim = select([self.instance_id_mappings]).where( + qiim = sql.select([self.instance_id_mappings]).where( 
self.instance_id_mappings.c.uuid.in_(self.uuidstrs)) rows = self.conn.execute(qiim).fetchall() self.assertEqual(len(rows), 6) - qi = select([self.instances]).where(self.instances.c.uuid.in_( + qi = sql.select([self.instances]).where(self.instances.c.uuid.in_( self.uuidstrs)) rows = self.conn.execute(qi).fetchall() self.assertEqual(len(rows), 6) # Verify we have 0 in each shadow table - qsiim = select([self.shadow_instance_id_mappings]).\ + qsiim = sql.select([self.shadow_instance_id_mappings]).\ where(self.shadow_instance_id_mappings.c.uuid.in_( self.uuidstrs)) rows = self.conn.execute(qsiim).fetchall() self.assertEqual(len(rows), 0) - qsi = select([self.shadow_instances]).\ + qsi = sql.select([self.shadow_instances]).\ where(self.shadow_instances.c.uuid.in_(self.uuidstrs)) rows = self.conn.execute(qsi).fetchall() self.assertEqual(len(rows), 0) diff --git a/nova/tests/db/test_migration_utils.py b/nova/tests/db/test_migration_utils.py index ecdb298db7..6009a609e1 100644 --- a/nova/tests/db/test_migration_utils.py +++ b/nova/tests/db/test_migration_utils.py @@ -19,7 +19,7 @@ from sqlalchemy import Integer, String from sqlalchemy import MetaData, Table, Column from sqlalchemy.exc import NoSuchTableError -from sqlalchemy.sql import select +from sqlalchemy import sql from sqlalchemy.types import UserDefinedType from nova.db.sqlalchemy import api as db @@ -62,7 +62,7 @@ def test_delete_from_select(self): # Delete 4 rows in one chunk column = test_table.c.id - query_delete = select([column], + query_delete = sql.select([column], test_table.c.id < 5).order_by(column) delete_statement = utils.DeleteFromSelect(test_table, query_delete, column) @@ -70,7 +70,7 @@ def test_delete_from_select(self): # Verify we delete 4 rows self.assertEqual(result_delete.rowcount, 4) - query_all = select([test_table]).\ + query_all = sql.select([test_table]).\ where(test_table.c.uuid.in_(uuidstrs)) rows = conn.execute(query_all).fetchall() # Verify we still have 6 rows in table diff --git 
a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/006_move_prov_mac_address.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/006_move_prov_mac_address.py index 5a1ec451a0..8921f3eb58 100644 --- a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/006_move_prov_mac_address.py +++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/006_move_prov_mac_address.py @@ -13,8 +13,9 @@ # under the License. from nova.openstack.common import log as logging -from sqlalchemy import and_, MetaData, select, Table, exists +from sqlalchemy import MetaData, Table, exists from sqlalchemy import exc +from sqlalchemy import sql LOG = logging.getLogger(__name__) @@ -26,7 +27,7 @@ def upgrade(migrate_engine): nodes = Table('bm_nodes', meta, autoload=True) ifs = Table('bm_interfaces', meta, autoload=True) - q = select([nodes.c.id, nodes.c.prov_mac_address], + q = sql.select([nodes.c.id, nodes.c.prov_mac_address], from_obj=nodes) # Iterate all elements before starting insert since IntegrityError @@ -52,8 +53,9 @@ def downgrade(migrate_engine): nodes = Table('bm_nodes', meta, autoload=True) ifs = Table('bm_interfaces', meta, autoload=True) - subq = exists().where(and_(ifs.c.bm_node_id == nodes.c.id, - ifs.c.address == nodes.c.prov_mac_address)) + subq = exists().where(sql.and_( + ifs.c.bm_node_id == nodes.c.id, + ifs.c.address == nodes.c.prov_mac_address)) ifs.delete().where(subq).execute() From 3619c1914e9925e18c062bc11bc3e708137f8322 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 31 Jul 2014 14:00:54 -0700 Subject: [PATCH 253/486] Add a retry_on_deadlock to reservations_expire Now that we have an index for the reservations_expire query, we have uncovered a race with quota usage updates due to the foreign key on the table, so just retry if there is a deadlock. This is a fix to unblock the gate while we try to remove the deadlocks completely. 
Change-Id: I1993c3bd1f81facf1d98a4e29f9e8df4858a7d66 Partial-bug: #1350466 --- nova/db/sqlalchemy/api.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ee8f60f459..643180d3f2 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3378,6 +3378,7 @@ def quota_destroy_all_by_project(context, project_id): @require_admin_context +@_retry_on_deadlock def reservation_expire(context): session = get_session() with session.begin(): From ed2aa220aaa19e2692df88710dd721d368859df7 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 1 Aug 2014 08:09:07 -0700 Subject: [PATCH 254/486] Only get image location attributes if including locations Commit 155eeabbfafd4f48b47fea5b403f61a6e5c28426 added 'direct_url' and 'locations' to the list of attributes to get from an image which makes a call back to glance if the attributes aren't on the image, and can result in a 404 if they aren't available, which would be in the glance v1 API case. This change simply passes the new include_locations parameter down to _extract_attributes so we can do the proper filtering. 
Change-Id: I903fa5c781fed52183b340dc3d9bc4b6598b21ce Partial-Bug: #1351333 --- nova/image/glance.py | 18 +++++++++++------- nova/tests/image/test_glance.py | 23 ++++++++++++++++------- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/nova/image/glance.py b/nova/image/glance.py index 970e2a4f2b..1f2a0664e4 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -307,16 +307,14 @@ def show(self, context, image_id, include_locations=False): if not _is_image_available(context, image): raise exception.ImageNotFound(image_id=image_id) - image = _translate_from_glance(image) + image = _translate_from_glance(image, + include_locations=include_locations) if include_locations: locations = image.get('locations', None) or [] du = image.get('direct_url', None) if du: locations.append({'url': du, 'metadata': {}}) image['locations'] = locations - else: - image.pop('locations', None) - image.pop('direct_url', None) return image @@ -482,8 +480,9 @@ def _translate_to_glance(image_meta): return image_meta -def _translate_from_glance(image): - image_meta = _extract_attributes(image) +def _translate_from_glance(image, include_locations=False): + image_meta = _extract_attributes(image, + include_locations=include_locations) image_meta = _convert_timestamps_to_datetimes(image_meta) image_meta = _convert_from_string(image_meta) return image_meta @@ -532,7 +531,7 @@ def _convert_to_string(metadata): return _convert(_json_dumps, metadata) -def _extract_attributes(image): +def _extract_attributes(image, include_locations=False): # NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform # a get(), resulting in a useless request back to glance. 
This list is # therefore sorted, with dependent attributes as the end @@ -547,6 +546,7 @@ def _extract_attributes(image): queued = getattr(image, 'status') == 'queued' queued_exclude_attrs = ['disk_format', 'container_format'] + include_locations_attrs = ['direct_url', 'locations'] output = {} for attr in IMAGE_ATTRIBUTES: @@ -560,6 +560,10 @@ def _extract_attributes(image): # NOTE(liusheng): queued image may not have these attributes and 'name' elif queued and attr in queued_exclude_attrs: output[attr] = getattr(image, attr, None) + # NOTE(mriedem): Only get location attrs if including locations. + elif attr in include_locations_attrs: + if include_locations: + output[attr] = getattr(image, attr, None) else: # NOTE(xarses): Anything that is caught with the default value # will result in a additional lookup to glance for said attr. diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 5334e38f48..394c3d1fbd 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -454,7 +454,7 @@ def test_glance_client_image_ref(self): self.assertEqual(same_id, image_id) self.assertEqual(service._client.host, 'something-less-likely') - def test_extracting_missing_attributes(self): + def _test_extracting_missing_attributes(self, include_locations): """Verify behavior from glance objects that are missing attributes This fakes the image class and is missing attribute as the client can @@ -475,7 +475,8 @@ def __init__(self, metadata): 'updated_at': self.NOW_DATETIME, } image = MyFakeGlanceImage(metadata) - observed = glance._extract_attributes(image) + observed = glance._extract_attributes( + image, include_locations=include_locations) expected = { 'id': 1, 'name': None, @@ -492,12 +493,19 @@ def __init__(self, metadata): 'deleted': None, 'status': None, 'properties': {}, - 'owner': None, - 'locations': None, - 'direct_url': None + 'owner': None } + if include_locations: + expected['locations'] = None + expected['direct_url'] 
= None self.assertEqual(expected, observed) + def test_extracting_missing_attributes_include_locations(self): + self._test_extracting_missing_attributes(include_locations=True) + + def test_extracting_missing_attributes_exclude_locations(self): + self._test_extracting_missing_attributes(include_locations=False) + def _create_failing_glance_client(info): class MyGlanceStubClient(glance_stubs.StubGlanceClient): @@ -652,7 +660,7 @@ def test_show_success(self, is_avail_mock, trans_from_mock): client.call.assert_called_once_with(ctx, 1, 'get', mock.sentinel.image_id) is_avail_mock.assert_called_once_with(ctx, {}) - trans_from_mock.assert_called_once_with({}) + trans_from_mock.assert_called_once_with({}, include_locations=False) self.assertIn('mock', info) self.assertEqual(mock.sentinel.trans_from, info['mock']) @@ -743,7 +751,8 @@ def test_include_locations_success(self, avail_mock, trans_from_mock): client.call.assert_called_once_with(ctx, 2, 'get', image_id) avail_mock.assert_called_once_with(ctx, mock.sentinel.image) - trans_from_mock.assert_called_once_with(mock.sentinel.image) + trans_from_mock.assert_called_once_with(mock.sentinel.image, + include_locations=True) self.assertIn('locations', info) self.assertEqual(locations, info['locations']) From 1deb31f85a8f5d1e261b2cf1eddc537a5da7f60b Mon Sep 17 00:00:00 2001 From: Akash Gangil Date: Sat, 2 Aug 2014 05:37:38 -0700 Subject: [PATCH 255/486] Remove ESXDriver from Juno. 1. Removes the VMwareESXDriver code in nova/virt/vmwareapi/driver.py by either deleting the redundant methods or moving them to VMwareVCDriver. 2. Changes the test cases to use VMwareVCDriver and also removes duplicate test cases which were previously being testing both VMwareESXDriver and VMwareVCDriver to just the latter. 
DocImpact Closes-Bug: #1346637 Change-Id: I718fc0ee67dbd625af00c20fa4e34b8a35015437 --- nova/tests/virt/vmwareapi/fake.py | 15 +- nova/tests/virt/vmwareapi/test_configdrive.py | 2 +- nova/tests/virt/vmwareapi/test_driver_api.py | 146 +------ nova/tests/virt/vmwareapi/test_vim_util.py | 2 +- nova/virt/vmwareapi/__init__.py | 5 +- nova/virt/vmwareapi/driver.py | 384 ++++-------------- 6 files changed, 105 insertions(+), 449 deletions(-) diff --git a/nova/tests/virt/vmwareapi/fake.py b/nova/tests/virt/vmwareapi/fake.py index 81c92402e2..68636cbd17 100644 --- a/nova/tests/virt/vmwareapi/fake.py +++ b/nova/tests/virt/vmwareapi/fake.py @@ -48,7 +48,7 @@ def log_db_contents(msg=None): {'text': msg or "", 'content': pprint.pformat(_db_content)}) -def reset(vc=False): +def reset(): """Resets the db contents.""" cleanup() create_network() @@ -56,16 +56,13 @@ def reset(vc=False): create_host_storage_system() ds_ref1 = create_datastore('ds1', 1024, 500) create_host(ds_ref=ds_ref1) - if vc: - ds_ref2 = create_datastore('ds2', 1024, 500) - create_host(ds_ref=ds_ref2) + ds_ref2 = create_datastore('ds2', 1024, 500) + create_host(ds_ref=ds_ref2) create_datacenter('dc1', ds_ref1) - if vc: - create_datacenter('dc2', ds_ref2) + create_datacenter('dc2', ds_ref2) create_res_pool() - if vc: - create_cluster('test_cluster', ds_ref1) - create_cluster('test_cluster2', ds_ref2) + create_cluster('test_cluster', ds_ref1) + create_cluster('test_cluster2', ds_ref2) def cleanup(): diff --git a/nova/tests/virt/vmwareapi/test_configdrive.py b/nova/tests/virt/vmwareapi/test_configdrive.py index a52280afbe..f9a1e7949d 100644 --- a/nova/tests/virt/vmwareapi/test_configdrive.py +++ b/nova/tests/virt/vmwareapi/test_configdrive.py @@ -47,7 +47,7 @@ def setUp(self): host_password='test_pass', use_linked_clone=False, group='vmware') self.flags(vnc_enabled=False) - vmwareapi_fake.reset(vc=True) + vmwareapi_fake.reset() stubs.set_stubs(self.stubs) nova.tests.image.fake.stub_out_image_service(self.stubs) 
self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI) diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index 4333fa0b7c..c307ac2766 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -54,7 +54,6 @@ from nova.tests.virt.vmwareapi import stubs from nova import utils as nova_utils from nova.virt import driver as v_driver -from nova.virt import fake from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import ds_util from nova.virt.vmwareapi import error_util @@ -307,10 +306,12 @@ def setUp(self): super(VMwareAPIVMTestCase, self).setUp() vm_util.vm_refs_cache_reset() self.context = context.RequestContext('fake', 'fake', is_admin=False) - self.flags(host_ip='test_url', + cluster_name = 'test_cluster' + cluster_name2 = 'test_cluster2' + self.flags(cluster_name=[cluster_name, cluster_name2], + host_ip='test_url', host_username='test_username', host_password='test_pass', - datastore_regex='.*', api_retry_count=1, use_linked_clone=False, group='vmware') self.flags(vnc_enabled=False, @@ -318,13 +319,17 @@ def setUp(self): my_ip='') self.user_id = 'fake' self.project_id = 'fake' - self.node_name = 'test_url' - self.ds = 'ds1' self.context = context.RequestContext(self.user_id, self.project_id) stubs.set_stubs(self.stubs) vmwareapi_fake.reset() nova.tests.image.fake.stub_out_image_service(self.stubs) - self.conn = driver.VMwareESXDriver(fake.FakeVirtAPI) + self.conn = driver.VMwareVCDriver(None, False) + self.node_name = self.conn._resources.keys()[0] + self.node_name2 = self.conn._resources.keys()[1] + if cluster_name2 in self.node_name2: + self.ds = 'ds1' + else: + self.ds = 'ds2' self.vim = vmwareapi_fake.FakeVim() # NOTE(vish): none of the network plugging code is actually @@ -341,7 +346,7 @@ def setUp(self): } self.fake_image_uuid = self.image['id'] nova.tests.image.fake.stub_out_image_service(self.stubs) - self.vnc_host = 'test_url' + self.vnc_host 
= 'ha-host' self._set_exception_vars() self.instance_without_compute = {'node': None, 'vm_state': 'building', @@ -1166,12 +1171,6 @@ def _spawn_attach_volume_vmdk(self, set_image_ref=True, vc_support=False): network_info=self.network_info, block_device_info=block_device_info) - def test_spawn_attach_volume_vmdk(self): - self._spawn_attach_volume_vmdk() - - def test_spawn_attach_volume_vmdk_no_image_ref(self): - self._spawn_attach_volume_vmdk(set_image_ref=False) - def test_spawn_attach_volume_iscsi(self): self._create_instance() self.mox.StubOutWithMock(block_device, 'volume_in_mapping') @@ -1663,15 +1662,6 @@ def fake_detach_disk_from_vm(vm_ref, instance, self.test_vm_ref = None self.test_device_name = None - def test_pause(self): - # Tests that the VMwareESXDriver does not implement the pause method. - self.assertRaises(NotImplementedError, self.conn.pause, instance=None) - - def test_unpause(self): - # Tests that the VMwareESXDriver does not implement the unpause method. - self.assertRaises(NotImplementedError, self.conn.unpause, - instance=None) - def test_get_diagnostics(self): self._create_vm() expected = {'memoryReservation': 0, 'suspendInterval': 0, @@ -1723,12 +1713,6 @@ def _test_finish_migration(self, power_on, resize_instance=False): image_meta=None, power_on=power_on) - def test_confirm_migration(self): - self._create_vm() - self.assertRaises(NotImplementedError, - self.conn.confirm_migration, self.context, - self.instance, None) - def _test_finish_revert_migration(self, power_on): self._create_vm() # Ensure ESX driver throws an error @@ -1738,18 +1722,6 @@ def _test_finish_revert_migration(self, power_on): instance=self.instance, network_info=None) - def test_finish_revert_migration_power_on(self): - self._test_finish_revert_migration(power_on=True) - - def test_finish_revert_migration_power_off(self): - self._test_finish_revert_migration(power_on=False) - - def test_get_console_pool_info(self): - info = self.conn.get_console_pool_info("console_type") 
- self.assertEqual(info['address'], 'test_url') - self.assertEqual(info['username'], 'test_username') - self.assertEqual(info['password'], 'test_pass') - def test_get_vnc_console_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, @@ -1777,9 +1749,6 @@ def test_get_vnc_console_noport(self): self.context, self.instance) - def test_host_ip_addr(self): - self.assertEqual(self.conn.get_host_ip_addr(), "test_url") - def test_get_volume_connector(self): self._create_vm() connector_dict = self.conn.get_volume_connector(self.instance) @@ -2084,65 +2053,6 @@ def test_image_aging_not_aged(self): self._cached_files_exist() -class VMwareAPIHostTestCase(test.NoDBTestCase, - test_driver.DriverAPITestHelper): - """Unit tests for Vmware API host calls.""" - - def setUp(self): - super(VMwareAPIHostTestCase, self).setUp() - self.flags(image_cache_subdirectory_name='vmware_base') - vm_util.vm_refs_cache_reset() - self.flags(host_ip='test_url', - host_username='test_username', - host_password='test_pass', group='vmware') - vmwareapi_fake.reset() - stubs.set_stubs(self.stubs) - self.conn = driver.VMwareESXDriver(False) - - def tearDown(self): - super(VMwareAPIHostTestCase, self).tearDown() - vmwareapi_fake.cleanup() - - def test_public_api_signatures(self): - self.assertPublicAPISignatures(self.conn) - - def test_host_state(self): - stats = self.conn.get_host_stats() - self.assertEqual(stats['vcpus'], 16) - self.assertEqual(stats['disk_total'], 1024) - self.assertEqual(stats['disk_available'], 500) - self.assertEqual(stats['disk_used'], 1024 - 500) - self.assertEqual(stats['host_memory_total'], 1024) - self.assertEqual(stats['host_memory_free'], 1024 - 500) - self.assertEqual(stats['hypervisor_version'], 5000000) - supported_instances = [('i686', 'vmware', 'hvm'), - ('x86_64', 'vmware', 'hvm')] - self.assertEqual(stats['supported_instances'], supported_instances) - - def _test_host_action(self, method, action, expected=None): - result = 
method('host', action) - self.assertEqual(result, expected) - - def test_host_reboot(self): - self._test_host_action(self.conn.host_power_action, 'reboot') - - def test_host_shutdown(self): - self._test_host_action(self.conn.host_power_action, 'shutdown') - - def test_host_startup(self): - self._test_host_action(self.conn.host_power_action, 'startup') - - def test_host_maintenance_on(self): - self._test_host_action(self.conn.host_maintenance_mode, True) - - def test_host_maintenance_off(self): - self._test_host_action(self.conn.host_maintenance_mode, False) - - def test_get_host_uptime(self): - result = self.conn.get_host_uptime('host') - self.assertEqual('Please refer to test_url for the uptime', result) - - class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase, test_driver.DriverAPITestHelper): @@ -2156,7 +2066,7 @@ def setUp(self): task_poll_interval=10, datastore_regex='.*', group='vmware') self.flags(vnc_enabled=False, image_cache_subdirectory_name='vmware_base') - vmwareapi_fake.reset(vc=True) + vmwareapi_fake.reset() self.conn = driver.VMwareVCDriver(None, False) self.node_name = self.conn._resources.keys()[0] self.node_name2 = self.conn._resources.keys()[1] @@ -2201,36 +2111,6 @@ def side_effect(): vcdriver._session._create_session.side_effect = side_effect return vcdriver - @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__') - def test_init_host_and_cleanup_host(self, mock_init): - vcdriver = self._setup_mocks_for_session(mock_init) - vcdriver.init_host("foo") - vcdriver._session._create_session.assert_called_once_with() - - vcdriver.cleanup_host("foo") - vcdriver._session.vim.client.service.Logout.assert_called_once_with( - mock.ANY) - - @mock.patch('nova.virt.vmwareapi.driver.LOG') - @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__') - def test_cleanup_host_with_no_login(self, mock_init, mock_logger): - vcdriver = self._setup_mocks_for_session(mock_init) - vcdriver.init_host("foo") - 
vcdriver._session._create_session.assert_called_once_with() - - # Not logged in... - # observe that no exceptions were thrown - mock_sc = mock.Mock() - vcdriver._session.vim.retrieve_service_content.return_value = mock_sc - web_fault = suds.WebFault(mock.Mock(), mock.Mock()) - vcdriver._session.vim.client.service.Logout.side_effect = web_fault - vcdriver.cleanup_host("foo") - - # assert that the mock Logout method was never called - vcdriver._session.vim.client.service.Logout.assert_called_once_with( - mock.ANY) - mock_logger.debug.assert_called_once_with(mock.ANY) - def test_host_power_action(self): self.assertRaises(NotImplementedError, self.conn.host_power_action, 'host', 'action') diff --git a/nova/tests/virt/vmwareapi/test_vim_util.py b/nova/tests/virt/vmwareapi/test_vim_util.py index a8aef84eb3..fe5ef155f9 100644 --- a/nova/tests/virt/vmwareapi/test_vim_util.py +++ b/nova/tests/virt/vmwareapi/test_vim_util.py @@ -43,7 +43,7 @@ class VMwareVIMUtilTestCase(test.NoDBTestCase): def setUp(self): super(VMwareVIMUtilTestCase, self).setUp() - fake.reset(vc=True) + fake.reset() self.vim = fake.FakeVim() self.vim._login() diff --git a/nova/virt/vmwareapi/__init__.py b/nova/virt/vmwareapi/__init__.py index 9fdf6a7b8d..022e525284 100644 --- a/nova/virt/vmwareapi/__init__.py +++ b/nova/virt/vmwareapi/__init__.py @@ -13,10 +13,13 @@ # License for the specific language governing permissions and limitations # under the License. """ -:mod:`vmwareapi` -- Nova support for VMware ESX/vCenter through VMware API. +:mod:`vmwareapi` -- Nova support for VMware vCenter through VMware API. """ # NOTE(sdague) for nicer compute_driver specification from nova.virt.vmwareapi import driver +# VMwareESXDriver is deprecated in Juno. 
This property definition +# allows those configurations to work which reference it while +# logging a deprecation warning VMwareESXDriver = driver.VMwareESXDriver VMwareVCDriver = driver.VMwareVCDriver diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index 8cef636917..0adb52de97 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -16,7 +16,7 @@ # under the License. """ -A connection to the VMware ESX/vCenter platform. +A connection to the VMware vCenter platform. """ import re @@ -25,10 +25,9 @@ from eventlet import event from oslo.config import cfg -import suds from nova import exception -from nova.i18n import _, _LC +from nova.i18n import _, _LC, _LW from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import loopingcall @@ -47,19 +46,18 @@ vmwareapi_opts = [ cfg.StrOpt('host_ip', - help='Hostname or IP address for connection to VMware ESX/VC ' + help='Hostname or IP address for connection to VMware VC ' 'host.'), cfg.IntOpt('host_port', default=443, - help='Port for connection to VMware ESX/VC host.'), + help='Port for connection to VMware VC host.'), cfg.StrOpt('host_username', - help='Username for connection to VMware ESX/VC host.'), + help='Username for connection to VMware VC host.'), cfg.StrOpt('host_password', - help='Password for connection to VMware ESX/VC host.', + help='Password for connection to VMware VC host.', secret=True), cfg.MultiStrOpt('cluster_name', - help='Name of a VMware Cluster ComputeResource. Used only if ' - 'compute_driver is vmwareapi.VMwareVCDriver.'), + help='Name of a VMware Cluster ComputeResource.'), cfg.StrOpt('datastore_regex', help='Regex to match the name of a datastore.'), cfg.FloatOpt('task_poll_interval', @@ -86,38 +84,39 @@ TIME_BETWEEN_API_CALL_RETRIES = 1.0 +# The following class was removed in the transition from Icehouse to +# Juno, but may still be referenced in configuration files. 
The +# following stub allow those configurations to work while logging a +# deprecation warning. class VMwareESXDriver(driver.ComputeDriver): """The ESX host connection object.""" + def _do_deprecation_warning(self): + LOG.warn(_LW('The VMware ESX driver is now deprecated and has been ' + 'removed in the Juno release. The VC driver will remain ' + 'and continue to be supported.')) + + def __init__(self, virtapi, read_only=False, scheme="https"): + self._do_deprecation_warning() + + +class VMwareVCDriver(driver.ComputeDriver): + """The VC host connection object.""" + capabilities = { "has_imagecache": True, "supports_recreate": False, } - # VMwareAPI has both ESXi and vCenter API sets. - # The ESXi API are a proper sub-set of the vCenter API. - # That is to say, nearly all valid ESXi calls are - # valid vCenter calls. There are some small edge-case - # exceptions regarding VNC, CIM, User management & SSO. - - def _do_deprecation_warning(self): - LOG.warning(_('The VMware ESX driver is now deprecated and will be ' - 'removed in the Juno release. The VC driver will remain ' - 'and continue to be supported.')) + # The vCenter driver includes API that acts on ESX hosts or groups + # of ESX hosts in clusters or non-cluster logical-groupings. + # + # vCenter is not a hypervisor itself, it works with multiple + # hypervisor host machines and their guests. This fact can + # subtly alter how vSphere and OpenStack interoperate. 
def __init__(self, virtapi, scheme="https"): - super(VMwareESXDriver, self).__init__(virtapi) - - self._do_deprecation_warning() - - self._host_ip = CONF.vmware.host_ip - if not (self._host_ip or CONF.vmware.host_username is None or - CONF.vmware.host_password is None): - raise Exception(_("Must specify host_ip, " - "host_username " - "and host_password to use " - "compute_driver=vmwareapi.VMwareESXDriver or " - "vmwareapi.VMwareVCDriver")) + super(VMwareVCDriver, self).__init__(virtapi) self._datastore_regex = None if CONF.vmware.datastore_regex: @@ -129,110 +128,47 @@ def __init__(self, virtapi, scheme="https"): % CONF.vmware.datastore_regex) self._session = VMwareAPISession(scheme=scheme) - self._volumeops = volumeops.VMwareVolumeOps(self._session) - self._vmops = vmops.VMwareVMOps(self._session, self.virtapi, - self._volumeops, - datastore_regex=self._datastore_regex) - self._host = host.Host(self._session) - self._host_state = None # TODO(hartsocks): back-off into a configuration test module. if CONF.vmware.use_linked_clone is None: raise error_util.UseLinkedCloneConfigurationFault() - @property - def host_state(self): - if not self._host_state: - self._host_state = host.HostState(self._session, - self._host_ip) - return self._host_state - - def init_host(self, host): - vim = self._session.vim - if vim is None: - self._session._create_session() - - def cleanup_host(self, host): - # NOTE(hartsocks): we lean on the init_host to force the vim object - # to not be None. 
- vim = self._session.vim - service_content = vim.get_service_content() - session_manager = service_content.sessionManager - try: - vim.client.service.Logout(session_manager) - except suds.WebFault: - LOG.debug("No vSphere session was open during cleanup_host.") - - def list_instances(self): - """List VM instances.""" - return self._vmops.list_instances() - - def spawn(self, context, instance, image_meta, injected_files, - admin_password, network_info=None, block_device_info=None): - """Create VM instance.""" - self._vmops.spawn(context, instance, image_meta, injected_files, - admin_password, network_info, block_device_info) - - def snapshot(self, context, instance, image_id, update_task_state): - """Create snapshot from a running VM instance.""" - self._vmops.snapshot(context, instance, image_id, update_task_state) - - def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None): - """Reboot VM instance.""" - self._vmops.reboot(instance, network_info) + # Get the list of clusters to be used + self._cluster_names = CONF.vmware.cluster_name + self.dict_mors = vm_util.get_all_cluster_refs_by_name(self._session, + self._cluster_names) + if not self.dict_mors: + raise exception.NotFound(_("All clusters specified %s were not" + " found in the vCenter") + % self._cluster_names) - def destroy(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, migrate_data=None): - """Destroy VM instance.""" + # Check if there are any clusters that were specified in the nova.conf + # but are not in the vCenter, for missing clusters log a warning. 
+ clusters_found = [v.get('name') for k, v in self.dict_mors.iteritems()] + missing_clusters = set(self._cluster_names) - set(clusters_found) + if missing_clusters: + LOG.warn(_LW("The following clusters could not be found in the " + "vCenter %s") % list(missing_clusters)) - # Destroy gets triggered when Resource Claim in resource_tracker - # is not successful. When resource claim is not successful, - # node is not set in instance. Perform destroy only if node is set - if not instance['node']: - return + # The _resources is used to maintain the vmops, volumeops and vcstate + # objects per cluster + self._resources = {} + self._resource_keys = set() + self._virtapi = virtapi + self._update_resources() - self._vmops.destroy(instance, destroy_disks) + # The following initialization is necessary since the base class does + # not use VC state. + first_cluster = self._resources.keys()[0] + self._vmops = self._resources.get(first_cluster).get('vmops') + self._volumeops = self._resources.get(first_cluster).get('volumeops') + self._vc_state = self._resources.get(first_cluster).get('vcstate') def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True, migrate_data=None, destroy_vifs=True): """Cleanup after instance being destroyed by Hypervisor.""" pass - def pause(self, instance): - """Pause VM instance.""" - self._vmops.pause(instance) - - def unpause(self, instance): - """Unpause paused VM instance.""" - self._vmops.unpause(instance) - - def suspend(self, instance): - """Suspend the specified instance.""" - self._vmops.suspend(instance) - - def resume(self, context, instance, network_info, block_device_info=None): - """Resume the suspended VM instance.""" - self._vmops.resume(instance) - - def rescue(self, context, instance, network_info, image_meta, - rescue_password): - """Rescue the specified instance.""" - self._vmops.rescue(context, instance, network_info, image_meta) - - def unrescue(self, instance, network_info): - """Unrescue the 
specified instance.""" - self._vmops.unrescue(instance) - - def power_off(self, instance): - """Power off the specified instance.""" - self._vmops.power_off(instance) - - def power_on(self, context, instance, network_info, - block_device_info=None): - """Power on the specified instance.""" - self._vmops.power_on(instance) - def resume_state_on_host_boot(self, context, instance, network_info, block_device_info=None): """resume guest state when a host is booted.""" @@ -240,11 +176,11 @@ def resume_state_on_host_boot(self, context, instance, network_info, # anything if it is. instances = self.list_instances() if instance['uuid'] not in instances: - LOG.warn(_('Instance cannot be found in host, or in an unknown' - 'state.'), instance=instance) + LOG.warn(_LW('Instance cannot be found in host, or in an unknown' + 'state.'), instance=instance) else: state = vm_util.get_vm_state_from_name(self._session, - instance['uuid']) + instance['uuid']) ignored_states = ['poweredon', 'suspended'] if state.lower() in ignored_states: @@ -253,187 +189,13 @@ def resume_state_on_host_boot(self, context, instance, network_info, # Be as absolute as possible about getting it back into # a known and running state. 
self.reboot(context, instance, network_info, 'hard', - block_device_info) - - def poll_rebooting_instances(self, timeout, instances): - """Poll for rebooting instances.""" - self._vmops.poll_rebooting_instances(timeout, instances) - - def get_info(self, instance): - """Return info about the VM instance.""" - return self._vmops.get_info(instance) - - def get_diagnostics(self, instance): - """Return data about VM diagnostics.""" - data = self._vmops.get_diagnostics(instance) - return data - - def get_instance_diagnostics(self, instance): - """Return data about VM diagnostics.""" - data = self._vmops.get_instance_diagnostics(instance) - return data - - def get_vnc_console(self, context, instance): - """Return link to instance's VNC console.""" - return self._vmops.get_vnc_console(instance) - - def get_volume_connector(self, instance): - """Return volume connector information.""" - return self._volumeops.get_volume_connector(instance) - - def get_host_ip_addr(self): - """Retrieves the IP address of the ESX host.""" - return self._host_ip - - def attach_volume(self, context, connection_info, instance, mountpoint, - disk_bus=None, device_type=None, encryption=None): - """Attach volume storage to VM instance.""" - return self._volumeops.attach_volume(connection_info, - instance, - mountpoint) - - def detach_volume(self, connection_info, instance, mountpoint, - encryption=None): - """Detach volume storage to VM instance.""" - return self._volumeops.detach_volume(connection_info, - instance, - mountpoint) - - def get_console_pool_info(self, console_type): - """Get info about the host on which the VM resides.""" - return {'address': CONF.vmware.host_ip, - 'username': CONF.vmware.host_username, - 'password': CONF.vmware.host_password} - - def _get_available_resources(self, host_stats): - return {'vcpus': host_stats['vcpus'], - 'memory_mb': host_stats['host_memory_total'], - 'local_gb': host_stats['disk_total'], - 'vcpus_used': 0, - 'memory_mb_used': 
host_stats['host_memory_total'] - - host_stats['host_memory_free'], - 'local_gb_used': host_stats['disk_used'], - 'hypervisor_type': host_stats['hypervisor_type'], - 'hypervisor_version': host_stats['hypervisor_version'], - 'hypervisor_hostname': host_stats['hypervisor_hostname'], - 'cpu_info': jsonutils.dumps(host_stats['cpu_info']), - 'supported_instances': jsonutils.dumps( - host_stats['supported_instances']), - } - - def get_available_resource(self, nodename): - """Retrieve resource information. - - This method is called when nova-compute launches, and - as part of a periodic task that records the results in the DB. - - :returns: dictionary describing resources - - """ - host_stats = self.get_host_stats(refresh=True) - - # Updating host information - return self._get_available_resources(host_stats) - - def get_host_stats(self, refresh=False): - """Return the current state of the host. - - If 'refresh' is True, run the update first. - """ - return self.host_state.get_host_stats(refresh=refresh) - - def host_power_action(self, host, action): - """Reboots, shuts down or powers up the host.""" - return self._host.host_power_action(host, action) - - def host_maintenance_mode(self, host, mode): - """Start/Stop host maintenance window. On start, it triggers - guest VMs evacuation. 
- """ - return self._host.host_maintenance_mode(host, mode) - - def set_host_enabled(self, host, enabled): - """Sets the specified host's ability to accept new instances.""" - return self._host.set_host_enabled(host, enabled) - - def get_host_uptime(self, host): - return 'Please refer to %s for the uptime' % CONF.vmware.host_ip - - def inject_network_info(self, instance, nw_info): - """inject network info for specified instance.""" - self._vmops.inject_network_info(instance, nw_info) + block_device_info) def list_instance_uuids(self): """List VM instance UUIDs.""" uuids = self._vmops.list_instances() return [uuid for uuid in uuids if uuidutils.is_uuid_like(uuid)] - def manage_image_cache(self, context, all_instances): - """Manage the local cache of images.""" - self._vmops.manage_image_cache(context, all_instances) - - def instance_exists(self, instance): - """Efficient override of base instance_exists method.""" - return self._vmops.instance_exists(instance) - - def attach_interface(self, instance, image_meta, vif): - """Attach an interface to the instance.""" - self._vmops.attach_interface(instance, image_meta, vif) - - def detach_interface(self, instance, vif): - """Detach an interface from the instance.""" - self._vmops.detach_interface(instance, vif) - - -class VMwareVCDriver(VMwareESXDriver): - """The VC host connection object.""" - - # The vCenter driver includes several additional VMware vSphere - # capabilities that include API that act on hosts or groups of - # hosts in clusters or non-cluster logical-groupings. - # - # vCenter is not a hypervisor itself, it works with multiple - # hypervisor host machines and their guests. This fact can - # subtly alter how vSphere and OpenStack interoperate. 
- - def _do_deprecation_warning(self): - # Driver validated by VMware's Minesweeper CI - pass - - def __init__(self, virtapi, scheme="https"): - super(VMwareVCDriver, self).__init__(virtapi, scheme) - - # Get the list of clusters to be used - self._cluster_names = CONF.vmware.cluster_name - self.dict_mors = vm_util.get_all_cluster_refs_by_name(self._session, - self._cluster_names) - if not self.dict_mors: - raise exception.NotFound(_("All clusters specified %s were not" - " found in the vCenter") - % self._cluster_names) - - # Check if there are any clusters that were specified in the nova.conf - # but are not in the vCenter, for missing clusters log a warning. - clusters_found = [v.get('name') for k, v in self.dict_mors.iteritems()] - missing_clusters = set(self._cluster_names) - set(clusters_found) - if missing_clusters: - LOG.warn(_("The following clusters could not be found in the" - " vCenter %s") % list(missing_clusters)) - - # The _resources is used to maintain the vmops, volumeops and vcstate - # objects per cluster - self._resources = {} - self._resource_keys = set() - self._virtapi = virtapi - self._update_resources() - - # The following initialization is necessary since the base class does - # not use VC state. - first_cluster = self._resources.keys()[0] - self._vmops = self._resources.get(first_cluster).get('vmops') - self._volumeops = self._resources.get(first_cluster).get('volumeops') - self._vc_state = self._resources.get(first_cluster).get('vcstate') - def list_instances(self): """List VM instances from all nodes.""" instances = [] @@ -492,9 +254,7 @@ def rollback_live_migration_at_destination(self, context, instance, def get_vnc_console(self, context, instance): """Return link to instance's VNC console using vCenter logic.""" - # In this situation, ESXi and vCenter require different - # API logic to create a valid VNC console connection object. 
- # In specific, vCenter does not actually run the VNC service + # vCenter does not actually run the VNC service # itself. You must talk to the VNC host underneath vCenter. _vmops = self._get_vmops_for_compute_node(instance['node']) return _vmops.get_vnc_console(instance) @@ -584,6 +344,22 @@ def _get_vc_state_for_compute_node(self, nodename): resource = self._get_resource_for_node(nodename) return resource['vcstate'] + def _get_available_resources(self, host_stats): + return {'vcpus': host_stats['vcpus'], + 'memory_mb': host_stats['host_memory_total'], + 'local_gb': host_stats['disk_total'], + 'vcpus_used': 0, + 'memory_mb_used': host_stats['host_memory_total'] - + host_stats['host_memory_free'], + 'local_gb_used': host_stats['disk_used'], + 'hypervisor_type': host_stats['hypervisor_type'], + 'hypervisor_version': host_stats['hypervisor_version'], + 'hypervisor_hostname': host_stats['hypervisor_hostname'], + 'cpu_info': jsonutils.dumps(host_stats['cpu_info']), + 'supported_instances': jsonutils.dumps( + host_stats['supported_instances']), + } + def get_available_resource(self, nodename): """Retrieve resource info. @@ -822,7 +598,7 @@ def detach_interface(self, instance, vif): class VMwareAPISession(object): - """Sets up a session with the VC/ESX host and handles all + """Sets up a session with the VC host and handles all the calls made to the host. """ @@ -848,7 +624,7 @@ def _get_vim_object(self): port=self._host_port) def _create_session(self): - """Creates a session with the VC/ESX host.""" + """Creates a session with the VC host.""" delay = 1 From 2a5e4645a900e4a688ff21234af8d6c34839de2e Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 16 Jun 2014 06:57:48 -0700 Subject: [PATCH 256/486] Log cleanups for nova.network.neutron.api This patch adds hints for log error and warning levels. 
TrivialFix Change-Id: Ibd516b41cc40afafe2dc37cdc5657d494c33c2cf --- nova/network/neutronv2/api.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index f1a3654e44..42eb79f08d 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -24,7 +24,7 @@ from nova.compute import utils as compute_utils from nova import conductor from nova import exception -from nova.i18n import _, _LW +from nova.i18n import _, _LE, _LW from nova.network import base_api from nova.network import model as network_model from nova.network import neutronv2 @@ -219,7 +219,7 @@ def _create_port(self, port_client, instance, network_id, port_req_body, raise exception.NoMoreFixedIps() except neutron_client_exc.NeutronClientException: with excutils.save_and_reraise_exception(): - LOG.exception(_('Neutron error creating port on network %s'), + LOG.exception(_LE('Neutron error creating port on network %s'), network_id, instance=instance) def allocate_for_instance(self, context, instance, **kwargs): @@ -287,7 +287,7 @@ def allocate_for_instance(self, context, instance, **kwargs): net_ids) if not nets: - LOG.warn(_("No network configured!"), instance=instance) + LOG.warn(_LW("No network configured!"), instance=instance) return network_model.NetworkInfo([]) security_groups = kwargs.get('security_groups', []) @@ -377,14 +377,14 @@ def allocate_for_instance(self, context, instance, **kwargs): port_client = neutron port_client.update_port(port_id, port_req_body) except Exception: - msg = _("Failed to update port %s") + msg = _LE("Failed to update port %s") LOG.exception(msg, port_id) for port_id in created_port_ids: try: neutron.delete_port(port_id) except Exception: - msg = _("Failed to delete port %s") + msg = _LE("Failed to delete port %s") LOG.exception(msg, port_id) nw_info = self.get_instance_nw_info(context, instance, networks=nets, @@ -455,10 +455,10 @@ def 
deallocate_for_instance(self, context, instance, **kwargs): neutron.delete_port(port) except neutronv2.exceptions.NeutronClientException as e: if e.status_code == 404: - LOG.warning(_("Port %s does not exist"), port) + LOG.warning(_LW("Port %s does not exist"), port) else: with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to delete neutron port %s"), + LOG.exception(_LE("Failed to delete neutron port %s"), port) # NOTE(arosen): This clears out the network_cache only if the instance @@ -482,7 +482,7 @@ def deallocate_port_for_instance(self, context, instance, port_id): try: neutronv2.get_client(context).delete_port(port_id) except Exception: - LOG.exception(_("Failed to delete neutron port %s") % + LOG.exception(_LE("Failed to delete neutron port %s"), port_id) return self.get_instance_nw_info(context, instance) @@ -655,7 +655,7 @@ def validate_networks(self, context, requested_networks, num_instances): port = None else: with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to access port %s"), + LOG.exception(_LE("Failed to access port %s"), port_id) if not port: raise exception.PortNotFound(port_id=port_id) @@ -884,7 +884,7 @@ def get_floating_ip(self, context, id): raise exception.FloatingIpNotFound(id=id) else: with excutils.save_and_reraise_exception(): - LOG.exception(_('Unable to access floating IP %s'), id) + LOG.exception(_LE('Unable to access floating IP %s'), id) pool_dict = self._setup_net_dict(client, fip['floating_network_id']) port_dict = self._setup_port_dict(client, fip['port_id']) @@ -1016,8 +1016,8 @@ def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port): if e.status_code == 404: return [] with excutils.save_and_reraise_exception(): - LOG.exception(_('Unable to access floating IP %(fixed_ip)s ' - 'for port %(port_id)s'), + LOG.exception(_LE('Unable to access floating IP %(fixed_ip)s ' + 'for port %(port_id)s'), {'fixed_ip': fixed_ip, 'port_id': port}) return data['floatingips'] @@ -1075,7 +1075,7 
@@ def migrate_instance_finish(self, context, instance, migration): neutron.update_port(p['id'], port_req_body) except Exception: with excutils.save_and_reraise_exception(): - msg = _("Unable to update host of port %s") + msg = _LE("Unable to update host of port %s") LOG.exception(msg, p['id']) def add_network_to_project(self, context, project_id, network_uuid=None): @@ -1111,9 +1111,9 @@ def _nw_info_build_network(self, port, networks, subnets): break else: tenant_id = port['tenant_id'] - LOG.warning(_("Network %(id)s not matched with the tenants " - "network! The ports tenant %(tenant_id)s will be " - "used."), + LOG.warning(_LW("Network %(id)s not matched with the tenants " + "network! The ports tenant %(tenant_id)s will be " + "used."), {'id': port['network_id'], 'tenant_id': tenant_id}) bridge = None From c2697f8735a38fb05d54ff4f82110829ead85ee9 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 24 Jun 2014 05:37:34 -0700 Subject: [PATCH 257/486] Deprecate metadata_neutron_* configuration settings Commit 5cacad3508570ce70b1f9ef620e0508169687fda created a neutron configuration section. Move all of the metadata neutron settings to that section. 
DocImpact The table below has the changes: +-------------------------------------+------------------------------+ | 'DEFAULT' Section | 'neutron' Section | |-------------------------------------|------------------------------| | service_neutron_metadata_proxy | service_metadata_proxy | | neutron_metadata_proxy_shared_secret| metadata_proxy_shared_secret | +-------------------------------------+------------------------------+ Change-Id: Id1b8b387e028c7752729f5c5b373c87397f7577a --- nova/api/metadata/handler.py | 21 +++++++++++++-------- nova/tests/test_metadata.py | 10 ++++++---- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py index 92ae37c017..5de245c1a1 100644 --- a/nova/api/metadata/handler.py +++ b/nova/api/metadata/handler.py @@ -42,17 +42,22 @@ metadata_proxy_opts = [ cfg.BoolOpt( - 'service_neutron_metadata_proxy', + 'service_metadata_proxy', default=False, help='Set flag to indicate Neutron will proxy metadata requests and ' - 'resolve instance ids.'), + 'resolve instance ids.', + deprecated_group='DEFAULT', + deprecated_name='service_neutron_metadata_proxy'), cfg.StrOpt( - 'neutron_metadata_proxy_shared_secret', + 'metadata_proxy_shared_secret', default='', secret=True, - help='Shared secret to validate proxies Neutron metadata requests') + help='Shared secret to validate proxies Neutron metadata requests', + deprecated_group='DEFAULT', + deprecated_name='neutron_metadata_proxy_shared_secret') ] -CONF.register_opts(metadata_proxy_opts) +# metadata_proxy_opts options in the DEFAULT group were deprecated in Juno +CONF.register_opts(metadata_proxy_opts, 'neutron') LOG = logging.getLogger(__name__) @@ -106,13 +111,13 @@ def __call__(self, req): req.response.content_type = base.MIME_TYPE_TEXT_PLAIN return req.response - if CONF.service_neutron_metadata_proxy: + if CONF.neutron.service_metadata_proxy: meta_data = self._handle_instance_id_request(req) else: if 
req.headers.get('X-Instance-ID'): LOG.warn( _LW("X-Instance-ID present in request headers. The " - "'service_neutron_metadata_proxy' option must be" + "'service_metadata_proxy' option must be " "enabled to process this header.")) meta_data = self._handle_remote_ip_request(req) @@ -175,7 +180,7 @@ def _handle_instance_id_request(self, req): raise webob.exc.HTTPBadRequest(explanation=msg) expected_signature = hmac.new( - CONF.neutron_metadata_proxy_shared_secret, + CONF.neutron.metadata_proxy_shared_secret, instance_id, hashlib.sha256).hexdigest() diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index ab2a32ee48..8ea9d3b1fa 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -582,7 +582,8 @@ def test_root(self): self.assertEqual(response.body, expected) def test_root_metadata_proxy_enabled(self): - CONF.set_override("service_neutron_metadata_proxy", True) + self.flags(service_metadata_proxy=True, + group='neutron') expected = "\n".join(base.VERSIONS) + "\nlatest" response = fake_request(self.stubs, self.mdinst, "/") @@ -687,7 +688,7 @@ def fake_get_metadata(instance_id, remote_address): (expected_instance_id, instance_id)) signed = hmac.new( - CONF.neutron_metadata_proxy_shared_secret, + CONF.neutron.metadata_proxy_shared_secret, expected_instance_id, hashlib.sha256).hexdigest() @@ -702,7 +703,8 @@ def fake_get_metadata(instance_id, remote_address): self.assertEqual(response.status_int, 200) # now enable the service - self.flags(service_neutron_metadata_proxy=True) + self.flags(service_metadata_proxy=True, + group='neutron') response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", @@ -771,7 +773,7 @@ def fake_get_metadata(instance_id, remote_address): # unexpected Instance-ID signed = hmac.new( - CONF.neutron_metadata_proxy_shared_secret, + CONF.neutron.metadata_proxy_shared_secret, 'z-z-z-z', hashlib.sha256).hexdigest() From f8ae852c1a267a15f6b70026ad40d5d219fc0d33 Mon Sep 17 00:00:00 2001 
From: Gary Kotton Date: Fri, 27 Jun 2014 04:11:51 -0700 Subject: [PATCH 258/486] Network: interface attach and detach raised confusing exception When the aforementioned operations failed they raised an exception that was not coherent - this is due to the fact that the instance object was passed to the exception. That would print the whole instance object which is really confusing to a openstack user. The exceptions should has passed the instance ID and not the instance. TrivialFix Change-Id: If3fa89b17210c1db3540cd813157b51e786e1494 Closes-bug: #1335076 --- nova/compute/manager.py | 3 ++- nova/exception.py | 6 ++++-- nova/tests/compute/test_compute_mgr.py | 2 +- nova/virt/fake.py | 6 ++++-- nova/virt/libvirt/driver.py | 6 ++++-- nova/virt/vmwareapi/vmops.py | 4 ++-- 6 files changed, 17 insertions(+), 10 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index b03cf79e48..ff19b01c0f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -4512,7 +4512,8 @@ def attach_interface(self, context, instance, network_id, port_id, if len(network_info) != 1: LOG.error(_('allocate_port_for_instance returned %(ports)s ports') % dict(ports=len(network_info))) - raise exception.InterfaceAttachFailed(instance=instance) + raise exception.InterfaceAttachFailed( + instance_uuid=instance.uuid) image_ref = instance.get('image_ref') image_meta = compute_utils.get_image_metadata( context, self.image_api, image_ref, instance) diff --git a/nova/exception.py b/nova/exception.py index 8d027dce6d..31ccc33d36 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -1302,11 +1302,13 @@ class ConfigDriveUnknownFormat(NovaException): class InterfaceAttachFailed(Invalid): - msg_fmt = _("Failed to attach network adapter device to %(instance)s") + msg_fmt = _("Failed to attach network adapter device to " + "%(instance_uuid)s") class InterfaceDetachFailed(Invalid): - msg_fmt = _("Failed to detach network adapter device from %(instance)s") + msg_fmt = _("Failed 
to detach network adapter device from " + "%(instance_uuid)s") class InstanceUserDataTooLarge(NovaException): diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 1d7f5171c2..f202289cb3 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -883,7 +883,7 @@ def test_attach_interface_failure(self): f_instance = objects.Instance._from_db_object(self.context, objects.Instance(), db_instance) - e = exception.InterfaceAttachFailed(instance=f_instance) + e = exception.InterfaceAttachFailed(instance_uuid=f_instance.uuid) @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(self.compute.network_api, diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 5ffcbf8f36..95a199816f 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -248,14 +248,16 @@ def swap_volume(self, old_connection_info, new_connection_info, def attach_interface(self, instance, image_meta, vif): if vif['id'] in self._interfaces: - raise exception.InterfaceAttachFailed('duplicate') + raise exception.InterfaceAttachFailed( + instance_uuid=instance['uuid']) self._interfaces[vif['id']] = vif def detach_interface(self, instance, vif): try: del self._interfaces[vif['id']] except KeyError: - raise exception.InterfaceDetachFailed('not attached') + raise exception.InterfaceDetachFailed( + instance_uuid=instance['uuid']) def get_info(self, instance): if instance['name'] not in self.instances: diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 5273efab45..5d611cd9e9 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -1442,7 +1442,8 @@ def attach_interface(self, instance, image_meta, vif): LOG.error(_LE('attaching network adapter failed.'), instance=instance) self.vif_driver.unplug(instance, vif) - raise exception.InterfaceAttachFailed(instance) + raise exception.InterfaceAttachFailed( + instance_uuid=instance['uuid']) def 
detach_interface(self, instance, vif): virt_dom = self._lookup_by_name(instance['name']) @@ -1466,7 +1467,8 @@ def detach_interface(self, instance, vif): else: LOG.error(_LE('detaching network adapter failed.'), instance=instance) - raise exception.InterfaceDetachFailed(instance) + raise exception.InterfaceDetachFailed( + instance_uuid=instance['uuid']) def _create_snapshot_metadata(self, base, instance, img_fmt, snp_name): metadata = {'is_public': False, diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index e196c247b0..177e93c06d 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -1550,7 +1550,7 @@ def attach_interface(self, instance, image_meta, vif): ' %s'), e, instance=instance) raise exception.InterfaceAttachFailed( - instance=instance['uuid']) + instance_uuid=instance['uuid']) LOG.debug("Reconfigured VM to attach interface", instance=instance) def detach_interface(self, instance, vif): @@ -1590,7 +1590,7 @@ def detach_interface(self, instance, vif): '%s'), e, instance=instance) raise exception.InterfaceDetachFailed( - instance=instance['uuid']) + instance_uuid=instance['uuid']) LOG.debug("Reconfigured VM to detach interface", instance=instance) From 709127cabed602d6868dedf0bef7ca9547a86e06 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 7 Jul 2014 21:36:32 -0700 Subject: [PATCH 259/486] Network: add in a new network type - DVS The support enables DVS network backends. In this case neutron will return a vif type of DVS. The name of the VC port group will be of the following format: - The network name is so that the prefix will be easy for people to understand. The UUID is to ensure that it will be unique. 
Change-Id: Id125797632591bd799e215da7cbae07154e90344 --- nova/network/model.py | 1 + nova/network/neutronv2/api.py | 9 ++++++++- nova/tests/network/test_neutronv2.py | 7 +++++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/nova/network/model.py b/nova/network/model.py index 0424728788..2829d7bda8 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -31,6 +31,7 @@ def ensure_string_keys(d): # Constants for the 'vif_type' field in VIF class VIF_TYPE_OVS = 'ovs' VIF_TYPE_IVS = 'ivs' +VIF_TYPE_DVS = 'dvs' VIF_TYPE_IOVISOR = 'iovisor' VIF_TYPE_BRIDGE = 'bridge' VIF_TYPE_802_QBG = '802.1qbg' diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index f1a3654e44..f7ec8b7648 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -1129,8 +1129,15 @@ def _nw_info_build_network(self, port, networks, subnets): elif vif_type == network_model.VIF_TYPE_BRIDGE: bridge = "brq" + port['network_id'] should_create_bridge = True + elif vif_type == network_model.VIF_TYPE_DVS: + if network_name is None: + bridge = port['network_id'] + else: + bridge = '%s-%s' % (network_name, port['network_id']) - if bridge is not None: + # Prune the bridge name if necessary. For the DVS this is not done + # as the bridge is a '-'. 
+ if bridge is not None and vif_type != network_model.VIF_TYPE_DVS: bridge = bridge[:network_model.NIC_NAME_LEN] network = network_model.Network( diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index 1ff73b9361..2c72f1008b 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -2039,6 +2039,13 @@ def test_nw_info_build_network_ovs(self): self.assertNotIn('should_create_bridge', net) self.assertEqual(iid, 'port-id') + def test_nw_info_build_network_dvs(self): + net, iid = self._test_nw_info_build_network(model.VIF_TYPE_DVS) + self.assertEqual('foo-net-id', net['bridge']) + self.assertNotIn('should_create_bridge', net) + self.assertNotIn('ovs_interfaceid', net) + self.assertIsNone(iid) + def test_nw_info_build_network_bridge(self): net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE) self.assertEqual(net['bridge'], 'brqnet-id') From 416283bd35bb5f8c6b71c493244b00cdfcf7aefc Mon Sep 17 00:00:00 2001 From: "Jay S. Bryant" Date: Tue, 29 Jul 2014 14:58:24 -0500 Subject: [PATCH 260/486] Add hacking check for explicit import of _() To ensure the right message catalog is used when translating messages we need to make sure to explicitly import '_' in any files that use that function. We cannot count on unit test to catch cases where the user has forgotten to import the _() function. This hacking check ensures that the function has been imported anywhere that it is used. Unit tests for the hacking check are included. Change-Id: I9d8101916bcb449345d3123617c2ac75776d053e --- HACKING.rst | 1 + nova/hacking/checks.py | 31 +++++++++++++++++++++++++ nova/tests/api/openstack/test_faults.py | 3 ++- nova/tests/test_hacking.py | 29 +++++++++++++++++++++++ 4 files changed, 63 insertions(+), 1 deletion(-) diff --git a/HACKING.rst b/HACKING.rst index 0d30194b61..7884ac1f9e 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -35,6 +35,7 @@ Nova Specific Commandments self.flags(option=value) instead. 
- [N321] Validate that LOG messages, except debug ones, have translations - [N322] Method's default argument shouldn't be mutable +- [N323] Ensure that the _() function is explicitly imported to ensure proper translations. Creating Unit Tests ------------------- diff --git a/nova/hacking/checks.py b/nova/hacking/checks.py index b8480cd51f..707e629d51 100644 --- a/nova/hacking/checks.py +++ b/nova/hacking/checks.py @@ -31,6 +31,8 @@ """ +UNDERSCORE_IMPORT_FILES = [] + session_check = re.compile(r"\w*def [a-zA-Z0-9].*[(].*session.*[)]") cfg_re = re.compile(r".*\scfg\.") vi_header_re = re.compile(r"^#\s+vim?:.+") @@ -54,7 +56,14 @@ conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w") log_translation = re.compile( r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")") +translated_log = re.compile( + r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)" + "\(\s*_\(\s*('|\")") mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") +string_translation = re.compile(r"[^_]*_\(\s*('|\")") +underscore_import_check = re.compile(r"(.)*import _(.)*") +# We need this for cases where they have created their own _ function. +custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") def import_no_db_in_virt(logical_line, filename): @@ -256,6 +265,27 @@ def no_mutable_default_args(logical_line): yield (0, msg) +def check_explicit_underscore_import(logical_line, filename): + """Check for explicit import of the _ function + + We need to ensure that any files that are using the _() function + to translate logs are explicitly importing the _ function. We + can't trust unit test to catch whether the import has been + added so we need to check for it here. + """ + + # Build a list of the files that have _ imported. No further + # checking needed once it is found. 
+ if filename in UNDERSCORE_IMPORT_FILES: + pass + elif (underscore_import_check.match(logical_line) or + custom_underscore_check.match(logical_line)): + UNDERSCORE_IMPORT_FILES.append(filename) + elif (translated_log.match(logical_line) or + string_translation.match(logical_line)): + yield(0, "N323: Found use of _() without explicit import of _ !") + + def factory(register): register(import_no_db_in_virt) register(no_db_session_in_public_api) @@ -272,3 +302,4 @@ def factory(register): register(no_setting_conf_directly_in_tests) register(validate_log_translations) register(no_mutable_default_args) + register(check_explicit_underscore_import) diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py index 889f79b57b..d6bd90d47d 100644 --- a/nova/tests/api/openstack/test_faults.py +++ b/nova/tests/api/openstack/test_faults.py @@ -26,6 +26,7 @@ from nova.api.openstack import wsgi from nova import exception from nova import i18n +from nova.i18n import _ from nova.openstack.common import jsonutils from nova import test @@ -43,7 +44,7 @@ def fake_translate(value, locale): # Create an exception, passing a translatable message with a # known value we can test for later. 
- safe_exception = exception.NotFound(i18n._('Should be translated.')) + safe_exception = exception.NotFound(_('Should be translated.')) safe_exception.safe = True safe_exception.code = 404 diff --git a/nova/tests/test_hacking.py b/nova/tests/test_hacking.py index 5406e0cd6f..2db0936680 100644 --- a/nova/tests/test_hacking.py +++ b/nova/tests/test_hacking.py @@ -188,3 +188,32 @@ def test_no_mutable_default_args(self): self.assertEqual(0, len(list(checks.no_mutable_default_args( "defined, undefined = [], {}")))) + + def test_check_explicit_underscore_import(self): + self.assertEqual(len(list(checks.check_explicit_underscore_import( + "LOG.info(_('My info message'))", + "cinder/tests/other_files.py"))), 1) + self.assertEqual(len(list(checks.check_explicit_underscore_import( + "msg = _('My message')", + "cinder/tests/other_files.py"))), 1) + self.assertEqual(len(list(checks.check_explicit_underscore_import( + "from cinder.i18n import _", + "cinder/tests/other_files.py"))), 0) + self.assertEqual(len(list(checks.check_explicit_underscore_import( + "LOG.info(_('My info message'))", + "cinder/tests/other_files.py"))), 0) + self.assertEqual(len(list(checks.check_explicit_underscore_import( + "msg = _('My message')", + "cinder/tests/other_files.py"))), 0) + self.assertEqual(len(list(checks.check_explicit_underscore_import( + "from cinder.i18n import _, _LW", + "cinder/tests/other_files2.py"))), 0) + self.assertEqual(len(list(checks.check_explicit_underscore_import( + "msg = _('My message')", + "cinder/tests/other_files2.py"))), 0) + self.assertEqual(len(list(checks.check_explicit_underscore_import( + "_ = translations.ugettext", + "cinder/tests/other_files3.py"))), 0) + self.assertEqual(len(list(checks.check_explicit_underscore_import( + "msg = _('My message')", + "cinder/tests/other_files3.py"))), 0) From 57a2dd0bf6e964b342c8c4c1bc4ac028df4ef0d8 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 30 Jul 2014 23:48:13 -0700 Subject: [PATCH 261/486] Add policy on how 
patches and reviews go hand in hand There is currently a misconception about how a patch gets merged: Author proposes a patch, review team reviews it. Us, and Them. Instead it should be a 'We.' If the review bandwidth doesn't grow at the same rate as the patch proposal rate then we end up with a massive backlog where people are pinging cores for reviews. Record a recommended policy of asking patch authors to offset the review resources spent on their patch by reviewing other patches. Change-Id: I50653f8ea58240fa664519f544ce16b2adf39006 --- doc/source/devref/policies.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/source/devref/policies.rst b/doc/source/devref/policies.rst index 28777bc6a9..67e0adb9ce 100644 --- a/doc/source/devref/policies.rst +++ b/doc/source/devref/policies.rst @@ -34,3 +34,13 @@ this wrong. This policy is in place to prevent us from making backwards incompatible changes to APIs. + +Patches and Reviews +=================== + +Merging a patch requires a non-trivial amount of reviewer resources. +As a patch author, you should try to offset the reviewer resources +spent on your patch by reviewing other patches. If no one does this, the review +team (cores and otherwise) become spread too thin. + +For review guidelines see: https://wiki.openstack.org/wiki/ReviewChecklist From c34840e50d0bf60b4e60bbbcbc178aef83e0e7ad Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Mon, 4 Aug 2014 15:21:26 +0930 Subject: [PATCH 262/486] Handle FloatingIpPoolNotFound exception in floating ip creation If an invalid pool is specified when creating a floating ip with neutron as the backend a FloatingIpPoolNotFound exception can be raised. This change handles the exception at the API level. 
Change-Id: Iac46d86a11f2c1da0ed7e7861cdb87fd4d4b4c6c Closes-Bug: 1352141 --- nova/api/openstack/compute/contrib/floating_ips.py | 2 ++ .../api/openstack/compute/contrib/test_floating_ips.py | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/nova/api/openstack/compute/contrib/floating_ips.py b/nova/api/openstack/compute/contrib/floating_ips.py index 5b81150606..57626e30e1 100644 --- a/nova/api/openstack/compute/contrib/floating_ips.py +++ b/nova/api/openstack/compute/contrib/floating_ips.py @@ -170,6 +170,8 @@ def create(self, req, body=None): else: msg = _("IP allocation over quota.") raise webob.exc.HTTPForbidden(explanation=msg) + except exception.FloatingIpPoolNotFound as e: + raise webob.exc.HTTPBadRequest(explanation=e.format_message()) return _translate_floating_ip_view(ip) diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py index 615c3be557..7b96a8f513 100644 --- a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py @@ -344,6 +344,15 @@ def test_floating_ip_allocate_quota_exceed_in_pool(self, allocate_mock): self.assertIn('IP allocation over quota in pool non_existent_pool.', ex.explanation) + @mock.patch('nova.network.api.API.allocate_floating_ip', + side_effect=exception.FloatingIpPoolNotFound()) + def test_floating_ip_create_with_unknown_pool(self, allocate_mock): + req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips') + ex = self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, {'pool': 'non_existent_pool'}) + + self.assertIn('Floating ip pool not found.', ex.explanation) + def test_floating_ip_allocate(self): def fake1(*args, **kwargs): pass From 88af4f0cf426d2f2394a1fa254987a088306c3b3 Mon Sep 17 00:00:00 2001 From: Eli Qiao Date: Thu, 31 Jul 2014 19:47:05 +0800 Subject: [PATCH 263/486] Handle NetworkAmbiguous error when booting a new instance with v3 api 
Catch exception.NetworkAmbiguous error and map it to exc.HttpConflict when using v3 api to create a new instance. Change-Id: I18b6b9f0d8612a3757f6489dada41b5cd8eca758 Closes-Bug: 1350800 --- nova/api/openstack/compute/plugins/v3/servers.py | 1 + nova/tests/api/openstack/compute/plugins/v3/test_servers.py | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index 8d647fd3f7..23c8b5a39e 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -532,6 +532,7 @@ def create(self, req, body): exception.NetworkNotFound) as error: raise exc.HTTPBadRequest(explanation=error.format_message()) except (exception.PortInUse, + exception.NetworkAmbiguous, exception.NoUniqueMatch) as error: raise exc.HTTPConflict(explanation=error.format_message()) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index f1f08a7d6d..e0f599e6a5 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -2516,6 +2516,12 @@ def fake_create(*args, **kwargs): self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_extra, params) + @mock.patch.object(compute_api.API, 'create') + def test_create_instance_with_network_ambiguous(self, mock_create): + mock_create.side_effect = exception.NetworkAmbiguous() + self.assertRaises(webob.exc.HTTPConflict, + self._test_create_extra, {}) + class ServersControllerCreateTestWithMock(test.TestCase): image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' From 5506dc7b3c16dbeedf69ffd2fe9c62201aea118a Mon Sep 17 00:00:00 2001 From: shuangtai Date: Mon, 4 Aug 2014 17:24:21 +0800 Subject: [PATCH 264/486] Compute: add log exception hints Add hints for log exception messages Change-Id: I05367ac3d9ff94e7d6aa5e446222d4d01f970205 --- 
nova/compute/api.py | 13 +++-- nova/compute/flavors.py | 5 +- nova/compute/manager.py | 121 ++++++++++++++++++++-------------------- 3 files changed, 72 insertions(+), 67 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 4861bc7c78..d4305ce623 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -44,6 +44,7 @@ from nova import exception from nova import hooks from nova.i18n import _ +from nova.i18n import _LE from nova import image from nova import network from nova.network import model as network_model @@ -558,8 +559,8 @@ def _apply_instance_name_template(self, context, instance, index): new_name = (CONF.multi_instance_display_name_template % params) except (KeyError, TypeError): - LOG.exception(_('Failed to set instance name using ' - 'multi_instance_display_name_template.')) + LOG.exception(_LE('Failed to set instance name using ' + 'multi_instance_display_name_template.')) new_name = instance['display_name'] instance.display_name = new_name if not instance.get('hostname', None): @@ -1434,8 +1435,8 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs): "from shelved instance (%s)."), exc.format_message(), instance=instance) except Exception as exc: - LOG.exception(_("Something wrong happened when trying to " - "delete snapshot from shelved instance."), + LOG.exception(_LE("Something wrong happened when trying to " + "delete snapshot from shelved instance."), instance=instance) original_task_state = instance.task_state @@ -3758,8 +3759,8 @@ def destroy(self, context, security_group): quotas.reserve(context, project_id=quota_project, user_id=quota_user, security_groups=-1) except Exception: - LOG.exception(_("Failed to update usages deallocating " - "security group")) + LOG.exception(_LE("Failed to update usages deallocating " + "security group")) LOG.audit(_("Delete security group %s"), security_group['name'], context=context) diff --git a/nova/compute/flavors.py b/nova/compute/flavors.py index 
1a8760d408..1d1e0d60b7 100644 --- a/nova/compute/flavors.py +++ b/nova/compute/flavors.py @@ -28,6 +28,7 @@ from nova import db from nova import exception from nova.i18n import _ +from nova.i18n import _LE from nova.openstack.common.db import exception as db_exc from nova.openstack.common import log as logging from nova.openstack.common import strutils @@ -163,7 +164,7 @@ def create(name, memory, vcpus, root_gb, ephemeral_gb=0, flavorid=None, try: return db.flavor_create(context.get_admin_context(), kwargs) except db_exc.DBError as e: - LOG.exception(_('DB error: %s') % e) + LOG.exception(_LE('DB error: %s'), e) raise exception.FlavorCreateFailed() @@ -174,7 +175,7 @@ def destroy(name): raise ValueError() db.flavor_destroy(context.get_admin_context(), name) except (ValueError, exception.NotFound): - LOG.exception(_('Instance type %s not found for deletion') % name) + LOG.exception(_LE('Instance type %s not found for deletion'), name) raise exception.FlavorNotFoundByName(flavor_name=name) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 5ba955cbfc..eb1d317793 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -362,8 +362,9 @@ def decorated_function(self, context, image_id, instance, try: self.image_api.delete(context, image_id) except Exception: - LOG.exception(_("Error while trying to clean up image %s") - % image_id, instance=instance) + LOG.exception(_LE("Error while trying to clean up " + "image %s"), image_id, + instance=instance) return decorated_function @@ -750,7 +751,7 @@ def _is_instance_storage_shared(self, context, instance): instance=instance) shared_storage = False except Exception: - LOG.exception(_('Failed to check if instance shared'), + LOG.exception(_LE('Failed to check if instance shared'), instance=instance) finally: if data: @@ -816,7 +817,7 @@ def _init_instance(self, context, instance): self._complete_partial_deletion(context, instance) except Exception: # we don't want that an exception blocks the 
init_host - msg = _('Failed to complete a deletion') + msg = _LE('Failed to complete a deletion') LOG.exception(msg, instance=instance) return @@ -867,7 +868,7 @@ def _init_instance(self, context, instance): self._delete_instance(context, instance, bdms, quotas) except Exception: # we don't want that an exception blocks the init_host - msg = _('Failed to complete a deletion') + msg = _LE('Failed to complete a deletion') LOG.exception(msg, instance=instance) self._set_instance_error_state(context, instance) return @@ -908,7 +909,7 @@ def _init_instance(self, context, instance): self.stop_instance(context, instance) except Exception: # we don't want that an exception blocks the init_host - msg = _('Failed to stop instance') + msg = _LE('Failed to stop instance') LOG.exception(msg, instance=instance) return @@ -920,7 +921,7 @@ def _init_instance(self, context, instance): self.start_instance(context, instance) except Exception: # we don't want that an exception blocks the init_host - msg = _('Failed to start instance') + msg = _LE('Failed to start instance') LOG.exception(msg, instance=instance) return @@ -945,7 +946,7 @@ def _init_instance(self, context, instance): instance, net_info, block_dev_info, power_on) except Exception as e: - LOG.exception(_('Failed to revert crashed migration'), + LOG.exception(_LE('Failed to revert crashed migration'), instance=instance) finally: LOG.info(_('Instance found in migrating state during ' @@ -1359,8 +1360,8 @@ def _build_instance(self, context, request_spec, filter_properties, try: self._deallocate_network(context, instance) except Exception: - msg = _('Failed to dealloc network ' - 'for deleted instance') + msg = _LE('Failed to dealloc network ' + 'for deleted instance') LOG.exception(msg, instance=instance) raise exception.BuildAbortException( instance_uuid=instance['uuid'], @@ -1380,8 +1381,8 @@ def _build_instance(self, context, request_spec, filter_properties, try: self._deallocate_network(context, instance) except 
Exception: - msg = _('Failed to dealloc network ' - 'for failed instance') + msg = _LE('Failed to dealloc network ' + 'for failed instance') LOG.exception(msg, instance=instance) except Exception: exc_info = sys.exc_info() @@ -1453,7 +1454,7 @@ def _reschedule_or_error(self, context, instance, exc_info, except Exception: rescheduled = False - LOG.exception(_("Error trying to reschedule"), + LOG.exception(_LE("Error trying to reschedule"), instance_uuid=instance_uuid) return rescheduled @@ -1562,8 +1563,8 @@ def _allocate_network_async(self, context, instance, requested_networks, log_info = {'attempt': attempt, 'attempts': attempts} if attempt == attempts: - LOG.exception(_('Instance failed network setup ' - 'after %(attempts)d attempt(s)'), + LOG.exception(_LE('Instance failed network setup ' + 'after %(attempts)d attempt(s)'), log_info) raise exc_info[0], exc_info[1], exc_info[2] LOG.warn(_('Instance failed network setup ' @@ -1747,7 +1748,7 @@ def _prep_block_device(self, context, instance, bdms): raise exception.InvalidBDM() except Exception: - LOG.exception(_('Instance failed block device setup'), + LOG.exception(_LE('Instance failed block device setup'), instance=instance) raise exception.InvalidBDM() @@ -1767,7 +1768,8 @@ def _spawn(self, context, instance, image_meta, network_info, block_device_info) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_('Instance failed to spawn'), instance=instance) + LOG.exception(_LE('Instance failed to spawn'), + instance=instance) current_power_state = self._get_power_state(context, instance) @@ -1949,7 +1951,7 @@ def do_build_and_run_instance(context, instance, image, request_spec, self._set_instance_error_state(context, instance) except Exception: # Should not reach here. 
- msg = _('Unexpected build failure, not rescheduling build.') + msg = _LE('Unexpected build failure, not rescheduling build.') LOG.exception(msg, instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) @@ -2022,7 +2024,7 @@ def _build_and_run_instance(self, context, instance, image, injected_files, reason=msg) except (exception.VirtualInterfaceCreateException, exception.VirtualInterfaceMacAddressException) as e: - LOG.exception(_('Failed to allocate network(s)'), + LOG.exception(_LE('Failed to allocate network(s)'), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) @@ -2070,7 +2072,7 @@ def _build_resources(self, context, instance, requested_networks, except Exception: # Because this allocation is async any failures are likely to occur # when the driver accesses network_info during spawn(). - LOG.exception(_('Failed to allocate network(s)'), + LOG.exception(_LE('Failed to allocate network(s)'), instance=instance) msg = _('Failed to allocate the network(s), not rescheduling.') raise exception.BuildAbortException(instance_uuid=instance.uuid, @@ -2096,7 +2098,7 @@ def _build_resources(self, context, instance, requested_networks, raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=e.format_message()) except Exception: - LOG.exception(_('Failure prepping block device'), + LOG.exception(_LE('Failure prepping block device'), instance=instance) msg = _('Failure prepping block device.') raise exception.BuildAbortException(instance_uuid=instance.uuid, @@ -2108,7 +2110,7 @@ def _build_resources(self, context, instance, requested_networks, with excutils.save_and_reraise_exception() as ctxt: if not isinstance(exc, (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError)): - LOG.exception(_('Instance failed to spawn'), + LOG.exception(_LE('Instance failed to spawn'), instance=instance) # Make sure the async call finishes if network_info is not None: @@ -2129,7 
+2131,7 @@ def _cleanup_allocated_networks(self, context, instance, try: self._deallocate_network(context, instance, requested_networks) except Exception: - msg = _('Failed to deallocate networks') + msg = _LE('Failed to deallocate networks') LOG.exception(msg, instance=instance) return @@ -2355,7 +2357,7 @@ def do_terminate_instance(instance, bdms): # As we're trying to delete always go to Error if something # goes wrong that _delete_instance can't handle. with excutils.save_and_reraise_exception(): - LOG.exception(_('Setting instance vm_state to ERROR'), + LOG.exception(_LE('Setting instance vm_state to ERROR'), instance=instance) self._set_instance_error_state(context, instance) @@ -2567,7 +2569,7 @@ def rebuild_instance(self, context, instance, orig_image_ref, image_ref, compute_node = self._get_compute_info(context, self.host) node_name = compute_node.hypervisor_hostname except exception.NotFound: - LOG.exception(_('Failed to get compute_info for %s') % + LOG.exception(_LE('Failed to get compute_info for %s'), self.host) finally: instance.host = self.host @@ -2993,7 +2995,7 @@ def set_admin_password(self, context, instance, new_pass): raise except Exception as e: # Catch all here because this could be anything. 
- LOG.exception(_('set_admin_password failed: %s') % e, + LOG.exception(_LE('set_admin_password failed: %s'), e, instance=instance) self._set_instance_obj_error_state(context, instance) # We create a new exception here so that we won't @@ -3078,7 +3080,7 @@ def rescue_instance(self, context, instance, rescue_password, network_info, rescue_image_meta, admin_password) except Exception as e: - LOG.exception(_("Error trying to Rescue Instance"), + LOG.exception(_LE("Error trying to Rescue Instance"), instance=instance) raise exception.InstanceNotRescuable( instance_id=instance.uuid, @@ -3502,7 +3504,7 @@ def _reschedule_resize_or_reraise(self, context, image, instance, exc_info, method_args, task_state, exc_info) except Exception as error: rescheduled = False - LOG.exception(_("Error trying to reschedule"), + LOG.exception(_LE("Error trying to reschedule"), instance_uuid=instance_uuid) compute_utils.add_instance_fault_from_exc(context, instance, error, @@ -3693,14 +3695,14 @@ def finish_resize(self, context, disk_info, image, instance, disk_info, image) quotas.commit() except Exception: - LOG.exception(_('Setting instance vm_state to ERROR'), + LOG.exception(_LE('Setting instance vm_state to ERROR'), instance=instance) with excutils.save_and_reraise_exception(): try: quotas.rollback() except Exception as qr_error: - LOG.exception(_("Failed to rollback quota for failed " - "finish_resize: %s"), + LOG.exception(_LE("Failed to rollback quota for failed " + "finish_resize: %s"), qr_error, instance=instance) self._set_instance_error_state(context, instance) @@ -4045,7 +4047,8 @@ def _unshelve_instance(self, context, instance, image, filter_properties, block_device_info=block_device_info) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_('Instance failed to spawn'), instance=instance) + LOG.exception(_LE('Instance failed to spawn'), + instance=instance) if image: instance.image_ref = shelved_image_ref @@ -4296,8 +4299,8 @@ def 
_attach_volume(self, context, instance, bdm): do_check_attach=False, do_driver_attach=True) except Exception: # pylint: disable=W0702 with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to attach %(volume_id)s " - "at %(mountpoint)s"), + LOG.exception(_LE("Failed to attach %(volume_id)s " + "at %(mountpoint)s"), {'volume_id': bdm.volume_id, 'mountpoint': bdm['mount_device']}, context=context, instance=instance) @@ -4335,8 +4338,8 @@ def _detach_volume(self, context, instance, bdm): encryption=encryption) except Exception: # pylint: disable=W0702 with excutils.save_and_reraise_exception(): - LOG.exception(_('Failed to detach volume %(volume_id)s ' - 'from %(mp)s'), + LOG.exception(_LE('Failed to detach volume %(volume_id)s ' + 'from %(mp)s'), {'volume_id': volume_id, 'mp': mp}, context=context, instance=instance) self.volume_api.roll_detaching(context, volume_id) @@ -4408,17 +4411,17 @@ def _swap_volume(self, context, instance, bdm, connector, old_volume_id, failed = True with excutils.save_and_reraise_exception(): if new_cinfo: - msg = _("Failed to swap volume %(old_volume_id)s " - "for %(new_volume_id)s") - LOG.exception(msg % {'old_volume_id': old_volume_id, - 'new_volume_id': new_volume_id}, + msg = _LE("Failed to swap volume %(old_volume_id)s " + "for %(new_volume_id)s") + LOG.exception(msg, {'old_volume_id': old_volume_id, + 'new_volume_id': new_volume_id}, context=context, instance=instance) else: - msg = _("Failed to connect to volume %(volume_id)s " - "with volume at %(mountpoint)s") - LOG.exception(msg % {'volume_id': new_volume_id, - 'mountpoint': bdm['device_name']}, + msg = _LE("Failed to connect to volume %(volume_id)s " + "with volume at %(mountpoint)s") + LOG.exception(msg, {'volume_id': new_volume_id, + 'mountpoint': bdm['device_name']}, context=context, instance=instance) self.volume_api.roll_detaching(context, old_volume_id) @@ -4701,7 +4704,7 @@ def live_migration(self, context, dest, instance, block_migration, except Exception: 
with excutils.save_and_reraise_exception(): - LOG.exception(_('Pre live migration failed at %s'), + LOG.exception(_LE('Pre live migration failed at %s'), dest, instance=instance) self._rollback_live_migration(context, instance, dest, block_migration, migrate_data) @@ -4899,7 +4902,7 @@ def post_live_migration_at_destination(self, context, instance, compute_node = self._get_compute_info(context, self.host) node_name = compute_node.hypervisor_hostname except exception.NotFound: - LOG.exception(_('Failed to get compute_info for %s') % self.host) + LOG.exception(_LE('Failed to get compute_info for %s'), self.host) finally: instance.host = self.host instance.power_state = current_power_state @@ -5201,7 +5204,7 @@ def _poll_shelved_instances(self, context): instance.save() self.shelve_offload_instance(context, instance) except Exception: - LOG.exception(_('Periodic task failed to offload instance.'), + LOG.exception(_LE('Periodic task failed to offload instance.'), instance=instance) @periodic_task.periodic_task @@ -5241,9 +5244,9 @@ def _instance_usage_audit(self, context): ignore_missing_network_data=False) successes += 1 except Exception: - LOG.exception(_('Failed to generate usage ' - 'audit for instance ' - 'on host %s') % self.host, + LOG.exception(_LE('Failed to generate usage ' + 'audit for instance ' + 'on host %s'), self.host, instance=instance) errors += 1 compute_utils.finish_instance_usage_audit(context, @@ -5531,8 +5534,8 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state, # because the same power_state will be retrieved next # time and retried. # For example, there might be another task scheduled. - LOG.exception(_("error during stop() in " - "sync_power_state."), + LOG.exception(_LE("error during stop() in " + "sync_power_state."), instance=db_instance) elif vm_power_state == power_state.SUSPENDED: LOG.warn(_("Instance is suspended unexpectedly. 
Calling " @@ -5540,8 +5543,8 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state, try: self.compute_api.stop(context, db_instance) except Exception: - LOG.exception(_("error during stop() in " - "sync_power_state."), + LOG.exception(_LE("error during stop() in " + "sync_power_state."), instance=db_instance) elif vm_power_state == power_state.PAUSED: # Note(maoy): a VM may get into the paused state not only @@ -5571,8 +5574,8 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state, # instance. self.compute_api.force_stop(context, db_instance) except Exception: - LOG.exception(_("error during stop() in " - "sync_power_state."), + LOG.exception(_LE("error during stop() in " + "sync_power_state."), instance=db_instance) elif vm_state == vm_states.PAUSED: if vm_power_state in (power_state.SHUTDOWN, @@ -5582,8 +5585,8 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state, try: self.compute_api.force_stop(context, db_instance) except Exception: - LOG.exception(_("error during stop() in " - "sync_power_state."), + LOG.exception(_LE("error during stop() in " + "sync_power_state."), instance=db_instance) elif vm_state in (vm_states.SOFT_DELETED, vm_states.DELETED): @@ -5785,7 +5788,7 @@ def _error_out_instance_on_exception(self, context, instance, task_state=None) raise error.inner_exception except Exception: - LOG.exception(_('Setting instance vm_state to ERROR'), + LOG.exception(_LE('Setting instance vm_state to ERROR'), instance_uuid=instance_uuid) with excutils.save_and_reraise_exception(): if quotas: From d441dcb2eeaf6c552ef469d804469ac97a2651a4 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 4 Aug 2014 07:59:00 -0700 Subject: [PATCH 265/486] Update devref out-of-tree policy grammar error The sentence structure was wrong and therefore bugging me. 
Change-Id: Iac7f0803d9cc9b73075f3f0aa23e690386be71ee --- doc/source/devref/policies.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/devref/policies.rst b/doc/source/devref/policies.rst index 28777bc6a9..b36a427991 100644 --- a/doc/source/devref/policies.rst +++ b/doc/source/devref/policies.rst @@ -18,7 +18,7 @@ Out Of Tree Support =================== While nova has many entrypoints and other places in the code that allow for -wiring in out of tree code. Upstream doesn't actively make any guarantees +wiring in out of tree code, upstream doesn't actively make any guarantees about these extensibility points; we don't support them, make any guarantees about compatibility, stability, etc. From b717696b5cff69e3586e06c399be7d06c057e503 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 4 Aug 2014 10:15:37 -0700 Subject: [PATCH 266/486] Make spawn_n() stub properly ignore errors in the child thread work When we call spawn_n() normally, we fork off a thread that can run or die on its own, without affecting the parent. In unit tests, we stub this out to be a synchronous call, but we allow any exceptions that occur in that work to bubble up to the caller. This is not normal behavior and thus we should discard any such exceptions in order to mimic actual behavior of a child thread. Change-Id: I35ab21e9525aa76cced797436daa0b99a4fa99f2 Related-bug: #1349147 --- nova/tests/fake_utils.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/nova/tests/fake_utils.py b/nova/tests/fake_utils.py index cb73bc8bb9..7a97866d20 100644 --- a/nova/tests/fake_utils.py +++ b/nova/tests/fake_utils.py @@ -23,6 +23,14 @@ def stub_out_utils_spawn_n(stubs): This aids testing async processes by blocking until they're done. 
""" def no_spawn(func, *args, **kwargs): - return func(*args, **kwargs) + try: + return func(*args, **kwargs) + except Exception: + # NOTE(danms): This is supposed to simulate spawning + # of a thread, which would run separate from the parent, + # and die silently on error. If we don't catch and discard + # any exceptions here, we're not honoring the usual + # behavior. + pass stubs.Set(utils, 'spawn_n', no_spawn) From 9d334b2231ec8d9ec19b88e71022f705147a2eaa Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 11 Apr 2014 15:16:54 -0700 Subject: [PATCH 267/486] Remove instance_info_cache_delete() from conductor This was not used in v2.0 of the RPC API, so we can remove it outright without deprecating it in conductor manager. Related to blueprint compute-manager-objects-juno Change-Id: I68068c1d0ef8f346406fe3ccb341a8d0a2474bdd --- nova/conductor/api.py | 3 --- nova/conductor/manager.py | 3 --- nova/conductor/rpcapi.py | 5 ----- nova/tests/conductor/test_conductor.py | 7 ------- 4 files changed, 18 deletions(-) diff --git a/nova/conductor/api.py b/nova/conductor/api.py index 77444ab66f..b7e3257a52 100644 --- a/nova/conductor/api.py +++ b/nova/conductor/api.py @@ -74,9 +74,6 @@ def instance_get_all_by_host_and_node(self, context, host, node): return self._manager.instance_get_all_by_host(context, host, node, None) - def instance_info_cache_delete(self, context, instance): - return self._manager.instance_info_cache_delete(context, instance) - def migration_get_in_progress_by_host_and_node(self, context, host, node): return self._manager.migration_get_in_progress_by_host_and_node( context, host, node) diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py index d0ab4f981b..6d360e8e21 100644 --- a/nova/conductor/manager.py +++ b/nova/conductor/manager.py @@ -241,9 +241,6 @@ def instance_destroy(self, context, instance): result = self.db.instance_destroy(context, instance['uuid']) return jsonutils.to_primitive(result) - def instance_info_cache_delete(self, 
context, instance): - self.db.instance_info_cache_delete(context, instance['uuid']) - def instance_fault_create(self, context, values): result = self.db.instance_fault_create(context, values) return jsonutils.to_primitive(result) diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py index 0972f89e9e..b6748d49c2 100644 --- a/nova/conductor/rpcapi.py +++ b/nova/conductor/rpcapi.py @@ -219,11 +219,6 @@ def block_device_mapping_get_all_by_instance(self, context, instance, return cctxt.call(context, 'block_device_mapping_get_all_by_instance', instance=instance_p, legacy=legacy) - def instance_info_cache_delete(self, context, instance): - instance_p = jsonutils.to_primitive(instance) - cctxt = self.client.prepare() - cctxt.call(context, 'instance_info_cache_delete', instance=instance_p) - def vol_get_usage_by_time(self, context, start_time): start_time_p = jsonutils.to_primitive(start_time) cctxt = self.client.prepare() diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py index 1b396af45b..50d4b616f8 100644 --- a/nova/tests/conductor/test_conductor.py +++ b/nova/tests/conductor/test_conductor.py @@ -186,13 +186,6 @@ def test_block_device_mapping_get_all_by_instance(self): self.context, fake_inst, legacy=False) self.assertEqual(result, 'fake-result') - def test_instance_info_cache_delete(self): - self.mox.StubOutWithMock(db, 'instance_info_cache_delete') - db.instance_info_cache_delete(self.context, 'fake-uuid') - self.mox.ReplayAll() - self.conductor.instance_info_cache_delete(self.context, - {'uuid': 'fake-uuid'}) - def test_vol_usage_update(self): self.mox.StubOutWithMock(db, 'vol_usage_update') self.mox.StubOutWithMock(compute_utils, 'usage_volume_info') From 17f6357dc951145e131c7e243c797e7e3a8f99cb Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 4 Aug 2014 15:31:43 +0000 Subject: [PATCH 268/486] Updated from global requirements Change-Id: I498bf40a94120a4f56a35154aa4d7a66668cd3fb --- requirements.txt 
| 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 3240cefcf9..a26949c6d8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -24,9 +24,9 @@ Babel>=1.3 iso8601>=0.1.9 jsonschema>=2.0.0,<3.0.0 python-cinderclient>=1.0.7 -python-neutronclient>=2.3.5,<3 +python-neutronclient>=2.3.6,<3 python-glanceclient>=0.13.1 -python-keystoneclient>=0.9.0 +python-keystoneclient>=0.10.0 six>=1.7.0 stevedore>=0.14 websockify>=0.5.1,<0.6 From c13b33254a367782d1b778c8523b420d425a44e9 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 4 Aug 2014 14:10:18 -0700 Subject: [PATCH 269/486] Handle MacAddressInUseClient exception from Neutron when creating port This adds exception handling for the new MacAddressInUseClient exception from python-neutronclient-2.3.6 when trying to create a port with a specific MAC address. The PortInUse exception is used so that the build will be rescheduled if this error is encountered. Also, the servers API already handles PortInUse and translates it to an HTTPConflict response. 
Closes-Bug: #1347778 Change-Id: I929b5f9d79bcb7e60e8b3482e5ee1ca2fababed1 --- nova/network/neutronv2/api.py | 6 ++++++ nova/tests/network/test_neutronv2.py | 24 ++++++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index f1a3654e44..ceb28f734b 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -217,6 +217,12 @@ def _create_port(self, port_client, instance, network_id, port_req_body, LOG.warning(_LW('Neutron error: No more fixed IPs in network: %s'), network_id, instance=instance) raise exception.NoMoreFixedIps() + except neutron_client_exc.MacAddressInUseClient: + LOG.warning(_LW('Neutron error: MAC address %(mac)s is already ' + 'in use on network %(network)s.') % + {'mac': mac_address, 'network': network_id}, + instance=instance) + raise exception.PortInUse(port_id=mac_address) except neutron_client_exc.NeutronClientException: with excutils.save_and_reraise_exception(): LOG.exception(_('Neutron error creating port on network %s'), diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index 1ff73b9361..fb894b040e 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -2345,6 +2345,30 @@ def test_create_port_for_instance_no_more_ip(self): instance, net['id'], port_req_body) create_port_mock.assert_called_once_with(port_req_body) + @mock.patch.object(client.Client, 'create_port', + side_effect=exceptions.MacAddressInUseClient()) + def test_create_port_for_instance_mac_address_in_use(self, + create_port_mock): + # Create fake data. 
+ instance = fake_instance.fake_instance_obj(self.context) + net = {'id': 'my_netid1', + 'name': 'my_netname1', + 'subnets': ['mysubnid1'], + 'tenant_id': instance['project_id']} + zone = 'compute:%s' % instance['availability_zone'] + port_req_body = {'port': {'device_id': instance['uuid'], + 'device_owner': zone, + 'mac_address': 'XX:XX:XX:XX:XX:XX'}} + available_macs = set(['XX:XX:XX:XX:XX:XX']) + # Run the code. + self.assertRaises(exception.PortInUse, + self.api._create_port, + neutronv2.get_client(self.context), + instance, net['id'], port_req_body, + available_macs=available_macs) + # Assert the calls. + create_port_mock.assert_called_once_with(port_req_body) + class TestNeutronv2ModuleMethods(test.TestCase): From 49c3ac406503b5fd80fea896ff0eca4117c5f2ac Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Thu, 27 Mar 2014 11:29:32 -0400 Subject: [PATCH 270/486] libvirt: parse disk backing chains from domain XML Add support for parsing XML elements in the libvirt domain XML description for disks. This is being added to libvirt in order to support blockjob operations for network-attached disks. 
Implements: blueprint libvirt-volume-snap-network-disk Change-Id: I370c9e8d6a1b1f66e385f009f897816f2e705a36 --- nova/tests/virt/libvirt/test_config.py | 61 ++++++++++++++++++++++++++ nova/virt/libvirt/config.py | 49 +++++++++++++++++++-- 2 files changed, 107 insertions(+), 3 deletions(-) diff --git a/nova/tests/virt/libvirt/test_config.py b/nova/tests/virt/libvirt/test_config.py index e0363ed3e3..3eccc4ac61 100644 --- a/nova/tests/virt/libvirt/test_config.py +++ b/nova/tests/virt/libvirt/test_config.py @@ -681,6 +681,67 @@ def test_config_file_parse(self): self.assertEqual(obj.target_bus, 'ide') +class LibvirtConfigGuestDiskBackingStoreTest(LibvirtConfigBaseTest): + + def test_config_file_parse(self): + xml = """ + + + + + + + + + """ + xmldoc = etree.fromstring(xml) + + obj = config.LibvirtConfigGuestDiskBackingStore() + obj.parse_dom(xmldoc) + + self.assertEqual(obj.driver_name, 'qemu') + self.assertEqual(obj.driver_format, 'qcow2') + self.assertEqual(obj.source_type, 'file') + self.assertEqual(obj.source_file, '/var/lib/libvirt/images/mid.qcow2') + self.assertEqual(obj.backing_store.driver_name, 'qemu') + self.assertEqual(obj.backing_store.source_type, 'file') + self.assertEqual(obj.backing_store.source_file, + '/var/lib/libvirt/images/base.qcow2') + self.assertIsNone(obj.backing_store.backing_store) + + def test_config_network_parse(self): + xml = """ + + + + + + + + + + + + + """ + xmldoc = etree.fromstring(xml) + + obj = config.LibvirtConfigGuestDiskBackingStore() + obj.parse_dom(xmldoc) + + self.assertEqual(obj.source_type, 'network') + self.assertEqual(obj.source_protocol, 'gluster') + self.assertEqual(obj.source_name, 'volume1/img1') + self.assertEqual(obj.source_hosts[0], 'host1') + self.assertEqual(obj.source_ports[0], '24007') + self.assertEqual(obj.index, '1') + self.assertEqual(obj.backing_store.source_name, 'volume1/img2') + self.assertEqual(obj.backing_store.index, '2') + self.assertEqual(obj.backing_store.source_hosts[0], 'host1') + 
self.assertEqual(obj.backing_store.source_ports[0], '24007') + self.assertIsNone(obj.backing_store.backing_store) + + class LibvirtConfigGuestFilesysTest(LibvirtConfigBaseTest): def test_config_mount(self): diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py index 7ff03ff6b5..0cb66344bf 100644 --- a/nova/virt/libvirt/config.py +++ b/nova/virt/libvirt/config.py @@ -503,6 +503,7 @@ def __init__(self, **kwargs): self.physical_block_size = None self.readonly = False self.snapshot = None + self.backing_store = None def format_dom(self): dev = super(LibvirtConfigGuestDisk, self).format_dom() @@ -630,15 +631,57 @@ def parse_dom(self, xmldoc): elif c.tag == 'serial': self.serial = c.text - - for c in xmldoc.getchildren(): - if c.tag == 'target': + elif c.tag == 'target': if self.source_type == 'mount': self.target_path = c.get('dir') else: self.target_dev = c.get('dev') self.target_bus = c.get('bus', None) + elif c.tag == 'backingStore': + b = LibvirtConfigGuestDiskBackingStore() + b.parse_dom(c) + self.backing_store = b + + +class LibvirtConfigGuestDiskBackingStore(LibvirtConfigObject): + def __init__(self, **kwargs): + super(LibvirtConfigGuestDiskBackingStore, self).__init__( + root_name="backingStore", **kwargs) + + self.index = None + self.source_type = None + self.source_file = None + self.source_protocol = None + self.source_name = None + self.source_hosts = [] + self.source_ports = [] + self.driver_name = None + self.driver_format = None + self.backing_store = None + + def parse_dom(self, xmldoc): + super(LibvirtConfigGuestDiskBackingStore, self).parse_dom(xmldoc) + + self.source_type = xmldoc.get('type') + self.index = xmldoc.get('index') + + for c in xmldoc.getchildren(): + if c.tag == 'driver': + self.driver_name = c.get('name') + self.driver_format = c.get('type') + elif c.tag == 'source': + self.source_file = c.get('file') + self.source_protocol = c.get('protocol') + self.source_name = c.get('name') + for d in c.getchildren(): + if d.tag == 
'host': + self.source_hosts.append(d.get('name')) + self.source_ports.append(d.get('port')) + elif c.tag == 'backingStore': + if c.getchildren(): + self.backing_store = LibvirtConfigGuestDiskBackingStore() + self.backing_store.parse_dom(c) class LibvirtConfigGuestSnapshotDisk(LibvirtConfigObject): From bdf11c856266114bf3d150eb482d4e55f5f37245 Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Thu, 27 Mar 2014 11:29:50 -0400 Subject: [PATCH 271/486] libvirt: volume snapshot delete for network-attached disks Add support for performing a volume snapshot delete for a network/(libgfapi)-attached disk based on the proposal at: http://www.redhat.com/archives/libvir-list/2014-February/msg01226.html Uses a new VIR_DOMAIN_BLOCK_COMMIT_RELATIVE flag for blockCommit calls which keeps qcow2 backing file entries as relative filenames rather than prepending them with path information when blockCommit operations are performed. Support for this flag is required and heuristically detected rather than based on version number. Selection of disk for blockCommit/blockRebase is handled by searching the snapshot hierarchy and using an identifier such as vda[2] meaning the second item in the snapshot chain for disk 'vda'. 
Implements: blueprint libvirt-volume-snap-network-disk Change-Id: I31710b787c39d23870fb45a460f460663ecb261c --- nova/tests/virt/libvirt/fakelibvirt.py | 3 + nova/tests/virt/libvirt/test_driver.py | 124 +++++++++++++++++++++++++ nova/virt/libvirt/driver.py | 99 ++++++++++++++++++-- 3 files changed, 216 insertions(+), 10 deletions(-) diff --git a/nova/tests/virt/libvirt/fakelibvirt.py b/nova/tests/virt/libvirt/fakelibvirt.py index 378e795848..5190f673d9 100644 --- a/nova/tests/virt/libvirt/fakelibvirt.py +++ b/nova/tests/virt/libvirt/fakelibvirt.py @@ -143,6 +143,9 @@ def _reset(): VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 +# blockCommit flags +VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4 + VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1 VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2 diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 79908847ee..0f54027ea7 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -10298,6 +10298,40 @@ def setUp(self): """ + # alternate domain info with network-backed snapshot chain + self.dom_netdisk_xml = """ + + + + + + 0e38683e-f0af-418f-a3f1-6b67eaffffff + + + + + + + + + + + + + + + + + + + + + 0e38683e-f0af-418f-a3f1-6b67ea0f919d + + + + """ + self.create_info = {'type': 'qcow2', 'snapshot_id': '1234-5678', 'new_file': 'new-file'} @@ -10313,6 +10347,10 @@ def setUp(self): 'file_to_merge': 'snap.img', 'merge_target_file': 'other-snap.img'} + self.delete_info_netdisk = {'type': 'qcow2', + 'file_to_merge': 'snap.img', + 'merge_target_file': 'root.img'} + self.delete_info_invalid_type = {'type': 'made_up_type', 'file_to_merge': 'some_file', 'merge_target_file': @@ -10681,3 +10719,89 @@ def test_volume_snapshot_delete_invalid_type(self): self.volume_uuid, self.snapshot_id, self.delete_info_invalid_type) + + def test_volume_snapshot_delete_netdisk_1(self): + """Delete newest snapshot -- blockRebase for libgfapi/network disk.""" + + 
class FakeNetdiskDomain(FakeVirtDomain): + def __init__(self, *args, **kwargs): + super(FakeNetdiskDomain, self).__init__(*args, **kwargs) + + def XMLDesc(self, *args): + return self.dom_netdisk_xml + + # Ensure the libvirt lib has VIR_DOMAIN_BLOCK_COMMIT_RELATIVE + self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) + + instance = db.instance_create(self.c, self.inst) + snapshot_id = 'snapshot-1234' + + domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) + self.mox.StubOutWithMock(domain, 'XMLDesc') + domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml) + + self.mox.StubOutWithMock(self.conn, '_lookup_by_name') + self.mox.StubOutWithMock(self.conn, '_has_min_version') + self.mox.StubOutWithMock(domain, 'blockRebase') + self.mox.StubOutWithMock(domain, 'blockCommit') + self.mox.StubOutWithMock(domain, 'blockJobInfo') + + self.conn._lookup_by_name('instance-%s' % instance['id']).\ + AndReturn(domain) + self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True) + + domain.blockRebase('vdb', 'vdb[1]', 0, 0) + + domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000}) + domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000}) + + self.mox.ReplayAll() + + self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid, + snapshot_id, self.delete_info_1) + + self.mox.VerifyAll() + + def test_volume_snapshot_delete_netdisk_2(self): + """Delete older snapshot -- blockCommit for libgfapi/network disk.""" + + class FakeNetdiskDomain(FakeVirtDomain): + def __init__(self, *args, **kwargs): + super(FakeNetdiskDomain, self).__init__(*args, **kwargs) + + def XMLDesc(self, *args): + return self.dom_netdisk_xml + + # Ensure the libvirt lib has VIR_DOMAIN_BLOCK_COMMIT_RELATIVE + self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) + + instance = db.instance_create(self.c, self.inst) + snapshot_id = 'snapshot-1234' + + domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) + self.mox.StubOutWithMock(domain, 'XMLDesc') + 
domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml) + + self.mox.StubOutWithMock(self.conn, '_lookup_by_name') + self.mox.StubOutWithMock(self.conn, '_has_min_version') + self.mox.StubOutWithMock(domain, 'blockRebase') + self.mox.StubOutWithMock(domain, 'blockCommit') + self.mox.StubOutWithMock(domain, 'blockJobInfo') + + self.conn._lookup_by_name('instance-%s' % instance['id']).\ + AndReturn(domain) + self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True) + + domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0, + fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE) + + domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000}) + domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000}) + + self.mox.ReplayAll() + + self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid, + snapshot_id, + self.delete_info_netdisk) + + self.mox.VerifyAll() diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 5da030bbaa..aa2108eee6 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -304,6 +304,9 @@ def repr_method(self): MIN_LIBVIRT_BLOCKIO_VERSION = (0, 10, 2) # BlockJobInfo management requirement MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1) +# Relative block commit (feature is detected, +# this version is only used for messaging) +MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION = (1, 2, 7) def libvirt_error_handler(context, err): @@ -1937,6 +1940,7 @@ def _volume_snapshot_delete(self, context, instance, volume_id, ##### Find dev name my_dev = None + active_disk = None xml = virt_dom.XMLDesc(0) xml_doc = etree.fromstring(xml) @@ -1944,6 +1948,8 @@ def _volume_snapshot_delete(self, context, instance, volume_id, device_info = vconfig.LibvirtConfigGuest() device_info.parse_dom(xml_doc) + active_disk_object = None + for guest_disk in device_info.devices: if (guest_disk.root_name != 'disk'): continue @@ -1954,7 +1960,12 @@ def _volume_snapshot_delete(self, context, instance, volume_id, if guest_disk.serial == volume_id: 
my_dev = guest_disk.target_dev - if my_dev is None: + active_disk = guest_disk.source_path + active_protocol = guest_disk.source_protocol + active_disk_object = guest_disk + break + + if my_dev is None or (active_disk is None and active_protocol is None): msg = _('Disk with id: %s ' 'not found attached to instance.') % volume_id LOG.debug('Domain XML: %s', xml) @@ -1962,15 +1973,57 @@ def _volume_snapshot_delete(self, context, instance, volume_id, LOG.debug("found device at %s", my_dev) + def _get_snap_dev(filename, backing_store): + if filename is None: + msg = _('filename cannot be None') + raise exception.NovaException(msg) + + # libgfapi delete + LOG.debug("XML: %s" % xml) + + LOG.debug("active disk object: %s" % active_disk_object) + + # determine reference within backing store for desired image + filename_to_merge = filename + matched_name = None + b = backing_store + index = None + + current_filename = active_disk_object.source_name.split('/')[1] + if current_filename == filename_to_merge: + return my_dev + '[0]' + + while b is not None: + source_filename = b.source_name.split('/')[1] + if source_filename == filename_to_merge: + LOG.debug('found match: %s' % b.source_name) + matched_name = b.source_name + index = b.index + break + + b = b.backing_store + + if matched_name is None: + msg = _('no match found for %s') % (filename_to_merge) + raise exception.NovaException(msg) + + LOG.debug('index of match (%s) is %s' % (b.source_name, index)) + + my_snap_dev = '%s[%s]' % (my_dev, index) + return my_snap_dev + if delete_info['merge_target_file'] is None: # pull via blockRebase() # Merge the most recent snapshot into the active image rebase_disk = my_dev - rebase_base = delete_info['file_to_merge'] - rebase_bw = 0 rebase_flags = 0 + rebase_base = delete_info['file_to_merge'] # often None + if active_protocol is not None: + rebase_base = _get_snap_dev(delete_info['file_to_merge'], + active_disk_object.backing_store) + rebase_bw = 0 LOG.debug('disk: %(disk)s, base: 
%(base)s, ' 'bw: %(bw)s, flags: %(flags)s', @@ -1985,27 +2038,53 @@ def _volume_snapshot_delete(self, context, instance, volume_id, if result == 0: LOG.debug('blockRebase started successfully') - while self._wait_for_block_job(virt_dom, rebase_disk, + while self._wait_for_block_job(virt_dom, my_dev, abort_on_error=True): LOG.debug('waiting for blockRebase job completion') time.sleep(0.5) else: # commit with blockCommit() - + my_snap_base = None + my_snap_top = None commit_disk = my_dev - commit_base = delete_info['merge_target_file'] - commit_top = delete_info['file_to_merge'] + commit_flags = 0 + + if active_protocol is not None: + my_snap_base = _get_snap_dev(delete_info['merge_target_file'], + active_disk_object.backing_store) + my_snap_top = _get_snap_dev(delete_info['file_to_merge'], + active_disk_object.backing_store) + try: + commit_flags |= libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE + except AttributeError: + ver = '.'.join( + [str(x) for x in + MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION]) + msg = _("Relative blockcommit support was not detected. 
" + "Libvirt '%s' or later is required for online " + "deletion of network storage-backed volume " + "snapshots.") % ver + raise exception.Invalid(msg) + + commit_base = my_snap_base or delete_info['merge_target_file'] + commit_top = my_snap_top or delete_info['file_to_merge'] bandwidth = 0 - flags = 0 + + LOG.debug('will call blockCommit with commit_disk=%(commit_disk)s ' + 'commit_base=%(commit_base)s ' + 'commit_top=%(commit_top)s ' + % {'commit_disk': commit_disk, + 'commit_base': commit_base, + 'commit_top': commit_top}) result = virt_dom.blockCommit(commit_disk, commit_base, commit_top, - bandwidth, flags) + bandwidth, commit_flags) if result == 0: LOG.debug('blockCommit started successfully') - while self._wait_for_block_job(virt_dom, commit_disk, + while self._wait_for_block_job(virt_dom, my_dev, abort_on_error=True): LOG.debug('waiting for blockCommit job completion') time.sleep(0.5) From 7f65d43b0465eb27c638e44395e5ca535574c2a1 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 24 Jul 2014 12:41:07 -0400 Subject: [PATCH 272/486] Revert "Deallocate the network if rescheduling for Revert "Deallocate the network if rescheduling for Ironic" This reverts commit 963ad71af4750e28745b6de262da11816b403801. The original fix, targeted towards Nova BM and Ironic tests is actually making us have more test failures. Lets go back to the original race... and then we can have a bit more time to test a proper fix. 
Closes-bug #1346424 Change-Id: Icbbe16ffef69132177165d21c727d791b62a232f --- nova/compute/manager.py | 7 +++---- nova/tests/compute/test_compute_mgr.py | 4 ++-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 9627405b9a..b2f1e7d06f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1920,10 +1920,9 @@ def do_build_and_run_instance(context, instance, image, request_spec, self._set_instance_error_state(context, instance.uuid) return retry['exc'] = traceback.format_exception(*sys.exc_info()) - # The MAC address for this instance is tied to the host so if - # we're going to reschedule we have to free the network details - # and reallocate on the next host. - if self.driver.macs_for_instance(instance): + # dhcp_options are per host, so if they're set we need to + # deallocate the networks and reallocate on the next host. + if self.driver.dhcp_options_for_instance(instance): self._cleanup_allocated_networks(context, instance, requested_networks) diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index f269d1b536..09982cdf15 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -1973,7 +1973,7 @@ def test_rescheduled_exception_do_not_deallocate_network(self): def test_rescheduled_exception_deallocate_network_if_dhcp(self): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute.driver, - 'macs_for_instance') + 'dhcp_options_for_instance') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') @@ -1985,7 +1985,7 @@ def test_rescheduled_exception_deallocate_network_if_dhcp(self): self.filter_properties).AndRaise( exception.RescheduledException(reason='', instance_uuid=self.instance['uuid'])) - self.compute.driver.macs_for_instance(self.instance).AndReturn( + 
self.compute.driver.dhcp_options_for_instance(self.instance).AndReturn( {'fake': 'options'}) self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) From a8c8af82600e2905b558588e615b2e512adc7e7e Mon Sep 17 00:00:00 2001 From: "ChangBo Guo(gcb)" Date: Mon, 4 Aug 2014 19:36:20 +0800 Subject: [PATCH 273/486] libvirt: make guestfs methods always return list of tuples guestfs.GuestFS supports parameter python_return_dict with default False in 1.22 (backported in 1.20). This indicates that your program wants to receive Python dicts for methods in the API that return list of tuples. See http://libguestfs.org/guestfs-release-notes.1.html In a future version of libguestfs, its default value will be True, then that will break the code, to avoid this, we'd better force python_return_dict as False. See http://libguestfs.org/guestfs-python.3.html This commit makes method inspect_get_mountpoints always return list of tuples. Change-Id: I8211cb31a7a890f86cdd818767b3d5e8cfd5bbed --- nova/tests/virt/disk/vfs/fakeguestfs.py | 16 +++++++++++++--- nova/tests/virt/disk/vfs/test_guestfs.py | 11 +++++++++++ nova/virt/disk/vfs/guestfs.py | 10 ++++++---- 3 files changed, 30 insertions(+), 7 deletions(-) diff --git a/nova/tests/virt/disk/vfs/fakeguestfs.py b/nova/tests/virt/disk/vfs/fakeguestfs.py index 7a3b33039c..a3933f1063 100644 --- a/nova/tests/virt/disk/vfs/fakeguestfs.py +++ b/nova/tests/virt/disk/vfs/fakeguestfs.py @@ -15,10 +15,15 @@ class GuestFS(object): SUPPORT_CLOSE_ON_EXIT = True + SUPPORT_RETURN_DICT = True def __init__(self, **kwargs): if not self.SUPPORT_CLOSE_ON_EXIT and 'close_on_exit' in kwargs: raise TypeError('close_on_exit') + if not self.SUPPORT_RETURN_DICT and 'python_return_dict' in kwargs: + raise TypeError('python_return_dict') + + self._python_return_dict = kwargs.get('python_return_dict', False) self.kwargs = kwargs self.drives = [] self.running = False @@ -53,9 +58,14 @@ def inspect_os(self): return 
["/dev/guestvgf/lv_root"] def inspect_get_mountpoints(self, dev): - return [["/home", "/dev/mapper/guestvgf-lv_home"], - ["/", "/dev/mapper/guestvgf-lv_root"], - ["/boot", "/dev/vda1"]] + mountpoints = [("/home", "/dev/mapper/guestvgf-lv_home"), + ("/", "/dev/mapper/guestvgf-lv_root"), + ("/boot", "/dev/vda1")] + + if self.SUPPORT_RETURN_DICT and self._python_return_dict: + return dict(mountpoints) + else: + return mountpoints def mount_options(self, options, device, mntpoint): if mntpoint == "/": diff --git a/nova/tests/virt/disk/vfs/test_guestfs.py b/nova/tests/virt/disk/vfs/test_guestfs.py index 20b15488e5..66f8206b8d 100644 --- a/nova/tests/virt/disk/vfs/test_guestfs.py +++ b/nova/tests/virt/disk/vfs/test_guestfs.py @@ -236,3 +236,14 @@ def test_close_on_error(self): vfs.setup() self.assertNotIn('close_on_exit', vfs.handle.kwargs) vfs.teardown() + + def test_python_return_dict(self): + vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2") + vfs.setup() + self.assertFalse(vfs.handle.kwargs['python_return_dict']) + vfs.teardown() + self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_RETURN_DICT', False) + vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2") + vfs.setup() + self.assertNotIn('python_return_dict', vfs.handle.kwargs) + vfs.teardown() diff --git a/nova/virt/disk/vfs/guestfs.py b/nova/virt/disk/vfs/guestfs.py index fabfde5ba4..a829820ab9 100644 --- a/nova/virt/disk/vfs/guestfs.py +++ b/nova/virt/disk/vfs/guestfs.py @@ -118,12 +118,14 @@ def setup(self): LOG.debug("Setting up appliance for %(imgfile)s %(imgfmt)s", {'imgfile': self.imgfile, 'imgfmt': self.imgfmt}) try: - self.handle = tpool.Proxy(guestfs.GuestFS(close_on_exit=False)) + self.handle = tpool.Proxy( + guestfs.GuestFS(python_return_dict=False, + close_on_exit=False)) except TypeError as e: - if 'close_on_exit' in str(e): + if 'close_on_exit' in str(e) or 'python_return_dict' in str(e): # NOTE(russellb) In case we're not using a version of - # libguestfs new enough to support 
the close_on_exit parameter, - # which was added in libguestfs 1.20. + # libguestfs new enough to support parameters close_on_exit + # and python_return_dict which were added in libguestfs 1.20. self.handle = tpool.Proxy(guestfs.GuestFS()) else: raise From 81348368c70cd39c6241e7da6d33629e577494f5 Mon Sep 17 00:00:00 2001 From: Yaguang Tang Date: Tue, 5 Aug 2014 11:06:10 +0800 Subject: [PATCH 274/486] Rename rbd.py to rbd_utils.py in libvirt driver directory In libvirt driver directory, rbd.py confict with global rbd library which is imported in rbd.py, so we rename rbd.py to rbd_utils.py. Change-Id: Ib62e430e678fe09c4a8475a636a8ecc11a194f5c Closes-Bug: #1352595 --- nova/tests/virt/libvirt/test_driver.py | 4 +- nova/tests/virt/libvirt/test_imagebackend.py | 16 ++--- nova/tests/virt/libvirt/test_rbd.py | 62 ++++++++++---------- nova/virt/libvirt/driver.py | 4 +- nova/virt/libvirt/imagebackend.py | 4 +- nova/virt/libvirt/{rbd.py => rbd_utils.py} | 0 6 files changed, 45 insertions(+), 45 deletions(-) rename nova/virt/libvirt/{rbd.py => rbd_utils.py} (100%) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 2cd581fea0..0a2c339518 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -80,7 +80,7 @@ from nova.virt.libvirt import driver as libvirt_driver from nova.virt.libvirt import firewall from nova.virt.libvirt import imagebackend -from nova.virt.libvirt import rbd +from nova.virt.libvirt import rbd_utils from nova.virt.libvirt import utils as libvirt_utils from nova.virt import netutils @@ -6129,7 +6129,7 @@ def fake_delete_instance_files(instance): "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"} conn.destroy(self.context, instance, []) - @mock.patch.object(rbd, 'RBDDriver') + @mock.patch.object(rbd_utils, 'RBDDriver') def test_cleanup_rbd(self, mock_driver): driver = mock_driver.return_value driver.cleanup_volumes = mock.Mock() diff --git 
a/nova/tests/virt/libvirt/test_imagebackend.py b/nova/tests/virt/libvirt/test_imagebackend.py index de429ccf2a..3a87c2e397 100644 --- a/nova/tests/virt/libvirt/test_imagebackend.py +++ b/nova/tests/virt/libvirt/test_imagebackend.py @@ -29,7 +29,7 @@ from nova.tests import fake_processutils from nova.tests.virt.libvirt import fake_libvirt_utils from nova.virt.libvirt import imagebackend -from nova.virt.libvirt import rbd +from nova.virt.libvirt import rbd_utils CONF = cfg.CONF @@ -671,8 +671,8 @@ def setUp(self): group='libvirt') self.libvirt_utils = imagebackend.libvirt_utils self.utils = imagebackend.utils - self.mox.StubOutWithMock(rbd, 'rbd') - self.mox.StubOutWithMock(rbd, 'rados') + self.mox.StubOutWithMock(rbd_utils, 'rbd') + self.mox.StubOutWithMock(rbd_utils, 'rados') def test_cache(self): image = self.image_class(self.INSTANCE, self.NAME) @@ -744,7 +744,7 @@ def test_create_image(self): fn = self.mox.CreateMockAnything() fn(max_size=None, target=self.TEMPLATE_PATH) - rbd.rbd.RBD_FEATURE_LAYERING = 1 + rbd_utils.rbd.RBD_FEATURE_LAYERING = 1 fake_processutils.fake_execute_clear_log() fake_processutils.stub_out_processutils_execute(self.stubs) @@ -770,7 +770,7 @@ def test_create_image_resize(self): full_size = self.SIZE * 2 fn(max_size=full_size, target=self.TEMPLATE_PATH) - rbd.rbd.RBD_FEATURE_LAYERING = 1 + rbd_utils.rbd.RBD_FEATURE_LAYERING = 1 fake_processutils.fake_execute_clear_log() fake_processutils.stub_out_processutils_execute(self.stubs) @@ -797,7 +797,7 @@ def test_create_image_resize(self): self.mox.VerifyAll() def test_create_image_already_exists(self): - rbd.rbd.RBD_FEATURE_LAYERING = 1 + rbd_utils.rbd.RBD_FEATURE_LAYERING = 1 image = self.image_class(self.INSTANCE, self.NAME) self.mox.StubOutWithMock(image, 'check_image_exists') @@ -896,8 +896,8 @@ def test_image_rbd(self): pool = "FakePool" self.flags(images_rbd_pool=pool, group='libvirt') self.flags(images_rbd_ceph_conf=conf, group='libvirt') - self.mox.StubOutWithMock(rbd, 'rbd') - 
self.mox.StubOutWithMock(rbd, 'rados') + self.mox.StubOutWithMock(rbd_utils, 'rbd') + self.mox.StubOutWithMock(rbd_utils, 'rados') self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd) def test_image_default(self): diff --git a/nova/tests/virt/libvirt/test_rbd.py b/nova/tests/virt/libvirt/test_rbd.py index 415f2eccd7..bcbdc25f59 100644 --- a/nova/tests/virt/libvirt/test_rbd.py +++ b/nova/tests/virt/libvirt/test_rbd.py @@ -17,7 +17,7 @@ from nova.openstack.common import log as logging from nova import test from nova import utils -from nova.virt.libvirt import rbd +from nova.virt.libvirt import rbd_utils LOG = logging.getLogger(__name__) @@ -53,8 +53,8 @@ class RbdTestCase(test.NoDBTestCase): - @mock.patch.object(rbd, 'rbd') - @mock.patch.object(rbd, 'rados') + @mock.patch.object(rbd_utils, 'rbd') + @mock.patch.object(rbd_utils, 'rados') def setUp(self, mock_rados, mock_rbd): super(RbdTestCase, self).setUp() @@ -75,7 +75,7 @@ def setUp(self, mock_rados, mock_rbd): self.mock_rbd.RBD.Error = Exception self.rbd_pool = 'rbd' - self.driver = rbd.RBDDriver(self.rbd_pool, None, None) + self.driver = rbd_utils.RBDDriver(self.rbd_pool, None, None) self.volume_name = u'volume-00000001' @@ -101,9 +101,9 @@ def test_bad_locations(self): self.assertFalse(self.driver.is_cloneable({'url': loc}, {'disk_format': 'raw'})) - @mock.patch.object(rbd.RBDDriver, '_get_fsid') - @mock.patch.object(rbd, 'rbd') - @mock.patch.object(rbd, 'rados') + @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid') + @mock.patch.object(rbd_utils, 'rbd') + @mock.patch.object(rbd_utils, 'rados') def test_cloneable(self, mock_rados, mock_rbd, mock_get_fsid): mock_get_fsid.return_value = 'abc' location = {'url': 'rbd://abc/pool/image/snap'} @@ -111,7 +111,7 @@ def test_cloneable(self, mock_rados, mock_rbd, mock_get_fsid): self.assertTrue(self.driver.is_cloneable(location, info)) self.assertTrue(mock_get_fsid.called) - @mock.patch.object(rbd.RBDDriver, '_get_fsid') + @mock.patch.object(rbd_utils.RBDDriver, 
'_get_fsid') def test_uncloneable_different_fsid(self, mock_get_fsid): mock_get_fsid.return_value = 'abc' location = {'url': 'rbd://def/pool/image/snap'} @@ -119,10 +119,10 @@ def test_uncloneable_different_fsid(self, mock_get_fsid): self.driver.is_cloneable(location, {'disk_format': 'raw'})) self.assertTrue(mock_get_fsid.called) - @mock.patch.object(rbd.RBDDriver, '_get_fsid') - @mock.patch.object(rbd, 'RBDVolumeProxy') - @mock.patch.object(rbd, 'rbd') - @mock.patch.object(rbd, 'rados') + @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid') + @mock.patch.object(rbd_utils, 'RBDVolumeProxy') + @mock.patch.object(rbd_utils, 'rbd') + @mock.patch.object(rbd_utils, 'rados') def test_uncloneable_unreadable(self, mock_rados, mock_rbd, mock_proxy, mock_get_fsid): mock_get_fsid.return_value = 'abc' @@ -136,7 +136,7 @@ def test_uncloneable_unreadable(self, mock_rados, mock_rbd, mock_proxy, snapshot='snap', read_only=True) self.assertTrue(mock_get_fsid.called) - @mock.patch.object(rbd.RBDDriver, '_get_fsid') + @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid') def test_uncloneable_bad_format(self, mock_get_fsid): mock_get_fsid.return_value = 'abc' location = {'url': 'rbd://abc/pool/image/snap'} @@ -153,9 +153,9 @@ def test_get_mon_addrs(self, mock_execute): ports = ['6789', '6790', '6791', '6792', '6791'] self.assertEqual((hosts, ports), self.driver.get_mon_addrs()) - @mock.patch.object(rbd, 'RADOSClient') - @mock.patch.object(rbd, 'rbd') - @mock.patch.object(rbd, 'rados') + @mock.patch.object(rbd_utils, 'RADOSClient') + @mock.patch.object(rbd_utils, 'rbd') + @mock.patch.object(rbd_utils, 'rados') def test_clone(self, mock_rados, mock_rbd, mock_client): pool = u'images' image = u'image-name' @@ -184,7 +184,7 @@ def _inner(): rbd.clone.assert_called_once_with(*args, **kwargs) self.assertEqual(client.__enter__.call_count, 2) - @mock.patch.object(rbd, 'RBDVolumeProxy') + @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_resize(self, mock_proxy): size = 1024 proxy = 
mock_proxy.return_value @@ -192,24 +192,24 @@ def test_resize(self, mock_proxy): self.driver.resize(self.volume_name, size) proxy.resize.assert_called_once_with(size) - @mock.patch.object(rbd.RBDDriver, '_disconnect_from_rados') - @mock.patch.object(rbd.RBDDriver, '_connect_to_rados') - @mock.patch.object(rbd, 'rbd') - @mock.patch.object(rbd, 'rados') + @mock.patch.object(rbd_utils.RBDDriver, '_disconnect_from_rados') + @mock.patch.object(rbd_utils.RBDDriver, '_connect_to_rados') + @mock.patch.object(rbd_utils, 'rbd') + @mock.patch.object(rbd_utils, 'rados') def test_rbd_volume_proxy_init(self, mock_rados, mock_rbd, mock_connect_from_rados, mock_disconnect_from_rados): mock_connect_from_rados.return_value = (None, None) mock_disconnect_from_rados.return_value = (None, None) - with rbd.RBDVolumeProxy(self.driver, self.volume_name): + with rbd_utils.RBDVolumeProxy(self.driver, self.volume_name): mock_connect_from_rados.assert_called_once_with(None) self.assertFalse(mock_disconnect_from_rados.called) mock_disconnect_from_rados.assert_called_once_with(None, None) - @mock.patch.object(rbd, 'rbd') - @mock.patch.object(rbd, 'rados') + @mock.patch.object(rbd_utils, 'rbd') + @mock.patch.object(rbd_utils, 'rados') def test_connect_to_rados_default(self, mock_rados, mock_rbd): ret = self.driver._connect_to_rados() self.assertTrue(self.mock_rados.Rados.connect.called) @@ -218,8 +218,8 @@ def test_connect_to_rados_default(self, mock_rados, mock_rbd): self.assertEqual(ret[1], self.mock_rados.Rados.ioctx) self.mock_rados.Rados.open_ioctx.assert_called_with(self.rbd_pool) - @mock.patch.object(rbd, 'rbd') - @mock.patch.object(rbd, 'rados') + @mock.patch.object(rbd_utils, 'rbd') + @mock.patch.object(rbd_utils, 'rados') def test_connect_to_rados_different_pool(self, mock_rados, mock_rbd): ret = self.driver._connect_to_rados('alt_pool') self.assertTrue(self.mock_rados.Rados.connect.called) @@ -228,7 +228,7 @@ def test_connect_to_rados_different_pool(self, mock_rados, mock_rbd): 
self.assertEqual(ret[1], self.mock_rados.Rados.ioctx) self.mock_rados.Rados.open_ioctx.assert_called_with('alt_pool') - @mock.patch.object(rbd, 'rados') + @mock.patch.object(rbd_utils, 'rados') def test_connect_to_rados_error(self, mock_rados): mock_rados.Rados.open_ioctx.side_effect = mock_rados.Error self.assertRaises(mock_rados.Error, self.driver._connect_to_rados) @@ -257,7 +257,7 @@ def test_ceph_args_rbd_user_and_ceph_conf(self): self.assertEqual(['--id', 'foo', '--conf', '/path/bar.conf'], self.driver.ceph_args()) - @mock.patch.object(rbd, 'RBDVolumeProxy') + @mock.patch.object(rbd_utils, 'RBDVolumeProxy') def test_exists(self, mock_proxy): snapshot = 'snap' proxy = mock_proxy.return_value @@ -267,9 +267,9 @@ def test_exists(self, mock_proxy): proxy.__enter__.assert_called_once_with() proxy.__exit__.assert_called_once_with(None, None, None) - @mock.patch.object(rbd, 'rbd') - @mock.patch.object(rbd, 'rados') - @mock.patch.object(rbd, 'RADOSClient') + @mock.patch.object(rbd_utils, 'rbd') + @mock.patch.object(rbd_utils, 'rados') + @mock.patch.object(rbd_utils, 'RADOSClient') def test_cleanup_volumes(self, mock_client, mock_rados, mock_rbd): instance = {'uuid': '12345'} diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 5273efab45..827f1ae298 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -92,7 +92,7 @@ from nova.virt.libvirt import imagebackend from nova.virt.libvirt import imagecache from nova.virt.libvirt import lvm -from nova.virt.libvirt import rbd +from nova.virt.libvirt import rbd_utils from nova.virt.libvirt import utils as libvirt_utils from nova.virt import netutils from nova.virt import watchdog_actions @@ -1101,7 +1101,7 @@ def cleanup(self, context, instance, network_info, block_device_info=None, @staticmethod def _get_rbd_driver(): - return rbd.RBDDriver( + return rbd_utils.RBDDriver( pool=CONF.libvirt.images_rbd_pool, ceph_conf=CONF.libvirt.images_rbd_ceph_conf, 
rbd_user=CONF.libvirt.rbd_user) diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index 184d7fe742..962d47f280 100644 --- a/nova/virt/libvirt/imagebackend.py +++ b/nova/virt/libvirt/imagebackend.py @@ -34,7 +34,7 @@ from nova.virt import images from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import lvm -from nova.virt.libvirt import rbd +from nova.virt.libvirt import rbd_utils from nova.virt.libvirt import utils as libvirt_utils __imagebackend_opts = [ @@ -525,7 +525,7 @@ def __init__(self, instance=None, disk_name=None, path=None, **kwargs): self.rbd_user = CONF.libvirt.rbd_user self.ceph_conf = CONF.libvirt.images_rbd_ceph_conf - self.driver = rbd.RBDDriver( + self.driver = rbd_utils.RBDDriver( pool=self.pool, ceph_conf=self.ceph_conf, rbd_user=self.rbd_user) diff --git a/nova/virt/libvirt/rbd.py b/nova/virt/libvirt/rbd_utils.py similarity index 100% rename from nova/virt/libvirt/rbd.py rename to nova/virt/libvirt/rbd_utils.py From 5b96d7b125aeb5bfc91a59939423cc888ecd7ef4 Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Fri, 18 Jul 2014 12:06:34 +0800 Subject: [PATCH 275/486] Add missed discoverable policy rules for flavor-manage v3 Add discoverable policy rule for v3 extension flavor-manage, Although without this patch, it also works. But that's used to indicate to user there is option for flavor-manager discoverable. 
Change-Id: I23f8bbaccfa104a12e4a4c9eea8bf04107b57d43 --- etc/nova/policy.json | 1 + 1 file changed, 1 insertion(+) diff --git a/etc/nova/policy.json b/etc/nova/policy.json index cc5b8ea4a8..7cd1049dbf 100644 --- a/etc/nova/policy.json +++ b/etc/nova/policy.json @@ -137,6 +137,7 @@ "compute_extension:v3:flavor-extra-specs:update": "rule:admin_api", "compute_extension:v3:flavor-extra-specs:delete": "rule:admin_api", "compute_extension:flavormanage": "rule:admin_api", + "compute_extension:v3:flavor-manage:discoverable": "", "compute_extension:v3:flavor-manage": "rule:admin_api", "compute_extension:floating_ip_dns": "", "compute_extension:floating_ip_pools": "", From 9830d97564562db0ec80fb54d8376c2ca8df6dfd Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 14 Jul 2014 05:06:12 -0700 Subject: [PATCH 276/486] Diagnostics: add validation for types When creating diagnostics the calling application is able to pass cpu, nic and disk diagnostics. This patch validates that those are of the correct type. TrivialFix Change-Id: Id76c37e2f3ff50b0029bb1fa26348e43aee432f1 --- nova/tests/virt/test_diagnostics.py | 21 +++++++++++++++++++++ nova/virt/diagnostics.py | 15 +++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/nova/tests/virt/test_diagnostics.py b/nova/tests/virt/test_diagnostics.py index 3f0b5b3ca4..f3969fc09f 100644 --- a/nova/tests/virt/test_diagnostics.py +++ b/nova/tests/virt/test_diagnostics.py @@ -12,6 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+from nova import exception from nova import test from nova.virt import diagnostics @@ -208,3 +209,23 @@ def test_diagnostics_serialize(self): 'version': '1.0'} result = diags.serialize() self.assertEqual(expected, result) + + def test_diagnostics_invalid_input(self): + self.assertRaises(exception.InvalidInput, + diagnostics.Diagnostics, + cpu_details='invalid type') + self.assertRaises(exception.InvalidInput, + diagnostics.Diagnostics, + cpu_details=['invalid entry']) + self.assertRaises(exception.InvalidInput, + diagnostics.Diagnostics, + nic_details='invalid type') + self.assertRaises(exception.InvalidInput, + diagnostics.Diagnostics, + nic_details=['invalid entry']) + self.assertRaises(exception.InvalidInput, + diagnostics.Diagnostics, + disk_details='invalid type') + self.assertRaises(exception.InvalidInput, + diagnostics.Diagnostics, + disk_details=['invalid entry']) diff --git a/nova/virt/diagnostics.py b/nova/virt/diagnostics.py index f9fea5c572..2538ad1c09 100644 --- a/nova/virt/diagnostics.py +++ b/nova/virt/diagnostics.py @@ -15,6 +15,9 @@ import six +from nova import exception +from nova.i18n import _ + class CpuDiagnostics(object): @@ -119,19 +122,31 @@ def __init__(self, state=None, driver=None, hypervisor_os=None, self.uptime = uptime self.config_drive = config_drive if cpu_details: + self._validate_type(cpu_details, CpuDiagnostics, 'cpu_details') self.cpu_details = cpu_details else: self.cpu_details = [] if nic_details: + self._validate_type(nic_details, NicDiagnostics, 'nic_details') self.nic_details = nic_details else: self.nic_details = [] if disk_details: + self._validate_type(disk_details, DiskDiagnostics, 'disk_details') self.disk_details = disk_details else: self.disk_details = [] self.memory_details = MemoryDiagnostics() + def _validate_type(self, input, type, str_input): + if not isinstance(input, list): + reason = _("Invalid type for %s") % str_input + raise exception.InvalidInput(reason=reason) + for i in input: + if not isinstance(i, 
type): + reason = _("Invalid type for %s entry") % str_input + raise exception.InvalidInput(reason=reason) + def add_cpu(self, time=0): self.cpu_details.append(CpuDiagnostics(time=time)) From 0d9bab118feb587d3ac7859b8cf17b8b1388bf2c Mon Sep 17 00:00:00 2001 From: Claudiu Belu Date: Wed, 16 Oct 2013 09:24:38 -0700 Subject: [PATCH 277/486] Removes unnecessary instructions in test_hypervapi Lines have no actual effect on the tests. Removing them will facilitate further unit tests. Partial-Bug: #1220256 Change-Id: I805aba5e75f2640221b2dc0034950c0832841247 --- nova/tests/virt/hyperv/test_hypervapi.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/tests/virt/hyperv/test_hypervapi.py b/nova/tests/virt/hyperv/test_hypervapi.py index a37ed6b63d..37310a6806 100644 --- a/nova/tests/virt/hyperv/test_hypervapi.py +++ b/nova/tests/virt/hyperv/test_hypervapi.py @@ -134,10 +134,6 @@ def fake_sleep(ms): pass self.stubs.Set(time, 'sleep', fake_sleep) - def fake_vmutils__init__(self, host='.'): - pass - vmutils.VMUtils.__init__ = fake_vmutils__init__ - self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils) self._mox.StubOutWithMock(fake.PathUtils, 'open') self._mox.StubOutWithMock(fake.PathUtils, 'copyfile') From 37520a7dc14971b2d244f37febdb9fb13edbfd2f Mon Sep 17 00:00:00 2001 From: Swami Reddy Date: Wed, 18 Jun 2014 15:49:18 +0530 Subject: [PATCH 278/486] Add instanceset info to StartInstance response Currently startinstance response doesn't have the instanceset information with InstanceID, current state and previous state. It just returns the "True". As per the AWS API reference document, the StartInstance response elements should include the instanceset information as below: req-a7326465-5ce2-4ed6-ab89-394b38cca85f i-00000001 16 running 80 stopped Included the instanceset in to startinstance response elements and updated the test cases for startinstance response elements in nova/tests/api/ec2/test_cloud.py file. 
Change-Id: I08ef7ed88f983b66a30c76d6b7b754222097a03b Closes-bug: #1321220 --- nova/api/ec2/cloud.py | 14 +++++++++++++- nova/tests/api/ec2/test_cloud.py | 16 ++++++++++++++-- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 555e9b6ef5..1b3fa5162c 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -1060,6 +1060,17 @@ def _format_stop_instances(self, context, instance_ids, previous_states): instances_set.append(i) return {'instancesSet': instances_set} + def _format_start_instances(self, context, instance_id, previous_states): + instances_set = [] + for (ec2_id, previous_state) in zip(instance_id, previous_states): + i = {} + i['instanceId'] = ec2_id + i['previousState'] = _state_description(previous_state['vm_state'], + previous_state['shutdown_terminate']) + i['currentState'] = _state_description(vm_states.ACTIVE, True) + instances_set.append(i) + return {'instancesSet': instances_set} + def _format_instance_bdm(self, context, instance_uuid, root_device_name, result): """Format InstanceBlockDeviceMappingResponseItemType.""" @@ -1459,7 +1470,8 @@ def start_instances(self, context, instance_id, **kwargs): for instance in instances: extensions.check_compute_policy(context, 'start', instance) self.compute_api.start(context, instance) - return True + return self._format_start_instances(context, instance_id, + instances) def _get_image(self, context, ec2_id): try: diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 8c80885c2d..d79f3b8881 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -2237,8 +2237,14 @@ def test_stop_start_instance(self): result = self.cloud.stop_instances(self.context, [instance_id]) self.assertEqual(result, expected) + expected = {'instancesSet': [ + {'instanceId': 'i-00000001', + 'previousState': {'code': 80, + 'name': 'stopped'}, + 'currentState': {'code': 16, + 'name': 'running'}}]} result = 
self.cloud.start_instances(self.context, [instance_id]) - self.assertTrue(result) + self.assertEqual(result, expected) expected = {'instancesSet': [ {'instanceId': 'i-00000001', @@ -2267,8 +2273,14 @@ def test_start_instances(self): result = self.cloud.stop_instances(self.context, [instance_id]) self.assertTrue(result) + expected = {'instancesSet': [ + {'instanceId': 'i-00000001', + 'previousState': {'code': 80, + 'name': 'stopped'}, + 'currentState': {'code': 16, + 'name': 'running'}}]} result = self.cloud.start_instances(self.context, [instance_id]) - self.assertTrue(result) + self.assertEqual(result, expected) expected = {'instancesSet': [ {'instanceId': 'i-00000001', From 5effbb1a385b6f85d28873968d0ee7112e6d0d59 Mon Sep 17 00:00:00 2001 From: Chris Buccella Date: Sun, 8 Jun 2014 04:28:06 +0000 Subject: [PATCH 279/486] Migrate test_glance from mox to mock A few tests in test_glance.py are still using mox. Update these remaining tests to use mock, as this is the general direction in which the project is moving. 
Change-Id: I81fc36467b158625f29593c7173542f843244311 --- nova/tests/image/test_glance.py | 40 +++++++++++---------------------- 1 file changed, 13 insertions(+), 27 deletions(-) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 394c3d1fbd..3ae970425c 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -24,7 +24,6 @@ import glanceclient.exc import mock -import mox from oslo.config import cfg import testtools @@ -112,12 +111,10 @@ def setUp(self): self.client = glance_stubs.StubGlanceClient() self.service = self._create_image_service(self.client) self.context = context.RequestContext('fake', 'fake', auth_token=True) - self.mox = mox.Mox() self.files_to_clean = [] def tearDown(self): super(TestGlanceImageService, self).tearDown() - self.mox.UnsetStubs() for f in self.files_to_clean: try: os.unlink(f) @@ -238,7 +235,8 @@ def get(self, image_id): os.remove(client.s_tmpfname) os.remove(tmpfname) - def test_download_module_filesystem_match(self): + @mock.patch('nova.virt.libvirt.utils.copy_image') + def test_download_module_filesystem_match(self, mock_copy_image): mountpoint = '/' fs_id = 'someid' @@ -259,8 +257,6 @@ def data(self, image_id): 'transfer module should have intercepted ' 'it.') - self.mox.StubOutWithMock(lv_utils, 'copy_image') - image_id = 1 # doesn't matter client = MyGlanceStubClient() self.flags(allowed_direct_url_schemes=['file'], group='glance') @@ -272,11 +268,8 @@ def data(self, image_id): self.flags(group='image_file_url:gluster', mountpoint=mountpoint) dest_file = os.devnull - lv_utils.copy_image(mox.IgnoreArg(), dest_file) - - self.mox.ReplayAll() service.download(self.context, image_id, dst_path=dest_file) - self.mox.VerifyAll() + mock_copy_image.assert_called_once_with('/' + os.devnull, os.devnull) def test_download_module_no_filesystem_match(self): mountpoint = '/' @@ -357,7 +350,8 @@ def _fake_copyfile(source, dest): service.download(self.context, image_id, 
dst_path=os.devnull) self.assertTrue(self.copy_called) - def test_download_module_file_bad_module(self): + @mock.patch('nova.virt.libvirt.utils.copy_image') + def test_download_module_file_bad_module(self, mock_copy_image): _, data_filename = self._get_tempfile() file_url = 'applesauce://%s' % data_filename @@ -375,8 +369,6 @@ def data(self, image_id): return "someData" self.flags(allowed_direct_url_schemes=['applesauce'], group='glance') - - self.mox.StubOutWithMock(lv_utils, 'copy_image') self.flags(allowed_direct_url_schemes=['file'], group='glance') image_id = 1 # doesn't matter client = MyGlanceStubClient() @@ -384,11 +376,9 @@ def data(self, image_id): # by not calling copyfileobj in the file download module we verify # that the requirements were not met for its use - self.mox.ReplayAll() service.download(self.context, image_id, dst_path=os.devnull) - self.mox.VerifyAll() - self.assertTrue(client.data_called) + self.assertFalse(mock_copy_image.called) def test_client_forbidden_converts_to_imagenotauthed(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): @@ -1253,18 +1243,14 @@ def test_get_ipv6_api_servers(self): class TestUpdateGlanceImage(test.NoDBTestCase): - def test_start(self): + @mock.patch('nova.image.glance.GlanceImageService') + def test_start(self, mock_glance_image_service): consumer = glance.UpdateGlanceImage( 'context', 'id', 'metadata', 'stream') - image_service = self.mox.CreateMock(glance.GlanceImageService) - - self.mox.StubOutWithMock(glance, 'get_remote_image_service') - - glance.get_remote_image_service( - 'context', 'id').AndReturn((image_service, 'image_id')) - image_service.update( - 'context', 'image_id', 'metadata', 'stream', purge_props=False) - self.mox.ReplayAll() + with mock.patch.object(glance, 'get_remote_image_service') as a_mock: + a_mock.return_value = (mock_glance_image_service, 'image_id') - consumer.start() + consumer.start() + mock_glance_image_service.update.assert_called_with( + 'context', 'image_id', 
'metadata', 'stream', purge_props=False) From 6d12d70a42b922225dd16338ec9db8c60df89e38 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 24 Jul 2014 09:29:08 -0700 Subject: [PATCH 280/486] Make network/api.py use Network object for associations This makes the network/api.py module use the Network object for network host and project associate/disassociate operations. There was very little testing of this, so this patch adds/corrects tests to verify object behavior. Related to blueprint compute-manager-objects-juno Change-Id: I5c0d61be35fc4edf647e572c746515988651bcc0 --- nova/network/api.py | 21 ++++++------- nova/tests/network/test_api.py | 55 ++++++++++++++++++++++++++-------- 2 files changed, 54 insertions(+), 22 deletions(-) diff --git a/nova/network/api.py b/nova/network/api.py index 01bbcdc4c3..41f3916bb6 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -97,7 +97,8 @@ def delete(self, context, network_uuid): @wrap_check_policy def disassociate(self, context, network_uuid): network = self.get(context, network_uuid) - self.db.network_disassociate(context, network['id']) + objects.Network.disassociate(context, network.id, + host=True, project=True) @wrap_check_policy def get_fixed_ip(self, context, id): @@ -317,21 +318,21 @@ def add_network_to_project(self, context, project_id, network_uuid=None): def associate(self, context, network_uuid, host=base_api.SENTINEL, project=base_api.SENTINEL): """Associate or disassociate host or project to network.""" - network_id = self.get(context, network_uuid)['id'] + network = self.get(context, network_uuid) if host is not base_api.SENTINEL: if host is None: - self.db.network_disassociate(context, network_id, - disassociate_host=True, - disassociate_project=False) + objects.Network.disassociate(context, network.id, + host=True, project=False) else: - self.db.network_set_host(context, network_id, host) + network.host = host + network.save() if project is not base_api.SENTINEL: if project is None: - 
self.db.network_disassociate(context, network_id, - disassociate_host=False, - disassociate_project=True) + objects.Network.disassociate(context, network.id, + host=False, project=True) else: - self.db.network_associate(context, project, network_id, True) + objects.Network.associate(context, project, + network_id=network.id, force=True) @wrap_check_policy def get_instance_nw_info(self, context, instance, **kwargs): diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py index da1e96270f..89164db916 100644 --- a/nova/tests/network/test_api.py +++ b/nova/tests/network/test_api.py @@ -339,21 +339,52 @@ def test_is_multi_host_network_has_project_id_multi(self): def test_is_multi_host_network_has_project_id_non_multi(self): self._test_is_multi_host_network_has_project_id(False) - def test_network_disassociate_project(self): - def fake_network_disassociate(ctx, network_id, disassociate_host, - disassociate_project): - self.assertEqual(network_id, 1) - self.assertEqual(disassociate_host, False) - self.assertEqual(disassociate_project, True) + @mock.patch('nova.objects.Network.get_by_uuid') + @mock.patch('nova.objects.Network.disassociate') + def test_network_disassociate_project(self, mock_disassociate, mock_get): + net_obj = objects.Network(context=self.context, id=1) + mock_get.return_value = net_obj + self.network_api.associate(self.context, FAKE_UUID, project=None) + mock_disassociate.assert_called_once_with(self.context, net_obj.id, + host=False, project=True) + + @mock.patch('nova.objects.Network.get_by_uuid') + @mock.patch('nova.objects.Network.disassociate') + def test_network_disassociate_host(self, mock_disassociate, mock_get): + net_obj = objects.Network(context=self.context, id=1) + mock_get.return_value = net_obj + self.network_api.associate(self.context, FAKE_UUID, host=None) + mock_disassociate.assert_called_once_with(self.context, net_obj.id, + host=True, project=False) - def fake_get(context, network_uuid): - return {'id': 1} + 
@mock.patch('nova.objects.Network.get_by_uuid') + @mock.patch('nova.objects.Network.associate') + def test_network_associate_project(self, mock_associate, mock_get): + net_obj = objects.Network(context=self.context, id=1) + mock_get.return_value = net_obj + project = mock.sentinel.project + self.network_api.associate(self.context, FAKE_UUID, project=project) + mock_associate.assert_called_once_with(self.context, project, + network_id=net_obj.id, + force=True) - self.stubs.Set(self.network_api.db, 'network_disassociate', - fake_network_disassociate) - self.stubs.Set(self.network_api, 'get', fake_get) + @mock.patch('nova.objects.Network.get_by_uuid') + @mock.patch('nova.objects.Network.save') + def test_network_associate_host(self, mock_save, mock_get): + net_obj = objects.Network(context=self.context, id=1) + mock_get.return_value = net_obj + host = str(mock.sentinel.host) + self.network_api.associate(self.context, FAKE_UUID, host=host) + mock_save.assert_called_once_with() + self.assertEqual(host, net_obj.host) - self.network_api.associate(self.context, FAKE_UUID, project=None) + @mock.patch('nova.objects.Network.get_by_uuid') + @mock.patch('nova.objects.Network.disassociate') + def test_network_disassociate(self, mock_disassociate, mock_get): + mock_get.return_value = objects.Network(context=self.context, id=123) + self.network_api.disassociate(self.context, FAKE_UUID) + mock_disassociate.assert_called_once_with(self.context, 123, + project=True, host=True) def _test_refresh_cache(self, method, *args, **kwargs): # This test verifies that no call to get_instance_nw_info() is made From f4454f4c6962dd2c57c08dc7fecfcdebe7924e3b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 5 Aug 2014 11:34:09 -0700 Subject: [PATCH 281/486] Add expire reservations in backport position. 
Change-Id: If0e58da50ebe9b50b414737a9bd81d93752506e2 Related-bug: #1348720 --- .../234_add_expire_reservations_index.py | 59 +++++++++++++++++++ .../migrate_repo/versions/234_placeholder.py | 26 -------- 2 files changed, 59 insertions(+), 26 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/234_placeholder.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py b/nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py new file mode 100644 index 0000000000..917ea1461e --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py @@ -0,0 +1,59 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Index, MetaData, Table + +from nova.i18n import _LI +from nova.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def _get_deleted_expire_index(table): + members = sorted(['deleted', 'expire']) + for idx in table.indexes: + if sorted(idx.columns.keys()) == members: + return idx + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + reservations = Table('reservations', meta, autoload=True) + if _get_deleted_expire_index(reservations): + LOG.info(_LI('Skipped adding reservations_deleted_expire_idx ' + 'because an equivalent index already exists.')) + return + + # Based on expire_reservations query + # from: nova/db/sqlalchemy/api.py + index = Index('reservations_deleted_expire_idx', + reservations.c.deleted, reservations.c.expire) + + index.create(migrate_engine) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + reservations = Table('reservations', meta, autoload=True) + + index = _get_deleted_expire_index(reservations) + if index: + index.drop(migrate_engine) + else: + LOG.info(_LI('Skipped removing reservations_deleted_expire_idx ' + 'because index does not exist.')) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/234_placeholder.py b/nova/db/sqlalchemy/migrate_repo/versions/234_placeholder.py deleted file mode 100644 index f5c5483cda..0000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/234_placeholder.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is a placeholder for Icehouse backports. -# Do not use this number for new Juno work. New Juno work starts after -# all the placeholders. -# -# See blueprint backportable-db-migrations-juno -# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html - - -def upgrade(migrate_engine): - pass - - -def downgrade(migration_engine): - pass From 37952d0804b5bd1db2bab7633c22ac8bd3379ccc Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 5 Aug 2014 11:37:17 -0700 Subject: [PATCH 282/486] DB: use assertIsNotNone for unit test Commit 48de2895b9a550a0944b31212349275605a4061d added a validation that should have used assertIsNotNone. TrivialFix Change-Id: Ib6b82d90bb163f7ee06c916e52d3dd40ff2ba34d --- nova/tests/db/test_db_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py index 9ffa0241e5..75ea4f2f1a 100644 --- a/nova/tests/db/test_db_api.py +++ b/nova/tests/db/test_db_api.py @@ -4094,7 +4094,7 @@ def test_floating_ip_update(self): } floating_ref = db.floating_ip_update(self.ctxt, float_ip['address'], values) - self.assertIsNot(floating_ref, None) + self.assertIsNotNone(floating_ref) updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id']) self._assertEqualObjects(updated_float_ip, values, ignored_keys=['id', 'address', 'updated_at', From d3854f2c05502c7407d716da7d1bd7f1ca3e0a15 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Fri, 18 Jul 2014 18:45:30 +1200 Subject: [PATCH 283/486] Add method for deallocating networks on reschedule The original call to dhcp_options_for_instance() is not a great way to check whether deallocation of networks should happen on reschedules. This adds a new virt driver method 'deallocate_networks_on_reschedule' which can be used by a virt driver to say whether reschedules should deallocate networks first. 
This defaults to False and modifies the baremetal virt driver to return True. Ironic virt driver will also need to return True. Closes-Bug: 1342919 Change-Id: I54a3252ab15e2d8b596ccc90eb4755405021f1da --- nova/compute/manager.py | 6 +++--- nova/tests/compute/test_compute_mgr.py | 12 ++++++++---- nova/virt/baremetal/driver.py | 3 +++ nova/virt/driver.py | 4 ++++ 4 files changed, 18 insertions(+), 7 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index b2f1e7d06f..259c0cf972 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1920,9 +1920,9 @@ def do_build_and_run_instance(context, instance, image, request_spec, self._set_instance_error_state(context, instance.uuid) return retry['exc'] = traceback.format_exception(*sys.exc_info()) - # dhcp_options are per host, so if they're set we need to - # deallocate the networks and reallocate on the next host. - if self.driver.dhcp_options_for_instance(instance): + # NOTE(comstud): Deallocate networks if the driver wants + # us to do so. 
+ if self.driver.deallocate_networks_on_reschedule(instance): self._cleanup_allocated_networks(context, instance, requested_networks) diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 09982cdf15..7cf511e2c1 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -1942,6 +1942,8 @@ def test_rescheduled_exception_without_retry(self): def test_rescheduled_exception_do_not_deallocate_network(self): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') + self.mox.StubOutWithMock(self.compute.driver, + 'deallocate_networks_on_reschedule') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') @@ -1953,6 +1955,8 @@ def test_rescheduled_exception_do_not_deallocate_network(self): self.filter_properties).AndRaise( exception.RescheduledException(reason='', instance_uuid=self.instance['uuid'])) + self.compute.driver.deallocate_networks_on_reschedule( + self.instance).AndReturn(False) self.compute.compute_task_api.build_instances(self.context, [self.instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, @@ -1970,10 +1974,10 @@ def test_rescheduled_exception_do_not_deallocate_network(self): block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) - def test_rescheduled_exception_deallocate_network_if_dhcp(self): + def test_rescheduled_exception_deallocate_network(self): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute.driver, - 'dhcp_options_for_instance') + 'deallocate_networks_on_reschedule') self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks') self.mox.StubOutWithMock(self.compute.compute_task_api, 'build_instances') @@ -1985,8 +1989,8 @@ def test_rescheduled_exception_deallocate_network_if_dhcp(self): 
self.filter_properties).AndRaise( exception.RescheduledException(reason='', instance_uuid=self.instance['uuid'])) - self.compute.driver.dhcp_options_for_instance(self.instance).AndReturn( - {'fake': 'options'}) + self.compute.driver.deallocate_networks_on_reschedule( + self.instance).AndReturn(True) self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) self.compute.compute_task_api.build_instances(self.context, diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py index e759bd7055..ec99d4de45 100644 --- a/nova/virt/baremetal/driver.py +++ b/nova/virt/baremetal/driver.py @@ -218,6 +218,9 @@ def _stop_firewall(self, instance, network_info): self.firewall_driver.unfilter_instance( instance, network_info) + def deallocate_networks_on_reschedule(self, instance): + return True + def macs_for_instance(self, instance): context = nova_context.get_admin_context() node_uuid = self._require_node(instance) diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 37338ac6f4..b64459e2ad 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -1088,6 +1088,10 @@ def interface_stats(self, instance_name, iface_id): """ raise NotImplementedError() + def deallocate_networks_on_reschedule(self, instance): + """Does the driver want networks deallocated on reschedule?""" + return False + def macs_for_instance(self, instance): """What MAC addresses must this instance have? From 9283379849906f74047e47a679326e08e923fecc Mon Sep 17 00:00:00 2001 From: Vladik Romanovsky Date: Fri, 27 Jun 2014 08:19:39 -0400 Subject: [PATCH 284/486] libvirt: saving the lxc rootfs device in instance metadata Currently, nbd/loop device, to which lxc image has been connected, is being saved as instance root device, which causing the attached bug. Saving the device name in instance system metadata instead. 
Closes-Bug: #1330981 Change-Id: I3a0533a89ea8af61349118a39b822f4af146cc04 --- nova/tests/virt/libvirt/test_driver.py | 23 +++++++++++++++++++++++ nova/virt/libvirt/driver.py | 19 +++++++++---------- 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 76aa472750..5deebef958 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -7847,6 +7847,29 @@ def test_create_without_pause(self): self.assertEqual(0, create.call_args_list[0][1]['launch_flags']) self.assertEqual(0, domain.resume.call_count) + def test_lxc_create_and_rootfs_saved(self): + self.flags(virt_type='lxc', group='libvirt') + + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = db.instance_create(self.context, self.test_instance) + inst_obj = objects.Instance.get_by_uuid(self.context, instance['uuid']) + + with contextlib.nested( + mock.patch('nova.virt.disk.api.setup_container', + return_value='/dev/nbd1'), + mock.patch('nova.virt.disk.api.clean_lxc_namespace'), + mock.patch('nova.openstack.common.fileutils.ensure_tree'), + mock.patch.object(conn.image_backend, 'image'), + mock.patch.object(conn, '_enable_hairpin'), + mock.patch.object(conn, 'get_info', + return_value={'state': power_state.RUNNING}) + ): + conn._conn.defineXML = mock.Mock() + conn._create_domain('xml', instance=inst_obj) + self.assertEqual('/dev/nbd1', + inst_obj.system_metadata.get( + 'rootfs_device_name')) + def _test_create_with_network_events(self, neutron_failure=None, power_on=True): self.flags(vif_driver="nova.tests.fake_network.FakeVIFDriver", diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 37351fd08e..014a7acb4e 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -890,8 +890,8 @@ def unplug_vifs(self, instance, network_info): def _teardown_container(self, instance): inst_path = 
libvirt_utils.get_instance_path(instance) container_dir = os.path.join(inst_path, 'rootfs') - container_root_device = instance.get('root_device_name') - disk.teardown_container(container_dir, container_root_device) + rootfs_dev = instance.system_metadata.get('rootfs_device_name') + disk.teardown_container(container_dir, rootfs_dev) def _destroy(self, instance): try: @@ -3598,15 +3598,14 @@ def _create_domain_setup_lxc(self, instance): fileutils.ensure_tree(container_dir) image = self.image_backend.image(instance, 'disk') - container_root_device = disk.setup_container(image.path, - container_dir=container_dir, - use_cow=CONF.use_cow_images) + rootfs_dev = disk.setup_container(image.path, + container_dir=container_dir, + use_cow=CONF.use_cow_images) + try: - # Note(GuanQiang): save container root device name here, used for - # detaching the linked image device when deleting - # the lxc instance. - if container_root_device: - instance.root_device_name = container_root_device + # Save rootfs device to disconnect it when deleting the instance + if rootfs_dev: + instance.system_metadata['rootfs_device_name'] = rootfs_dev instance.save() except Exception: with excutils.save_and_reraise_exception(): From c5402ef4fc509047d513a715a1c14e9b4ba9674f Mon Sep 17 00:00:00 2001 From: Mike Perez Date: Wed, 16 Jul 2014 01:02:14 -0700 Subject: [PATCH 285/486] Code change for nova support cinder client v2 Use v2 volume endpoint to attach/detach volume would be failed, due to nova not supporting cinder client v2 yet. This patch is for nova support cinder client v2. 
Implements bp support-cinderclient-v2 Closes-Bug: #1215772 Co-Authored-By: Mike Perez Co-Authored-By: Yaguang Tang Change-Id: Id8abbbb4d9b0c8c49ab51fc3d958ef0d487467f8 --- nova/context.py | 2 +- nova/tests/test_cinder.py | 219 +++++++++++++++++++++++++++++-- nova/tests/volume/test_cinder.py | 12 +- nova/volume/cinder.py | 135 +++++++++++++------ 4 files changed, 308 insertions(+), 60 deletions(-) diff --git a/nova/context.py b/nova/context.py index c512ad534c..818791458a 100644 --- a/nova/context.py +++ b/nova/context.py @@ -82,7 +82,7 @@ def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", if service_catalog: # Only include required parts of service_catalog self.service_catalog = [s for s in service_catalog - if s.get('type') in ('volume',)] + if s.get('type') in ('volume', 'volumev2')] else: # if list is empty or none self.service_catalog = [] diff --git a/nova/tests/test_cinder.py b/nova/tests/test_cinder.py index 81e1fd4f5e..d73aeafdf6 100644 --- a/nova/tests/test_cinder.py +++ b/nova/tests/test_cinder.py @@ -13,6 +13,9 @@ # under the License. 
from cinderclient import exceptions as cinder_exception +from cinderclient.v1 import client as cinder_client_v1 +from cinderclient.v2 import client as cinder_client_v2 +import mock import six.moves.urllib.parse as urlparse from nova import context @@ -39,13 +42,33 @@ def _stub_volume(**kwargs): volume.update(kwargs) return volume + +def _stub_volume_v2(**kwargs): + volume_v2 = { + 'name': None, + 'description': None, + "attachments": [], + "availability_zone": "cinderv2", + "created_at": "2013-08-10T00:00:00.000000", + "id": '00000000-0000-0000-0000-000000000000', + "metadata": {}, + "size": 1, + "snapshot_id": None, + "status": "available", + "volume_type": "None", + "bootable": "true" + } + volume_v2.update(kwargs) + return volume_v2 + + _image_metadata = { 'kernel_id': 'fake', 'ramdisk_id': 'fake' } -class FakeHTTPClient(cinder.cinder_client.client.HTTPClient): +class FakeHTTPClient(cinder.cinder_client.HTTPClient): def _cs_request(self, url, method, **kwargs): # Check that certain things are called correctly @@ -88,11 +111,59 @@ def get_volumes_5678(self, **kw): """Volume with image metadata.""" volume = {'volume': _stub_volume(id='1234', volume_image_metadata=_image_metadata) + } + return (200, volume) + + +class FakeHTTPClientV2(cinder.cinder_client.HTTPClient): + + def _cs_request(self, url, method, **kwargs): + # Check that certain things are called correctly + if method in ['GET', 'DELETE']: + assert 'body' not in kwargs + elif method == 'PUT': + assert 'body' in kwargs + + # Call the method + args = urlparse.parse_qsl(urlparse.urlparse(url)[4]) + kwargs.update(args) + munged_url = url.rsplit('?', 1)[0] + munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_') + munged_url = munged_url.replace('-', '_') + + callback = "%s_%s" % (method.lower(), munged_url) + + if not hasattr(self, callback): + raise AssertionError('Called unknown API method: %s %s, ' + 'expected fakes method name: %s' % + (method, url, callback)) + + # Note the call + 
self.callstack.append((method, url, kwargs.get('body', None))) + + status, body = getattr(self, callback)(**kwargs) + if hasattr(status, 'items'): + return status, body + else: + return {"status": status}, body + + def get_volumes_1234(self, **kw): + volume = {'volume': _stub_volume_v2(id='1234')} + return (200, volume) + + def get_volumes_nonexisting(self, **kw): + raise cinder_exception.NotFound(code=404, message='Resource not found') + + def get_volumes_5678(self, **kw): + """Volume with image metadata.""" + volume = {'volume': _stub_volume_v2( + id='1234', + volume_image_metadata=_image_metadata) } return (200, volume) -class FakeCinderClient(cinder.cinder_client.Client): +class FakeCinderClient(cinder_client_v1.Client): def __init__(self, username, password, project_id=None, auth_url=None, insecure=False, retries=None, cacert=None, timeout=None): @@ -110,6 +181,25 @@ def __init__(self, username, password, project_id=None, auth_url=None, self.callstack = self.client.callstack = [] +class FakeCinderClientV2(cinder_client_v2.Client): + + def __init__(self, username, password, project_id=None, auth_url=None, + insecure=False, retries=None, cacert=None, timeout=None): + super(FakeCinderClientV2, self).__init__(username, password, + project_id=project_id, + auth_url=auth_url, + insecure=insecure, + retries=retries, + cacert=cacert, + timeout=timeout) + self.client = FakeHTTPClientV2(username, password, project_id, + auth_url, insecure=insecure, + retries=retries, cacert=cacert, + timeout=timeout) + # keep a ref to the clients callstack for factory's assert_called + self.callstack = self.client.callstack = [] + + class FakeClientFactory(object): """Keep a ref to the FakeClient since volume.api.cinder throws it away.""" @@ -131,31 +221,58 @@ def assert_called(self, method, url, body=None, pos=-1): assert self.client.callstack[pos][2] == body +class FakeClientV2Factory(object): + """Keep a ref to the FakeClient since volume.api.cinder throws it away.""" + + def 
__call__(self, *args, **kwargs): + self.client = FakeCinderClientV2(*args, **kwargs) + return self.client + + def assert_called(self, method, url, body=None, pos=-1): + expected = (method, url) + called = self.client.callstack[pos][0:2] + + assert self.client.callstack, ("Expected %s %s but no calls " + "were made." % expected) + + assert expected == called, 'Expected %s %s; got %s %s' % (expected + + called) + + if body is not None: + assert self.client.callstack[pos][2] == body + + +fake_client_factory = FakeClientFactory() +fake_client_v2_factory = FakeClientV2Factory() + + +@mock.patch.object(cinder_client_v1, 'Client', fake_client_factory) class CinderTestCase(test.NoDBTestCase): - """Test case for cinder volume api.""" + """Test case for cinder volume v1 api.""" def setUp(self): super(CinderTestCase, self).setUp() - self.fake_client_factory = FakeClientFactory() - self.stubs.Set(cinder.cinder_client, "Client", - self.fake_client_factory) - self.api = cinder.API() catalog = [{ "type": "volume", "name": "cinder", "endpoints": [{"publicURL": "http://localhost:8776/v1/project_id"}] }] + cinder.CONF.set_override('cinder_catalog_info', + 'volume:cinder:publicURL') self.context = context.RequestContext('username', 'project_id', service_catalog=catalog) + cinder.cinderclient(self.context) + + self.api = cinder.API() def assert_called(self, *args, **kwargs): - self.fake_client_factory.assert_called(*args, **kwargs) + fake_client_factory.assert_called(*args, **kwargs) def test_context_with_catalog(self): self.api.get(self.context, '1234') self.assert_called('GET', '/volumes/1234') self.assertEqual( - self.fake_client_factory.client.client.management_url, + fake_client_factory.client.client.management_url, 'http://localhost:8776/v1/project_id') def test_cinder_endpoint_template(self): @@ -165,7 +282,7 @@ def test_cinder_endpoint_template(self): self.api.get(self.context, '1234') self.assert_called('GET', '/volumes/1234') self.assertEqual( - 
self.fake_client_factory.client.client.management_url, + fake_client_factory.client.client.management_url, 'http://other_host:8776/v1/project_id') def test_get_non_existing_volume(self): @@ -185,7 +302,7 @@ def test_cinder_api_insecure(self): self.api.get(self.context, '1234') self.assert_called('GET', '/volumes/1234') self.assertEqual( - self.fake_client_factory.client.client.verify_cert, False) + fake_client_factory.client.client.verify_cert, False) def test_cinder_api_cacert_file(self): cacert = "/etc/ssl/certs/ca-certificates.crt" @@ -193,7 +310,7 @@ def test_cinder_api_cacert_file(self): self.api.get(self.context, '1234') self.assert_called('GET', '/volumes/1234') self.assertEqual( - self.fake_client_factory.client.client.verify_cert, cacert) + fake_client_factory.client.client.verify_cert, cacert) def test_cinder_http_retries(self): retries = 42 @@ -201,11 +318,85 @@ def test_cinder_http_retries(self): self.api.get(self.context, '1234') self.assert_called('GET', '/volumes/1234') self.assertEqual( - self.fake_client_factory.client.client.retries, retries) + fake_client_factory.client.client.retries, retries) + + +@mock.patch.object(cinder_client_v2, 'Client', fake_client_v2_factory) +class CinderV2TestCase(test.NoDBTestCase): + """Test case for cinder volume v2 api.""" + + def setUp(self): + super(CinderV2TestCase, self).setUp() + catalog = [{ + "type": "volumev2", + "name": "cinderv2", + "endpoints": [{"publicURL": "http://localhost:8776/v2/project_id"}] + }] + self.context = context.RequestContext('username', 'project_id', + service_catalog=catalog) + cinder.cinderclient(self.context) + self.api = cinder.API() + + def tearDown(self): + cinder.CONF.reset() + super(CinderV2TestCase, self).tearDown() + + def assert_called(self, *args, **kwargs): + fake_client_v2_factory.assert_called(*args, **kwargs) + + def test_context_with_catalog(self): + self.api.get(self.context, '1234') + self.assert_called('GET', '/volumes/1234') + self.assertEqual( + 
'http://localhost:8776/v2/project_id', + fake_client_v2_factory.client.client.management_url) + + def test_cinder_endpoint_template(self): + self.flags( + cinder_endpoint_template='http://other_host:8776/v2/%(project_id)s' + ) + self.api.get(self.context, '1234') + self.assert_called('GET', '/volumes/1234') + self.assertEqual( + 'http://other_host:8776/v2/project_id', + fake_client_v2_factory.client.client.management_url) + + def test_get_non_existing_volume(self): + self.assertRaises(exception.VolumeNotFound, self.api.get, self.context, + 'nonexisting') + + def test_volume_with_image_metadata(self): + volume = self.api.get(self.context, '5678') + self.assert_called('GET', '/volumes/5678') + self.assertIn('volume_image_metadata', volume) + self.assertEqual(_image_metadata, volume['volume_image_metadata']) + + def test_cinder_api_insecure(self): + # The True/False negation is awkward, but better for the client + # to pass us insecure=True and we check verify_cert == False + self.flags(cinder_api_insecure=True) + self.api.get(self.context, '1234') + self.assert_called('GET', '/volumes/1234') + self.assertFalse(fake_client_v2_factory.client.client.verify_cert) + + def test_cinder_api_cacert_file(self): + cacert = "/etc/ssl/certs/ca-certificates.crt" + self.flags(cinder_ca_certificates_file=cacert) + self.api.get(self.context, '1234') + self.assert_called('GET', '/volumes/1234') + self.assertEqual(cacert, + fake_client_v2_factory.client.client.verify_cert) + + def test_cinder_http_retries(self): + retries = 42 + self.flags(cinder_http_retries=retries) + self.api.get(self.context, '1234') + self.assert_called('GET', '/volumes/1234') + self.assertEqual(retries, fake_client_v2_factory.client.client.retries) def test_cinder_http_timeout(self): timeout = 123 self.flags(cinder_http_timeout=timeout) self.api.get(self.context, '1234') self.assertEqual(timeout, - self.fake_client_factory.client.client.timeout) + fake_client_v2_factory.client.client.timeout) diff --git 
a/nova/tests/volume/test_cinder.py b/nova/tests/volume/test_cinder.py index affa85a7c9..01bc42ce07 100644 --- a/nova/tests/volume/test_cinder.py +++ b/nova/tests/volume/test_cinder.py @@ -51,6 +51,7 @@ def setUp(self): self.mox.StubOutWithMock(cinder, 'cinderclient') self.mox.StubOutWithMock(cinder, '_untranslate_volume_summary_view') self.mox.StubOutWithMock(cinder, '_untranslate_snapshot_summary_view') + self.mox.StubOutWithMock(cinder, 'get_cinder_client_version') def test_get(self): volume_id = 'volume_id1' @@ -76,6 +77,7 @@ def test_get_failed(self): self.api.get, self.ctx, volume_id) def test_create(self): + cinder.get_cinder_client_version(self.ctx).AndReturn('2') cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_volume_summary_view(self.ctx, {'id': 'created_id'}) self.mox.ReplayAll() @@ -83,22 +85,26 @@ def test_create(self): self.api.create(self.ctx, 1, '', '') def test_create_failed(self): + cinder.get_cinder_client_version(self.ctx).AndReturn('2') cinder.cinderclient(self.ctx).AndRaise(cinder_exception.BadRequest('')) self.mox.ReplayAll() self.assertRaises(exception.InvalidInput, self.api.create, self.ctx, 1, '', '') + @mock.patch('nova.volume.cinder.get_cinder_client_version') @mock.patch('nova.volume.cinder.cinderclient') - def test_create_over_quota_failed(self, mock_cinderclient): + def test_create_over_quota_failed(self, mock_cinderclient, + mock_get_version): + mock_get_version.return_value = '2' mock_cinderclient.return_value.volumes.create.side_effect = ( cinder_exception.OverLimit(413)) self.assertRaises(exception.OverQuota, self.api.create, self.ctx, 1, '', '') mock_cinderclient.return_value.volumes.create.assert_called_once_with( 1, user_id=None, imageRef=None, availability_zone=None, - volume_type=None, display_description='', snapshot_id=None, - display_name='', project_id=None, metadata=None) + volume_type=None, description='', snapshot_id=None, name='', + project_id=None, metadata=None) def test_get_all(self): 
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py index 87ad68f57d..488ef1ae38 100644 --- a/nova/volume/cinder.py +++ b/nova/volume/cinder.py @@ -21,20 +21,22 @@ import copy import sys +from cinderclient import client as cinder_client from cinderclient import exceptions as cinder_exception from cinderclient import service_catalog -from cinderclient.v1 import client as cinder_client from oslo.config import cfg +import six.moves.urllib.parse as urlparse from nova import availability_zones as az from nova import exception from nova.i18n import _ +from nova.i18n import _LW from nova.openstack.common import log as logging from nova.openstack.common import strutils cinder_opts = [ cfg.StrOpt('cinder_catalog_info', - default='volume:cinder:publicURL', + default='volumev2:cinder:publicURL', help='Info to match when looking for cinder in the service ' 'catalog. Format is: separated values of the form: ' '::'), @@ -65,41 +67,17 @@ LOG = logging.getLogger(__name__) +CINDER_URL = None -def cinderclient(context): - # FIXME: the cinderclient ServiceCatalog object is mis-named. - # It actually contains the entire access blob. - # Only needed parts of the service catalog are passed in, see - # nova/context.py. 
- compat_catalog = { - 'access': {'serviceCatalog': context.service_catalog or []} - } - sc = service_catalog.ServiceCatalog(compat_catalog) - if CONF.cinder_endpoint_template: - url = CONF.cinder_endpoint_template % context.to_dict() - else: - info = CONF.cinder_catalog_info - service_type, service_name, endpoint_type = info.split(':') - # extract the region if set in configuration - if CONF.os_region_name: - attr = 'region' - filter_value = CONF.os_region_name - else: - attr = None - filter_value = None - url = sc.url_for(attr=attr, - filter_value=filter_value, - service_type=service_type, - service_name=service_name, - endpoint_type=endpoint_type) - - LOG.debug('Cinderclient connection created using URL: %s', url) - - c = cinder_client.Client(context.user_id, +def cinderclient(context): + global CINDER_URL + version = get_cinder_client_version(context) + c = cinder_client.Client(version, + context.user_id, context.auth_token, project_id=context.project_id, - auth_url=url, + auth_url=CINDER_URL, insecure=CONF.cinder_api_insecure, retries=CONF.cinder_http_retries, timeout=CONF.cinder_http_timeout, @@ -107,7 +85,7 @@ def cinderclient(context): # noauth extracts user_id:project_id from auth_token c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id, context.project_id) - c.client.management_url = url + c.client.management_url = CINDER_URL return c @@ -134,10 +112,14 @@ def _untranslate_volume_summary_view(context, vol): d['mountpoint'] = att['device'] else: d['attach_status'] = 'detached' - - d['display_name'] = vol.display_name - d['display_description'] = vol.display_description - + # NOTE(dzyu) volume(cinder) v2 API uses 'name' instead of 'display_name', + # and use 'description' instead of 'display_description' for volume. 
+ if hasattr(vol, 'display_name'): + d['display_name'] = vol.display_name + d['display_description'] = vol.display_description + else: + d['display_name'] = vol.name + d['display_description'] = vol.description # TODO(jdg): Information may be lost in this translation d['volume_type_id'] = vol.volume_type d['snapshot_id'] = vol.snapshot_id @@ -161,8 +143,16 @@ def _untranslate_snapshot_summary_view(context, snapshot): d['progress'] = snapshot.progress d['size'] = snapshot.size d['created_at'] = snapshot.created_at - d['display_name'] = snapshot.display_name - d['display_description'] = snapshot.display_description + + # NOTE(dzyu) volume(cinder) v2 API uses 'name' instead of 'display_name', + # 'description' instead of 'display_description' for snapshot. + if hasattr(snapshot, 'display_name'): + d['display_name'] = snapshot.display_name + d['display_description'] = snapshot.display_description + else: + d['display_name'] = snapshot.name + d['display_description'] = snapshot.description + d['volume_id'] = snapshot.volume_id d['project_id'] = snapshot.project_id d['volume_size'] = snapshot.size @@ -213,6 +203,61 @@ def wrapper(self, ctx, snapshot_id, *args, **kwargs): return wrapper +def get_cinder_client_version(context): + """Parse cinder client version by endpoint url. + + :param context: Nova auth context. + :return: str value(1 or 2). + """ + global CINDER_URL + # FIXME: the cinderclient ServiceCatalog object is mis-named. + # It actually contains the entire access blob. + # Only needed parts of the service catalog are passed in, see + # nova/context.py. 
+ compat_catalog = { + 'access': {'serviceCatalog': context.service_catalog or []} + } + sc = service_catalog.ServiceCatalog(compat_catalog) + if CONF.cinder_endpoint_template: + url = CONF.cinder_endpoint_template % context.to_dict() + else: + info = CONF.cinder_catalog_info + service_type, service_name, endpoint_type = info.split(':') + # extract the region if set in configuration + if CONF.os_region_name: + attr = 'region' + filter_value = CONF.os_region_name + else: + attr = None + filter_value = None + url = sc.url_for(attr=attr, + filter_value=filter_value, + service_type=service_type, + service_name=service_name, + endpoint_type=endpoint_type) + LOG.debug('Cinderclient connection created using URL: %s', url) + + valid_versions = ['v1', 'v2'] + magic_tuple = urlparse.urlsplit(url) + scheme, netloc, path, query, frag = magic_tuple + components = path.split("/") + for version in valid_versions: + if version in components[1]: + version = version[1:] + + if not CINDER_URL and version == '1': + msg = _LW('Cinder V1 API is deprecated as of the Juno ' + 'release, and Nova is still configured to use it. 
' + 'Enable the V2 API in Cinder and set ' + 'cinder_catalog_info in nova.conf to use it.') + LOG.warn(msg) + + CINDER_URL = url + return version + msg = _("Invalid client version, must be one of: %s") % valid_versions + raise cinder_exception.UnsupportedVersion(msg) + + class API(object): """API for interacting with the volume manager.""" @@ -312,8 +357,6 @@ def create(self, context, size, name, description, snapshot=None, snapshot_id = None kwargs = dict(snapshot_id=snapshot_id, - display_name=name, - display_description=description, volume_type=volume_type, user_id=context.user_id, project_id=context.project_id, @@ -321,6 +364,14 @@ def create(self, context, size, name, description, snapshot=None, metadata=metadata, imageRef=image_id) + version = get_cinder_client_version(context) + if version == '1': + kwargs['display_name'] = name + kwargs['display_description'] = description + elif version == '2': + kwargs['name'] = name + kwargs['description'] = description + try: item = cinderclient(context).volumes.create(size, **kwargs) return _untranslate_volume_summary_view(context, item) From a0c769c6e2d75b0ff74c649bc88f3804690ff3d7 Mon Sep 17 00:00:00 2001 From: "ChangBo Guo(gcb)" Date: Wed, 6 Aug 2014 14:57:51 +0800 Subject: [PATCH 286/486] Fixes wrong usage of mock.assert_not_called() There is no method assert_not_called in mock, use assertFalse(mock.called) instead of that. 
Change-Id: I846c37fe79931c85384950f6ad6ecd79d4071fec --- nova/tests/volume/test_cinder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/volume/test_cinder.py b/nova/tests/volume/test_cinder.py index affa85a7c9..e6597ec96e 100644 --- a/nova/tests/volume/test_cinder.py +++ b/nova/tests/volume/test_cinder.py @@ -145,11 +145,11 @@ def test_check_attach_availability_zone_differs(self): volume['availability_zone'] = 'zone1' self.assertIsNone(self.api.check_attach( self.ctx, volume, instance)) - mock_get_instance_az.assert_not_called() + self.assertFalse(mock_get_instance_az.called) volume['availability_zone'] = 'zone2' self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume, instance) - mock_get_instance_az.assert_not_called() + self.assertFalse(mock_get_instance_az.called) cinder.CONF.reset() def test_check_attach(self): From b8fe7b13cff3170cb1b87c5a26fbc8a8ec9422cb Mon Sep 17 00:00:00 2001 From: pkholkin Date: Fri, 11 Jul 2014 16:54:38 +0400 Subject: [PATCH 287/486] Optimize db.floating_ip_deallocate 'select for update' and 'update' operations were combined into a single 'update' operation problems with 'select for update' relate to spec https://review.openstack.org/#/c/97310/1 now 'floating_ip_deallocate' function returns number of updated rows in db Partial-Bug: #1343613 Change-Id: I60f5a8f3e1541983dea1589783927107c00c5fa4 --- nova/db/sqlalchemy/api.py | 21 +++++++-------------- nova/network/floating_ips.py | 6 +++--- nova/tests/db/test_db_api.py | 6 +++++- 3 files changed, 15 insertions(+), 18 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index cacee48dbb..6bd1db5ca4 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -905,21 +905,14 @@ def floating_ip_fixed_ip_associate(context, floating_address, @_retry_on_deadlock def floating_ip_deallocate(context, address): session = get_session() - with session.begin(): - floating_ip_ref = model_query(context, 
models.FloatingIp, - session=session).\ - filter_by(address=address).\ - filter(models.FloatingIp.project_id != null()).\ - with_lockmode('update').\ - first() - - if floating_ip_ref: - floating_ip_ref.update({'project_id': None, - 'host': None, - 'auto_assigned': False}) - - return floating_ip_ref + return model_query(context, models.FloatingIp, session=session).\ + filter_by(address=address).\ + filter(models.FloatingIp.project_id != null()).\ + update({'project_id': None, + 'host': None, + 'auto_assigned': False}, + synchronize_session=False) @require_context diff --git a/nova/network/floating_ips.py b/nova/network/floating_ips.py index 02beaf122b..345f51dbb4 100644 --- a/nova/network/floating_ips.py +++ b/nova/network/floating_ips.py @@ -278,10 +278,10 @@ def deallocate_floating_ip(self, context, address, LOG.exception(_("Failed to update usages deallocating " "floating IP")) - floating_ip_ref = objects.FloatingIP.deallocate(context, address) - # floating_ip_ref will be None if concurrently another + rows_updated = objects.FloatingIP.deallocate(context, address) + # number of updated rows will be 0 if concurrently another # API call has also deallocated the same floating ip - if floating_ip_ref is None: + if not rows_updated: if reservations: QUOTAS.rollback(context, reservations, project_id=project_id) else: diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py index 9ffa0241e5..90c5c002c6 100644 --- a/nova/tests/db/test_db_api.py +++ b/nova/tests/db/test_db_api.py @@ -3894,13 +3894,17 @@ def test_floating_ip_fixed_ip_associate_float_ip_not_found(self): def test_floating_ip_deallocate(self): values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'} float_ip = self._create_floating_ip(values) - db.floating_ip_deallocate(self.ctxt, float_ip.address) + rows_updated = db.floating_ip_deallocate(self.ctxt, float_ip.address) + self.assertEqual(1, rows_updated) updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id) 
self.assertIsNone(updated_float_ip.project_id) self.assertIsNone(updated_float_ip.host) self.assertFalse(updated_float_ip.auto_assigned) + def test_floating_ip_deallocate_address_not_found(self): + self.assertEqual(0, db.floating_ip_deallocate(self.ctxt, '2.2.2.2')) + def test_floating_ip_destroy(self): addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] float_ips = [self._create_floating_ip({'address': addr}) From 322cc9336fe6f6fe9b3f0da33c6b26a3e5ea9b0c Mon Sep 17 00:00:00 2001 From: Racha Ben Ali Date: Wed, 15 Jan 2014 00:42:31 -0800 Subject: [PATCH 288/486] Boot an instance with multiple vnics on same network If the same L2 network is requested multiple times for the same instance then creating ports on same network and attaching them to the same instance raises a DuplicateNetworks exception. Similarly, attaching multiple existent ports on same L2 network to the same instance raises a DuplicateNetworks exception. This is the default behavior that is defaulted by a newly introduced nova flag "allow_duplicate_networks" which is set to False by default. Not raising a DuplicateNetwork exception and allowing an instance to have multiple vnics on same network is useful for NfV service instances and in that case this newly introduced nova flag should be set to True. 
DocImpact: New neutron.allow_duplicate_networks configuration option Implements blueprint multiple-if-1-net Change-Id: Id4d633162c785c9b56b9c8426c0445770bc1352e Closes-Bug: #1187244 --- nova/network/neutronv2/api.py | 63 +++-- .../contrib/test_neutron_security_groups.py | 10 +- nova/tests/network/test_neutronv2.py | 246 ++++++++++++++---- 3 files changed, 255 insertions(+), 64 deletions(-) diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index f1a3654e44..fa2bf83efe 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -104,6 +104,10 @@ 'neutron client requests.', deprecated_group='DEFAULT', deprecated_name='neutron_ca_certificates_file'), + cfg.BoolOpt('allow_duplicate_networks', + default=False, + help='Allow an instance to have multiple vNICs attached to ' + 'the same Neutron network.'), ] CONF = cfg.CONF @@ -258,8 +262,8 @@ def allocate_for_instance(self, context, instance, **kwargs): requested_networks = kwargs.get('requested_networks') dhcp_opts = kwargs.get('dhcp_options', None) ports = {} - fixed_ips = {} net_ids = [] + ordered_networks = [] if requested_networks: for network_id, fixed_ip, port_id in requested_networks: if port_id: @@ -277,19 +281,30 @@ def allocate_for_instance(self, context, instance, **kwargs): # discard rather than popping. 
available_macs.discard(port['mac_address']) network_id = port['network_id'] - ports[network_id] = port - elif fixed_ip and network_id: - fixed_ips[network_id] = fixed_ip + ports[port_id] = port if network_id: net_ids.append(network_id) + ordered_networks.append((network_id, fixed_ip, port_id)) nets = self._get_available_networks(context, instance['project_id'], net_ids) - if not nets: LOG.warn(_("No network configured!"), instance=instance) return network_model.NetworkInfo([]) + # if this function is directly called without a requested_network param + # or if it is indirectly called through allocate_port_for_instance() + # with None params=(network_id=None, requested_ip=None, port_id=None): + if (not requested_networks + or requested_networks == [(None, None, None)]): + # bug/1267723 - if no network is requested and more + # than one is available then raise NetworkAmbiguous Exception + if len(nets) > 1: + msg = _("Multiple possible networks found, use a Network " + "ID to be more specific.") + raise exception.NetworkAmbiguous(msg) + ordered_networks.append((nets[0]['id'], None, None)) + security_groups = kwargs.get('security_groups', []) security_group_ids = [] @@ -328,7 +343,20 @@ def allocate_for_instance(self, context, instance, **kwargs): touched_port_ids = [] created_port_ids = [] ports_in_requested_order = [] - for network in nets: + nets_in_requested_order = [] + for network_id, fixed_ip, port_id in ordered_networks: + # Network lookup for available network_id + network = None + for net in nets: + if net['id'] == network_id: + network = net + break + # if network_id did not pass validate_networks() and not available + # here then skip it safely not continuing with a None Network + else: + continue + + nets_in_requested_order.append(network) # If security groups are requested on an instance then the # network must has a subnet associated with it. 
Some plugins # implement the port-security extension which requires @@ -345,21 +373,21 @@ def allocate_for_instance(self, context, instance, **kwargs): port_req_body = {'port': {'device_id': instance['uuid'], 'device_owner': zone}} try: - port = ports.get(network_id) self._populate_neutron_extension_values(context, instance, port_req_body) # Requires admin creds to set port bindings port_client = (neutron if not self._has_port_binding_extension(context) else neutronv2.get_client(context, admin=True)) - if port: + if port_id: + port = ports[port_id] port_client.update_port(port['id'], port_req_body) touched_port_ids.append(port['id']) ports_in_requested_order.append(port['id']) else: created_port = self._create_port( port_client, instance, network_id, - port_req_body, fixed_ips.get(network_id), + port_req_body, fixed_ip, security_group_ids, available_macs, dhcp_opts) created_port_ids.append(created_port) ports_in_requested_order.append(created_port) @@ -387,7 +415,8 @@ def allocate_for_instance(self, context, instance, **kwargs): msg = _("Failed to delete port %s") LOG.exception(msg, port_id) - nw_info = self.get_instance_nw_info(context, instance, networks=nets, + nw_info = self.get_instance_nw_info(context, instance, + networks=nets_in_requested_order, port_ids=ports_in_requested_order) # NOTE(danms): Only return info about ports we created in this run. 
# In the initial allocation case, this will be everything we created, @@ -690,8 +719,9 @@ def validate_networks(self, context, requested_networks, num_instances): address=fixed_ip, instance_uuid=i_uuid) - if net_id in instance_on_net_ids: - raise exception.NetworkDuplicated(network_id=net_id) + if (not CONF.neutron.allow_duplicate_networks and + net_id in instance_on_net_ids): + raise exception.NetworkDuplicated(network_id=net_id) instance_on_net_ids.append(net_id) # Now check to see if all requested networks exist @@ -709,10 +739,11 @@ def validate_networks(self, context, requested_networks, num_instances): requested_netid_set = set(net_ids_requested) returned_netid_set = set([net['id'] for net in nets]) lostid_set = requested_netid_set - returned_netid_set - id_str = '' - for _id in lostid_set: - id_str = id_str and id_str + ', ' + _id or _id - raise exception.NetworkNotFound(network_id=id_str) + if lostid_set: + id_str = '' + for _id in lostid_set: + id_str = id_str and id_str + ', ' + _id or _id + raise exception.NetworkNotFound(network_id=id_str) # Note(PhilD): Ideally Nova would create all required ports as part of # network validation, but port creation requires some details diff --git a/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py index 6b2db76915..0fffeeaeb0 100644 --- a/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py +++ b/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py @@ -814,8 +814,14 @@ def list_security_groups(self, **_params): return {'security_groups': ret} def list_networks(self, **_params): - return {'networks': - [network for network in self._fake_networks.values()]} + # neutronv2/api.py _get_available_networks calls this assuming + # search_opts filter "shared" is implemented and not ignored + shared = _params.get("shared", None) + if shared: + return {'networks': []} + else: + return 
{'networks': + [network for network in self._fake_networks.values()]} def list_ports(self, **_params): ret = [] diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index 1ff73b9361..8899415167 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -228,8 +228,19 @@ def setUp(self): 'name': 'out-of-this-world', 'router:external': True, 'tenant_id': 'should-be-an-admin'}] + # A network request with a duplicate + self.nets6 = [] + self.nets6.append(self.nets1[0]) + self.nets6.append(self.nets1[0]) + # A network request with a combo + self.nets7 = [] + self.nets7.append(self.nets2[1]) + self.nets7.append(self.nets1[0]) + self.nets7.append(self.nets2[1]) + self.nets7.append(self.nets1[0]) + self.nets = [self.nets1, self.nets2, self.nets3, - self.nets4, self.nets5] + self.nets4, self.nets5, self.nets6, self.nets7] self.port_address = '10.0.1.2' self.port_data1 = [{'network_id': 'my_netid1', @@ -357,30 +368,49 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs): if macs: macs = set(macs) req_net_ids = [] + ordered_networks = [] + port = {} if 'requested_networks' in kwargs: - for id, fixed_ip, port_id in kwargs['requested_networks']: + for n_id, fixed_ip, port_id in kwargs['requested_networks']: if port_id: - self.moxed_client.show_port(port_id).AndReturn( - {'port': {'id': 'my_portid1', - 'network_id': 'my_netid1', - 'mac_address': 'my_mac1', - 'device_id': kwargs.get('_device') and - self.instance2['uuid'] or ''}}) - - ports['my_netid1'] = self.port_data1[0] - id = 'my_netid1' - if macs is not None: - macs.discard('my_mac1') + if port_id == 'my_portid3': + self.moxed_client.show_port(port_id).AndReturn( + {'port': {'id': 'my_portid3', + 'network_id': 'my_netid1', + 'mac_address': 'my_mac1', + 'device_id': kwargs.get('_device') and + self.instance2['uuid'] or + ''}}) + ports['my_netid1'] = [self.port_data1[0], + self.port_data3[0]] + ports[port_id] = self.port_data3[0] + n_id = 
'my_netid1' + if macs is not None: + macs.discard('my_mac1') + else: + self.moxed_client.show_port(port_id).AndReturn( + {'port': {'id': 'my_portid1', + 'network_id': 'my_netid1', + 'mac_address': 'my_mac1', + 'device_id': kwargs.get('_device') and + self.instance2['uuid'] or + ''}}) + ports[port_id] = self.port_data1[0] + n_id = 'my_netid1' + if macs is not None: + macs.discard('my_mac1') else: - fixed_ips[id] = fixed_ip - req_net_ids.append(id) - expected_network_order = req_net_ids + fixed_ips[n_id] = fixed_ip + req_net_ids.append(n_id) + ordered_networks.append((n_id, fixed_ip, port_id)) else: - expected_network_order = [n['id'] for n in nets] + for n in nets: + ordered_networks.append((n['id'], None, None)) if kwargs.get('_break') == 'pre_list_networks': self.mox.ReplayAll() return api - search_ids = [net['id'] for net in nets if net['id'] in req_net_ids] + # search all req_net_ids as in api.py + search_ids = req_net_ids if search_ids: mox_list_params = {'id': mox.SameElementsAs(search_ids)} @@ -395,18 +425,34 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs): self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': []}) + if (('requested_networks' not in kwargs + or kwargs['requested_networks'] == [(None, None, None)]) + and len(nets) > 1): + self.mox.ReplayAll() + return api + ports_in_requested_net_order = [] - for net_id in expected_network_order: + nets_in_requested_net_order = [] + for net_id, fixed_ip, port_id in ordered_networks: port_req_body = { 'port': { 'device_id': self.instance['uuid'], 'device_owner': 'compute:nova', }, } + # Network lookup for available network_id + network = None + for net in nets: + if net['id'] == net_id: + network = net + break + # if net_id did not pass validate_networks() and not available + # here then skip it safely not continuing with a None Network + else: + continue if has_portbinding: port_req_body['port']['binding:host_id'] = ( self.instance.get('host')) - port = ports.get(net_id, 
None) if not has_portbinding: api._populate_neutron_extension_values(mox.IgnoreArg(), self.instance, mox.IgnoreArg()).AndReturn(None) @@ -417,8 +463,8 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs): AndReturn(has_portbinding) api._has_port_binding_extension(mox.IgnoreArg()).\ AndReturn(has_portbinding) - if port: - port_id = port['id'] + if port_id: + port = ports[port_id] self.moxed_client.update_port(port_id, MyComparator(port_req_body) ).AndReturn( @@ -448,9 +494,11 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs): MyComparator(port_req_body)).AndReturn(res_port) ports_in_requested_net_order.append(res_port['port']['id']) + nets_in_requested_net_order.append(network) + api.get_instance_nw_info(mox.IgnoreArg(), self.instance, - networks=nets, + networks=nets_in_requested_net_order, port_ids=ports_in_requested_net_order ).AndReturn(self._returned_nw_info) self.mox.ReplayAll() @@ -783,7 +831,10 @@ def test_allocate_for_instance_1(self): def test_allocate_for_instance_2(self): # Allocate one port in two networks env. - self._allocate_for_instance(2) + api = self._stub_allocate_for_instance(net_idx=2) + self.assertRaises(exception.NetworkAmbiguous, + api.allocate_for_instance, + self.context, self.instance) def test_allocate_for_instance_accepts_macs_kwargs_None(self): # The macs kwarg should be accepted as None. @@ -859,6 +910,23 @@ def test_allocate_for_instance_mac_conflicting_requested_port(self): self.instance, requested_networks=requested_networks, macs=set(['unknown:mac'])) + def test_allocate_for_instance_without_requested_networks(self): + api = self._stub_allocate_for_instance(net_idx=3) + self.assertRaises(exception.NetworkAmbiguous, + api.allocate_for_instance, + self.context, self.instance) + + def test_allocate_for_instance_with_requested_non_available_network(self): + """verify that a non available network is ignored. 
+ self.nets2 (net_idx=2) is composed of self.nets3[0] and self.nets3[1] + Do not create a port on a non available network self.nets3[2]. + """ + requested_networks = [ + (net['id'], None, None) + for net in (self.nets3[0], self.nets3[2], self.nets3[1])] + self._allocate_for_instance(net_idx=2, + requested_networks=requested_networks) + def test_allocate_for_instance_with_requested_networks(self): # specify only first and last network requested_networks = [ @@ -874,7 +942,7 @@ def test_allocate_for_instance_with_requested_networks_with_fixedip(self): requested_networks=requested_networks) def test_allocate_for_instance_with_requested_networks_with_port(self): - requested_networks = [(None, None, 'myportid1')] + requested_networks = [(None, None, 'my_portid1')] self._allocate_for_instance(net_idx=1, requested_networks=requested_networks) @@ -903,12 +971,11 @@ def test_allocate_for_instance_ex1(self): self.mox.StubOutWithMock(api, '_has_port_binding_extension') api._has_port_binding_extension(mox.IgnoreArg()).MultipleTimes().\ AndReturn(False) + requested_networks = [ + (net['id'], None, None) + for net in (self.nets2[0], self.nets2[1])] self.moxed_client.list_networks( - tenant_id=self.instance['project_id'], - shared=False).AndReturn( - {'networks': self.nets2}) - self.moxed_client.list_networks(shared=True).AndReturn( - {'networks': []}) + id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2}) index = 0 for network in self.nets2: binding_port_req_body = { @@ -941,7 +1008,8 @@ def test_allocate_for_instance_ex1(self): self.mox.ReplayAll() self.assertRaises(exception.PortLimitExceeded, api.allocate_for_instance, - self.context, self.instance) + self.context, self.instance, + requested_networks=requested_networks) def test_allocate_for_instance_ex2(self): """verify we have no port to delete @@ -955,12 +1023,11 @@ def test_allocate_for_instance_ex2(self): self.mox.StubOutWithMock(api, '_has_port_binding_extension') 
api._has_port_binding_extension(mox.IgnoreArg()).MultipleTimes().\ AndReturn(False) + requested_networks = [ + (net['id'], None, None) + for net in (self.nets2[0], self.nets2[1])] self.moxed_client.list_networks( - tenant_id=self.instance['project_id'], - shared=False).AndReturn( - {'networks': self.nets2}) - self.moxed_client.list_networks(shared=True).AndReturn( - {'networks': []}) + id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2}) binding_port_req_body = { 'port': { 'device_id': self.instance['uuid'], @@ -982,7 +1049,8 @@ def test_allocate_for_instance_ex2(self): Exception("fail to create port")) self.mox.ReplayAll() self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance, - self.context, self.instance) + self.context, self.instance, + requested_networks=requested_networks) def test_allocate_for_instance_no_port_or_network(self): class BailOutEarly(Exception): @@ -1211,9 +1279,10 @@ def test_validate_networks_ex_2(self): except exception.NetworkNotFound as ex: self.assertIn("my_netid2, my_netid3", str(ex)) - def test_validate_networks_duplicate(self): + def test_validate_networks_duplicate_disable(self): """Verify that the correct exception is thrown when duplicate - network ids are passed to validate_networks. 
+ network ids are passed to validate_networks, when nova config flag + allow_duplicate_networks is set to its default value: False """ requested_networks = [('my_netid1', None, None), ('my_netid1', None, None)] @@ -1222,8 +1291,59 @@ def test_validate_networks_duplicate(self): neutronv2.get_client(None) api = neutronapi.API() self.assertRaises(exception.NetworkDuplicated, - api.validate_networks, - self.context, requested_networks, 1) + api.validate_networks, + self.context, requested_networks, 1) + + def test_validate_networks_duplicate_enable(self): + """Verify that no duplicateNetworks exception is thrown when duplicate + network ids are passed to validate_networks, when nova config flag + allow_duplicate_networks is set to its non default value: True + """ + self.flags(allow_duplicate_networks=True, group='neutron') + requested_networks = [('my_netid1', None, None), + ('my_netid1', None, None)] + ids = ['my_netid1', 'my_netid1'] + + self.moxed_client.list_networks( + id=mox.SameElementsAs(ids)).AndReturn( + {'networks': self.nets1}) + self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn( + {'ports': []}) + self.moxed_client.show_quota( + tenant_id='my_tenantid').AndReturn( + {'quota': {'port': 50}}) + self.mox.ReplayAll() + api = neutronapi.API() + api.validate_networks(self.context, requested_networks, 1) + + def test_allocate_for_instance_with_requested_networks_duplicates(self): + # specify a duplicate network to allocate to instance + self.flags(allow_duplicate_networks=True, group='neutron') + requested_networks = [ + (net['id'], None, None) + for net in (self.nets6[0], self.nets6[1])] + self._allocate_for_instance(net_idx=6, + requested_networks=requested_networks) + + def test_allocate_for_instance_requested_networks_duplicates_port(self): + # specify first port and last port that are in same network + self.flags(allow_duplicate_networks=True, group='neutron') + requested_networks = [ + (None, None, port['id']) + for port in 
(self.port_data1[0], self.port_data3[0])] + self._allocate_for_instance(net_idx=6, + requested_networks=requested_networks) + + def test_allocate_for_instance_requested_networks_duplicates_combo(self): + # specify a combo net_idx=7 : net2, port in net1, net2, port in net1 + self.flags(allow_duplicate_networks=True, group='neutron') + requested_networks = [ + ('my_netid2', None, None), + (None, None, self.port_data1[0]['id']), + ('my_netid2', None, None), + (None, None, self.port_data3[0]['id'])] + self._allocate_for_instance(net_idx=7, + requested_networks=requested_networks) def test_validate_networks_not_specified(self): requested_networks = [] @@ -1315,10 +1435,15 @@ def test_validate_networks_no_subnet_id(self): api.validate_networks, self.context, requested_networks, 1) - def test_validate_networks_ports_in_same_network(self): + def test_validate_networks_ports_in_same_network_disable(self): + """Verify that duplicateNetworks exception is thrown when ports on same + duplicate network are passed to validate_networks, when nova config + flag allow_duplicate_networks is set to its default False + """ + self.flags(allow_duplicate_networks=False, group='neutron') port_a = self.port_data3[0] port_a['fixed_ips'] = {'ip_address': '10.0.0.2', - 'subnet_id': 'subnet_id'} + 'subnet_id': 'subnet_id'} port_b = self.port_data1[0] self.assertEqual(port_a['network_id'], port_b['network_id']) for port in [port_a, port_b]: @@ -1326,9 +1451,11 @@ def test_validate_networks_ports_in_same_network(self): port['device_owner'] = None requested_networks = [(None, None, port_a['id']), - (None, None, port_b['id'])] - self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a}) - self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b}) + (None, None, port_b['id'])] + self.moxed_client.show_port(port_a['id']).AndReturn( + {'port': port_a}) + self.moxed_client.show_port(port_b['id']).AndReturn( + {'port': port_b}) self.mox.ReplayAll() @@ -1337,6 +1464,33 @@ def 
test_validate_networks_ports_in_same_network(self): api.validate_networks, self.context, requested_networks, 1) + def test_validate_networks_ports_in_same_network_enable(self): + """Verify that duplicateNetworks exception is not thrown when ports + on same duplicate network are passed to validate_networks, when nova + config flag allow_duplicate_networks is set to its True + """ + self.flags(allow_duplicate_networks=True, group='neutron') + port_a = self.port_data3[0] + port_a['fixed_ips'] = {'ip_address': '10.0.0.2', + 'subnet_id': 'subnet_id'} + port_b = self.port_data1[0] + self.assertEqual(port_a['network_id'], port_b['network_id']) + for port in [port_a, port_b]: + port['device_id'] = None + port['device_owner'] = None + + requested_networks = [(None, None, port_a['id']), + (None, None, port_b['id'])] + self.moxed_client.show_port(port_a['id']).AndReturn( + {'port': port_a}) + self.moxed_client.show_port(port_b['id']).AndReturn( + {'port': port_b}) + + self.mox.ReplayAll() + + api = neutronapi.API() + api.validate_networks(self.context, requested_networks, 1) + def test_validate_networks_ports_not_in_same_network(self): port_a = self.port_data3[0] port_a['fixed_ips'] = {'ip_address': '10.0.0.2', From 06df067ab3fa4b3c6be63ad49741a114373cc1ad Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Wed, 13 Aug 2014 18:06:19 +0300 Subject: [PATCH 289/486] VMware: implement get_host_ip_addr This patch fixes a regression caused by commit 1deb31f85a8f5d1e261b2cf1eddc537a5da7f60b. The function get_host_ip_addr is used by the resource_tracker and if it is not implemented all resize operations fail. Also put back the check for missing host, user, password config options in the driver constructor. 
Closes-bug: #1356449 Change-Id: I4ba6a0f9c3b9c2ce0e1750f8414625235d01d422 --- nova/tests/virt/vmwareapi/test_driver_api.py | 40 ++++++++++++++++++++ nova/virt/vmwareapi/driver.py | 10 +++++ 2 files changed, 50 insertions(+) diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index 6a3aefe248..a3ac3d2d77 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -120,6 +120,43 @@ def _fake_create_session(inst): inst._session = session +class VMwareDriverStartupTestCase(test.NoDBTestCase): + def _start_driver_with_flags(self, expected_exception_type, startup_flags): + self.flags(**startup_flags) + with mock.patch( + 'nova.virt.vmwareapi.driver.VMwareAPISession.__init__'): + e = self.assertRaises( + Exception, driver.VMwareVCDriver, None) # noqa + self.assertIs(type(e), expected_exception_type) + + def test_start_driver_no_user(self): + self._start_driver_with_flags( + Exception, + dict(host_ip='ip', host_password='password', + group='vmware')) + + def test_start_driver_no_host(self): + self._start_driver_with_flags( + Exception, + dict(host_username='username', host_password='password', + group='vmware')) + + def test_start_driver_no_password(self): + self._start_driver_with_flags( + Exception, + dict(host_ip='ip', host_username='username', + group='vmware')) + + def test_start_driver_with_user_host_password(self): + # Getting the InvalidInput exception signifies that no exception + # is raised regarding missing user/password/host + self._start_driver_with_flags( + nova.exception.InvalidInput, + dict(host_ip='ip', host_password='password', + host_username="user", datastore_regex="bad(regex", + group='vmware')) + + class VMwareSessionTestCase(test.NoDBTestCase): def _fake_is_vim_object(self, module): @@ -377,6 +414,9 @@ def tearDown(self): vmwareapi_fake.cleanup() nova.tests.image.fake.FakeImageService_reset() + def test_get_host_ip_addr(self): + 
self.assertEqual('test_url', self.conn.get_host_ip_addr()) + def _set_exception_vars(self): self.wait_task = self.conn._session._wait_for_task self.call_method = self.conn._session._call_method diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index bb69935293..bba5208be6 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -118,6 +118,12 @@ class VMwareVCDriver(driver.ComputeDriver): def __init__(self, virtapi, scheme="https"): super(VMwareVCDriver, self).__init__(virtapi) + if (CONF.vmware.host_ip is None or + CONF.vmware.host_username is None or + CONF.vmware.host_password is None): + raise Exception(_("Must specify host_ip, host_username and " + "host_password to use vmwareapi.VMwareVCDriver")) + self._datastore_regex = None if CONF.vmware.datastore_regex: try: @@ -444,6 +450,10 @@ def get_volume_connector(self, instance): _volumeops = self._get_volumeops_for_compute_node(instance['node']) return _volumeops.get_volume_connector(instance) + def get_host_ip_addr(self): + """Returns the IP address of the vCenter host.""" + return CONF.vmware.host_ip + def snapshot(self, context, instance, image_id, update_task_state): """Create snapshot from a running VM instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) From 7a1e50772154b0b0907592582b91eb89407dfb8b Mon Sep 17 00:00:00 2001 From: Burt Holzman Date: Fri, 11 Jul 2014 16:11:00 -0500 Subject: [PATCH 290/486] Make nova-api use quotas object for reservations This makes nova-api use the quotas object for reservations; more work needs to be done to convert the rest. 
Partial-Bug: #1131395 Change-Id: I3e4d233278966c79019235ac8836a825c46c27ea --- nova/compute/api.py | 87 ++++++++++---------------- nova/tests/compute/test_compute_api.py | 13 ++-- 2 files changed, 43 insertions(+), 57 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index d4305ce623..b75f701256 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -331,8 +331,9 @@ def _check_num_instances_quota(self, context, instance_type, min_count, # Check the quota try: - reservations = QUOTAS.reserve(context, instances=max_count, - cores=req_cores, ram=req_ram) + quotas = objects.Quotas(context) + quotas.reserve(context, instances=max_count, + cores=req_cores, ram=req_ram) except exception.OverQuota as exc: # OK, we exceeded quota; let's figure out why... quotas = exc.kwargs['quotas'] @@ -388,7 +389,7 @@ def _check_num_instances_quota(self, context, instance_type, min_count, used=used, allowed=total_allowed, resource=resource) - return max_count, reservations + return max_count, quotas def _check_metadata_properties_quota(self, context, metadata=None): """Enforce quota limits on metadata properties.""" @@ -802,7 +803,7 @@ def _provision_instances(self, context, instance_type, min_count, max_count, base_options, boot_meta, security_groups, block_device_mapping): # Reserve quotas - num_instances, quota_reservations = self._check_num_instances_quota( + num_instances, quotas = self._check_num_instances_quota( context, instance_type, min_count, max_count) LOG.debug("Going to run %s instances..." 
% num_instances) instances = [] @@ -832,10 +833,10 @@ def _provision_instances(self, context, instance_type, min_count, except exception.ObjectActionError: pass finally: - QUOTAS.rollback(context, quota_reservations) + quotas.rollback() # Commit the reservations - QUOTAS.commit(context, quota_reservations) + quotas.commit() return instances def _get_bdm_image_metadata(self, context, block_device_mapping, @@ -1416,7 +1417,6 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs): host = instance['host'] bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) - reservations = None project_id, user_id = quotas_obj.ids_from_instance(context, instance) @@ -1440,7 +1440,7 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs): instance=instance) original_task_state = instance.task_state - + quotas = None try: # NOTE(maoy): no expected_task_state needs to be set instance.update(instance_attrs) @@ -1450,10 +1450,10 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs): # NOTE(comstud): If we delete the instance locally, we'll # commit the reservations here. Otherwise, the manager side # will commit or rollback the reservations based on success. - reservations = self._create_reservations(context, - instance, - original_task_state, - project_id, user_id) + quotas = self._create_reservations(context, + instance, + original_task_state, + project_id, user_id) if self.cell_type == 'api': # NOTE(comstud): If we're in the API cell, we need to @@ -1462,11 +1462,7 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs): # commit reservations here early until we have a better # way to deal with quotas with cells. 
cb(context, instance, bdms, reservations=None) - if reservations: - QUOTAS.commit(context, - reservations, - project_id=project_id, - user_id=user_id) + quotas.commit() return if not host: @@ -1479,11 +1475,7 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs): self.notifier, context, instance, "%s.end" % delete_type, system_metadata=instance.system_metadata) - if reservations: - QUOTAS.commit(context, - reservations, - project_id=project_id, - user_id=user_id) + quotas.commit() return except exception.ObjectActionError: instance.refresh() @@ -1502,42 +1494,30 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs): task_states.SOFT_DELETING): LOG.info(_('Instance is already in deleting state, ' 'ignoring this request'), instance=instance) - if reservations: - QUOTAS.rollback(context, reservations, - project_id=project_id, - user_id=user_id) + quotas.rollback() return self._record_action_start(context, instance, instance_actions.DELETE) - cb(context, instance, bdms, reservations=reservations) + cb(context, instance, bdms, + reservations=quotas.reservations) except exception.ComputeHostNotFound: pass if not is_up: # If compute node isn't up, just delete from DB self._local_delete(context, instance, bdms, delete_type, cb) - if reservations: - QUOTAS.commit(context, - reservations, - project_id=project_id, - user_id=user_id) - reservations = None + quotas.commit() + except exception.InstanceNotFound: # NOTE(comstud): Race condition. Instance already gone. 
- if reservations: - QUOTAS.rollback(context, - reservations, - project_id=project_id, - user_id=user_id) + if quotas: + quotas.rollback() except Exception: with excutils.save_and_reraise_exception(): - if reservations: - QUOTAS.rollback(context, - reservations, - project_id=project_id, - user_id=user_id) + if quotas: + quotas.rollback() def _confirm_resize_on_deleting(self, context, instance): # If in the middle of a resize, use confirm_resize to @@ -1618,13 +1598,14 @@ def _create_reservations(self, context, instance, original_task_state, instance_memory_mb = (old_inst_type['memory_mb'] + vram_mb) LOG.debug("going to delete a resizing instance") - reservations = QUOTAS.reserve(context, - project_id=project_id, - user_id=user_id, - instances=-1, - cores=-instance_vcpus, - ram=-instance_memory_mb) - return reservations + quotas = objects.Quotas(context) + quotas.reserve(context, + project_id=project_id, + user_id=user_id, + instances=-1, + cores=-instance_vcpus, + ram=-instance_memory_mb) + return quotas def _local_delete(self, context, instance, bdms, delete_type, cb): LOG.warning(_("instance's host %s is down, deleting from " @@ -1722,7 +1703,7 @@ def restore(self, context, instance): """Restore a previously deleted (but not reclaimed) instance.""" # Reserve quotas flavor = instance.get_flavor() - num_instances, quota_reservations = self._check_num_instances_quota( + num_instances, quotas = self._check_num_instances_quota( context, flavor, 1, 1) self._record_action_start(context, instance, instance_actions.RESTORE) @@ -1739,10 +1720,10 @@ def restore(self, context, instance): instance.deleted_at = None instance.save(expected_task_state=[None]) - QUOTAS.commit(context, quota_reservations) + quotas.commit() except Exception: with excutils.save_and_reraise_exception(): - QUOTAS.rollback(context, quota_reservations) + quotas.rollback() @wrap_check_policy @check_instance_lock diff --git a/nova/tests/compute/test_compute_api.py b/nova/tests/compute/test_compute_api.py 
index 73e9ba4e7e..1e5873bee7 100644 --- a/nova/tests/compute/test_compute_api.py +++ b/nova/tests/compute/test_compute_api.py @@ -171,6 +171,9 @@ def test_create_quota_exceeded_messages(self): quota.QUOTAS.limit_check(self.context, metadata_items=mox.IsA(int)) quota.QUOTAS.reserve(self.context, instances=40, cores=mox.IsA(int), + expire=mox.IgnoreArg(), + project_id=mox.IgnoreArg(), + user_id=mox.IgnoreArg(), ram=mox.IsA(int)).AndRaise(quota_exception) self.mox.ReplayAll() @@ -568,7 +571,7 @@ def _test_downed_host_part(self, inst, updates, delete_time, delete_type): system_metadata=inst.system_metadata) def _test_delete(self, delete_type, **attrs): - reservations = 'fake-resv' + reservations = ['fake-resv'] inst = self._create_instance_obj() inst.update(attrs) inst._context = self.context @@ -622,6 +625,7 @@ def _test_delete(self, delete_type, **attrs): self._test_delete_resizing_part(inst, deltas) quota.QUOTAS.reserve(self.context, project_id=inst.project_id, user_id=inst.user_id, + expire=mox.IgnoreArg(), **deltas).AndReturn(reservations) # NOTE(comstud): This is getting messy. 
But what we are wanting @@ -745,6 +749,7 @@ def test_delete_forced(self): def test_delete_fast_if_host_not_set(self): inst = self._create_instance_obj() inst.host = '' + quotas = quotas_obj.Quotas(self.context) updates = {'progress': 0, 'task_state': task_states.DELETING} self.mox.StubOutWithMock(inst, 'save') @@ -769,7 +774,7 @@ def test_delete_fast_if_host_not_set(self): self.compute_api._create_reservations(self.context, inst, inst.task_state, inst.project_id, inst.user_id - ).AndReturn(None) + ).AndReturn(quotas) if self.cell_type == 'api': rpcapi.terminate_instance( @@ -1969,8 +1974,8 @@ def get_image(context, image_href): None, new_image, flavor, {}, []) self.assertEqual(vm_mode.XEN, instance.vm_mode) - @mock.patch('nova.quota.QUOTAS.commit') - @mock.patch('nova.quota.QUOTAS.reserve') + @mock.patch('nova.objects.Quotas.commit') + @mock.patch('nova.objects.Quotas.reserve') @mock.patch('nova.objects.Instance.save') @mock.patch('nova.objects.InstanceAction.action_start') def test_restore(self, action_start, instance_save, quota_reserve, From cafa300a72313ccc7831f20ac1ca0df117a47cf5 Mon Sep 17 00:00:00 2001 From: Claudiu Belu Date: Wed, 16 Oct 2013 09:52:19 -0700 Subject: [PATCH 291/486] Adds tests for Hyper-V VM Utils Adds unit tests for Hyper-V VMUtils and VMUtilsV2 classes. Co-Authored-By: Bogdan Teleaga Partial-Bug: #1220256 Change-Id: I87a13697b0a83dc8905573081f735b542bc9bf17 --- nova/tests/virt/hyperv/test_vmutils.py | 371 ++++++++++++++++++++++- nova/tests/virt/hyperv/test_vmutilsv2.py | 242 +++------------ 2 files changed, 417 insertions(+), 196 deletions(-) diff --git a/nova/tests/virt/hyperv/test_vmutils.py b/nova/tests/virt/hyperv/test_vmutils.py index c9e029ba3d..a883ce6ee8 100644 --- a/nova/tests/virt/hyperv/test_vmutils.py +++ b/nova/tests/virt/hyperv/test_vmutils.py @@ -1,4 +1,4 @@ -# Copyright 2013 Cloudbase Solutions Srl +# Copyright 2014 Cloudbase Solutions Srl # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,7 +15,9 @@ import mock +from nova import exception from nova import test +from nova.virt.hyperv import constants from nova.virt.hyperv import vmutils @@ -24,10 +26,46 @@ class VMUtilsTestCase(test.NoDBTestCase): _FAKE_VM_NAME = 'fake_vm' _FAKE_MEMORY_MB = 2 + _FAKE_VCPUS_NUM = 4 + _FAKE_JOB_PATH = 'fake_job_path' + _FAKE_RET_VAL = 0 + _FAKE_RET_VAL_BAD = -1 + _FAKE_CTRL_PATH = 'fake_ctrl_path' + _FAKE_CTRL_ADDR = 0 + _FAKE_DRIVE_ADDR = 0 + _FAKE_MOUNTED_DISK_PATH = 'fake_mounted_disk_path' _FAKE_VM_PATH = "fake_vm_path" _FAKE_VHD_PATH = "fake_vhd_path" _FAKE_DVD_PATH = "fake_dvd_path" _FAKE_VOLUME_DRIVE_PATH = "fake_volume_drive_path" + _FAKE_SNAPSHOT_PATH = "fake_snapshot_path" + _FAKE_RES_DATA = "fake_res_data" + _FAKE_HOST_RESOURCE = "fake_host_resource" + _FAKE_CLASS = "FakeClass" + _FAKE_RES_PATH = "fake_res_path" + _FAKE_RES_NAME = 'fake_res_name' + _FAKE_ADDRESS = "fake_address" + _FAKE_JOB_STATUS_DONE = 7 + _FAKE_JOB_STATUS_BAD = -1 + _FAKE_JOB_DESCRIPTION = "fake_job_description" + _FAKE_ERROR = "fake_error" + _FAKE_ELAPSED_TIME = 0 + _CONCRETE_JOB = "Msvm_ConcreteJob" + _FAKE_DYNAMIC_MEMORY_RATIO = 1.0 + + _FAKE_SUMMARY_INFO = {'NumberOfProcessors': 4, + 'EnabledState': 2, + 'MemoryUsage': 2, + 'UpTime': 1} + + _DEFINE_SYSTEM = 'DefineVirtualSystem' + _DESTROY_SYSTEM = 'DestroyVirtualSystem' + _DESTROY_SNAPSHOT = 'RemoveVirtualSystemSnapshot' + _ADD_RESOURCE = 'AddVirtualSystemResources' + _REMOVE_RESOURCE = 'RemoveVirtualSystemResources' + _SETTING_TYPE = 'SettingType' + + _VIRTUAL_SYSTEM_TYPE_REALIZED = 3 def setUp(self): self._vmutils = vmutils.VMUtils() @@ -40,6 +78,20 @@ def test_enable_vm_metrics_collection(self): self._vmutils.enable_vm_metrics_collection, self._FAKE_VM_NAME) + def test_get_vm_summary_info(self): + self._lookup_vm() + mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0] + + mock_summary = mock.MagicMock() + 
mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL, + [mock_summary]) + + for (key, val) in self._FAKE_SUMMARY_INFO.items(): + setattr(mock_summary, key, val) + + summary = self._vmutils.get_vm_summary_info(self._FAKE_VM_NAME) + self.assertEqual(self._FAKE_SUMMARY_INFO, summary) + def _lookup_vm(self): mock_vm = mock.MagicMock() self._vmutils._lookup_vm_check = mock.MagicMock( @@ -47,6 +99,25 @@ def _lookup_vm(self): mock_vm.path_.return_value = self._FAKE_VM_PATH return mock_vm + def test_lookup_vm_ok(self): + mock_vm = mock.MagicMock() + self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm] + vm = self._vmutils._lookup_vm_check(self._FAKE_VM_NAME) + self.assertEqual(mock_vm, vm) + + def test_lookup_vm_multiple(self): + mockvm = mock.MagicMock() + self._vmutils._conn.Msvm_ComputerSystem.return_value = [mockvm, mockvm] + self.assertRaises(vmutils.HyperVException, + self._vmutils._lookup_vm_check, + self._FAKE_VM_NAME) + + def test_lookup_vm_none(self): + self._vmutils._conn.Msvm_ComputerSystem.return_value = [] + self.assertRaises(exception.NotFound, + self._vmutils._lookup_vm_check, + self._FAKE_VM_NAME) + def test_set_vm_memory_static(self): self._test_set_vm_memory_dynamic(1.0) @@ -108,6 +179,7 @@ def test_get_vm_disks(self): def _create_mock_disks(self): mock_rasd1 = mock.MagicMock() mock_rasd1.ResourceSubType = self._vmutils._IDE_DISK_RES_SUB_TYPE + mock_rasd1.HostResource = [self._FAKE_VHD_PATH] mock_rasd1.Connection = [self._FAKE_VHD_PATH] mock_rasd2 = mock.MagicMock() @@ -115,3 +187,300 @@ def _create_mock_disks(self): mock_rasd2.HostResource = [self._FAKE_VOLUME_DRIVE_PATH] return [mock_rasd1, mock_rasd2] + + @mock.patch.object(vmutils.VMUtils, '_set_vm_vcpus') + @mock.patch.object(vmutils.VMUtils, '_set_vm_memory') + @mock.patch.object(vmutils.VMUtils, '_get_wmi_obj') + def test_create_vm(self, mock_get_wmi_obj, mock_set_mem, mock_set_vcpus): + mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0] + 
getattr(mock_svc, self._DEFINE_SYSTEM).return_value = ( + None, self._FAKE_JOB_PATH, self._FAKE_RET_VAL) + + mock_vm = mock_get_wmi_obj.return_value + self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm] + + mock_s = mock.MagicMock() + setattr(mock_s, + self._SETTING_TYPE, + self._VIRTUAL_SYSTEM_TYPE_REALIZED) + mock_vm.associators.return_value = [mock_s] + + self._vmutils.create_vm(self._FAKE_VM_NAME, self._FAKE_MEMORY_MB, + self._FAKE_VCPUS_NUM, False, + self._FAKE_DYNAMIC_MEMORY_RATIO) + + self.assertTrue(getattr(mock_svc, self._DEFINE_SYSTEM).called) + mock_set_mem.assert_called_with(mock_vm, mock_s, self._FAKE_MEMORY_MB, + self._FAKE_DYNAMIC_MEMORY_RATIO) + + mock_set_vcpus.assert_called_with(mock_vm, mock_s, + self._FAKE_VCPUS_NUM, + False) + + def test_get_vm_scsi_controller(self): + self._prepare_get_vm_controller(self._vmutils._SCSI_CTRL_RES_SUB_TYPE) + path = self._vmutils.get_vm_scsi_controller(self._FAKE_VM_NAME) + self.assertEqual(self._FAKE_RES_PATH, path) + + def test_get_vm_ide_controller(self): + self._prepare_get_vm_controller(self._vmutils._IDE_CTRL_RES_SUB_TYPE) + path = self._vmutils.get_vm_ide_controller(self._FAKE_VM_NAME, + self._FAKE_ADDRESS) + self.assertEqual(self._FAKE_RES_PATH, path) + + def _prepare_get_vm_controller(self, resource_sub_type): + mock_vm = self._lookup_vm() + mock_vm_settings = mock.MagicMock() + mock_rasds = mock.MagicMock() + mock_rasds.path_.return_value = self._FAKE_RES_PATH + mock_rasds.ResourceSubType = resource_sub_type + mock_rasds.Address = self._FAKE_ADDRESS + mock_vm_settings.associators.return_value = [mock_rasds] + mock_vm.associators.return_value = [mock_vm_settings] + + def _prepare_resources(self, mock_path, mock_subtype, mock_vm_settings): + mock_rasds = mock_vm_settings.associators.return_value[0] + mock_rasds.path_.return_value = mock_path + mock_rasds.ResourceSubType = mock_subtype + return mock_rasds + + @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data') + 
@mock.patch.object(vmutils.VMUtils, '_get_vm_ide_controller') + def test_attach_ide_drive(self, mock_get_ide_ctrl, mock_get_new_rsd): + mock_vm = self._lookup_vm() + mock_rsd = mock_get_new_rsd.return_value + + with mock.patch.object(self._vmutils, + '_add_virt_resource') as mock_add_virt_res: + self._vmutils.attach_ide_drive(self._FAKE_VM_NAME, + self._FAKE_CTRL_PATH, + self._FAKE_CTRL_ADDR, + self._FAKE_DRIVE_ADDR) + + mock_add_virt_res.assert_called_with(mock_rsd, + mock_vm.path_.return_value) + + mock_get_ide_ctrl.assert_called_with(mock_vm, self._FAKE_CTRL_ADDR) + self.assertTrue(mock_get_new_rsd.called) + + @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data') + def test_create_scsi_controller(self, mock_get_new_rsd): + mock_vm = self._lookup_vm() + with mock.patch.object(self._vmutils, + '_add_virt_resource') as mock_add_virt_res: + self._vmutils.create_scsi_controller(self._FAKE_VM_NAME) + + mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value, + mock_vm.path_.return_value) + + @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data') + def test_attach_volume_to_controller(self, mock_get_new_rsd): + mock_vm = self._lookup_vm() + with mock.patch.object(self._vmutils, + '_add_virt_resource') as mock_add_virt_res: + self._vmutils.attach_volume_to_controller( + self._FAKE_VM_NAME, self._FAKE_CTRL_PATH, self._FAKE_CTRL_ADDR, + self._FAKE_MOUNTED_DISK_PATH) + + mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value, + mock_vm.path_.return_value) + + @mock.patch.object(vmutils.VMUtils, '_modify_virt_resource') + @mock.patch.object(vmutils.VMUtils, '_get_nic_data_by_name') + def test_set_nic_connection(self, mock_get_nic_conn, mock_modify_virt_res): + self._lookup_vm() + mock_nic = mock_get_nic_conn.return_value + self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None) + + mock_modify_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH) + + @mock.patch.object(vmutils.VMUtils, 
'_get_new_setting_data') + def test_create_nic(self, mock_get_new_virt_res): + self._lookup_vm() + mock_nic = mock_get_new_virt_res.return_value + + with mock.patch.object(self._vmutils, + '_add_virt_resource') as mock_add_virt_res: + self._vmutils.create_nic( + self._FAKE_VM_NAME, self._FAKE_RES_NAME, self._FAKE_ADDRESS) + + mock_add_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH) + + def test_set_vm_state(self): + mock_vm = self._lookup_vm() + mock_vm.RequestStateChange.return_value = ( + self._FAKE_JOB_PATH, self._FAKE_RET_VAL) + + self._vmutils.set_vm_state(self._FAKE_VM_NAME, + constants.HYPERV_VM_STATE_ENABLED) + mock_vm.RequestStateChange.assert_called_with( + constants.HYPERV_VM_STATE_ENABLED) + + def test_destroy_vm(self): + self._lookup_vm() + + mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0] + getattr(mock_svc, self._DESTROY_SYSTEM).return_value = ( + self._FAKE_JOB_PATH, self._FAKE_RET_VAL) + + self._vmutils.destroy_vm(self._FAKE_VM_NAME) + + getattr(mock_svc, self._DESTROY_SYSTEM).assert_called_with( + self._FAKE_VM_PATH) + + @mock.patch.object(vmutils.VMUtils, '_wait_for_job') + def test_check_ret_val_ok(self, mock_wait_for_job): + self._vmutils.check_ret_val(constants.WMI_JOB_STATUS_STARTED, + self._FAKE_JOB_PATH) + mock_wait_for_job.assert_called_once_with(self._FAKE_JOB_PATH) + + def test_check_ret_val_exception(self): + self.assertRaises(vmutils.HyperVException, + self._vmutils.check_ret_val, + self._FAKE_RET_VAL_BAD, + self._FAKE_JOB_PATH) + + def test_wait_for_job_done(self): + mockjob = self._prepare_wait_for_job(constants.WMI_JOB_STATE_COMPLETED) + job = self._vmutils._wait_for_job(self._FAKE_JOB_PATH) + self.assertEqual(mockjob, job) + + def test_wait_for_job_exception_concrete_job(self): + mock_job = self._prepare_wait_for_job() + mock_job.path.return_value.Class = self._CONCRETE_JOB + self.assertRaises(vmutils.HyperVException, + self._vmutils._wait_for_job, + self._FAKE_JOB_PATH) + + def 
test_wait_for_job_exception_with_error(self): + mock_job = self._prepare_wait_for_job() + mock_job.GetError.return_value = (self._FAKE_ERROR, self._FAKE_RET_VAL) + self.assertRaises(vmutils.HyperVException, + self._vmutils._wait_for_job, + self._FAKE_JOB_PATH) + + def test_wait_for_job_exception_no_error(self): + mock_job = self._prepare_wait_for_job() + mock_job.GetError.return_value = (None, None) + self.assertRaises(vmutils.HyperVException, + self._vmutils._wait_for_job, + self._FAKE_JOB_PATH) + + def _prepare_wait_for_job(self, state=_FAKE_JOB_STATUS_BAD): + mock_job = mock.MagicMock() + mock_job.JobState = state + mock_job.Description = self._FAKE_JOB_DESCRIPTION + mock_job.ElapsedTime = self._FAKE_ELAPSED_TIME + + self._vmutils._get_wmi_obj = mock.MagicMock(return_value=mock_job) + return mock_job + + def test_add_virt_resource(self): + mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0] + getattr(mock_svc, self._ADD_RESOURCE).return_value = ( + self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL) + mock_res_setting_data = mock.MagicMock() + mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA + + self._vmutils._add_virt_resource(mock_res_setting_data, + self._FAKE_VM_PATH) + self._assert_add_resources(mock_svc) + + def test_modify_virt_resource(self): + mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0] + mock_svc.ModifyVirtualSystemResources.return_value = ( + self._FAKE_JOB_PATH, self._FAKE_RET_VAL) + mock_res_setting_data = mock.MagicMock() + mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA + + self._vmutils._modify_virt_resource(mock_res_setting_data, + self._FAKE_VM_PATH) + + mock_svc.ModifyVirtualSystemResources.assert_called_with( + ResourceSettingData=[self._FAKE_RES_DATA], + ComputerSystem=self._FAKE_VM_PATH) + + def test_remove_virt_resource(self): + mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0] + getattr(mock_svc, 
self._REMOVE_RESOURCE).return_value = ( + self._FAKE_JOB_PATH, self._FAKE_RET_VAL) + mock_res_setting_data = mock.MagicMock() + mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH + + self._vmutils._remove_virt_resource(mock_res_setting_data, + self._FAKE_VM_PATH) + self._assert_remove_resources(mock_svc) + + @mock.patch.object(vmutils, 'wmi', create=True) + @mock.patch.object(vmutils.VMUtils, 'check_ret_val') + def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi): + self._lookup_vm() + + mock_svc = self._get_snapshot_service() + mock_svc.CreateVirtualSystemSnapshot.return_value = ( + self._FAKE_JOB_PATH, self._FAKE_RET_VAL, mock.MagicMock()) + + self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME) + + mock_svc.CreateVirtualSystemSnapshot.assert_called_with( + self._FAKE_VM_PATH) + + mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL, + self._FAKE_JOB_PATH) + + def test_remove_vm_snapshot(self): + mock_svc = self._get_snapshot_service() + getattr(mock_svc, self._DESTROY_SNAPSHOT).return_value = ( + self._FAKE_JOB_PATH, self._FAKE_RET_VAL) + + self._vmutils.remove_vm_snapshot(self._FAKE_SNAPSHOT_PATH) + getattr(mock_svc, self._DESTROY_SNAPSHOT).assert_called_with( + self._FAKE_SNAPSHOT_PATH) + + def test_detach_vm_disk(self): + self._lookup_vm() + mock_disk = self._prepare_mock_disk() + + with mock.patch.object(self._vmutils, + '_remove_virt_resource') as mock_rm_virt_res: + self._vmutils.detach_vm_disk(self._FAKE_VM_NAME, + self._FAKE_HOST_RESOURCE) + + mock_rm_virt_res.assert_called_with(mock_disk, self._FAKE_VM_PATH) + + def test_get_mounted_disk_resource_from_path(self): + mock_disk_1 = mock.MagicMock() + mock_disk_2 = mock.MagicMock() + mock_disk_2.HostResource = [self._FAKE_MOUNTED_DISK_PATH] + self._vmutils._conn.query.return_value = [mock_disk_1, mock_disk_2] + + physical_disk = self._vmutils._get_mounted_disk_resource_from_path( + self._FAKE_MOUNTED_DISK_PATH) + + self.assertEqual(mock_disk_2, physical_disk) + + def 
test_get_controller_volume_paths(self): + self._prepare_mock_disk() + mock_disks = {self._FAKE_RES_PATH: self._FAKE_HOST_RESOURCE} + disks = self._vmutils.get_controller_volume_paths(self._FAKE_RES_PATH) + self.assertEqual(mock_disks, disks) + + def _prepare_mock_disk(self): + mock_disk = mock.MagicMock() + mock_disk.HostResource = [self._FAKE_HOST_RESOURCE] + mock_disk.path.return_value.RelPath = self._FAKE_RES_PATH + mock_disk.ResourceSubType = self._vmutils._IDE_DISK_RES_SUB_TYPE + self._vmutils._conn.query.return_value = [mock_disk] + + return mock_disk + + def _get_snapshot_service(self): + return self._vmutils._conn.Msvm_VirtualSystemManagementService()[0] + + def _assert_add_resources(self, mock_svc): + getattr(mock_svc, self._ADD_RESOURCE).assert_called_with( + [self._FAKE_RES_DATA], self._FAKE_VM_PATH) + + def _assert_remove_resources(self, mock_svc): + getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with( + [self._FAKE_RES_PATH], self._FAKE_VM_PATH) diff --git a/nova/tests/virt/hyperv/test_vmutilsv2.py b/nova/tests/virt/hyperv/test_vmutilsv2.py index 2a49acf5cb..f6ab55ff1d 100644 --- a/nova/tests/virt/hyperv/test_vmutilsv2.py +++ b/nova/tests/virt/hyperv/test_vmutilsv2.py @@ -1,4 +1,4 @@ -# Copyright 2013 Cloudbase Solutions Srl +# Copyright 2014 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -14,159 +14,51 @@ import mock -from nova import test +from nova.tests.virt.hyperv import test_vmutils from nova.virt.hyperv import vmutilsv2 -class VMUtilsV2TestCase(test.NoDBTestCase): +class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase): """Unit tests for the Hyper-V VMUtilsV2 class.""" - _FAKE_VM_NAME = 'fake_vm' - _FAKE_MEMORY_MB = 2 - _FAKE_VCPUS_NUM = 4 - _FAKE_JOB_PATH = 'fake_job_path' - _FAKE_RET_VAL = 0 - _FAKE_CTRL_PATH = 'fake_ctrl_path' - _FAKE_CTRL_ADDR = 0 - _FAKE_DRIVE_ADDR = 0 - _FAKE_MOUNTED_DISK_PATH = 'fake_mounted_disk_path' - _FAKE_VM_PATH = "fake_vm_path" - _FAKE_ENABLED_STATE = 1 - _FAKE_SNAPSHOT_PATH = "_FAKE_SNAPSHOT_PATH" - _FAKE_RES_DATA = "fake_res_data" - _FAKE_RES_PATH = "fake_res_path" - _FAKE_DYNAMIC_MEMORY_RATIO = 1.0 - _FAKE_VHD_PATH = "fake_vhd_path" - _FAKE_VOLUME_DRIVE_PATH = "fake_volume_drive_path" + _DEFINE_SYSTEM = 'DefineSystem' + _DESTROY_SYSTEM = 'DestroySystem' + _DESTROY_SNAPSHOT = 'DestroySnapshot' + + _ADD_RESOURCE = 'AddResourceSettings' + _REMOVE_RESOURCE = 'RemoveResourceSettings' + _SETTING_TYPE = 'VirtualSystemType' + + _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized' def setUp(self): + super(VMUtilsV2TestCase, self).setUp() self._vmutils = vmutilsv2.VMUtilsV2() self._vmutils._conn = mock.MagicMock() - super(VMUtilsV2TestCase, self).setUp() - - def _lookup_vm(self): - mock_vm = mock.MagicMock() - self._vmutils._lookup_vm_check = mock.MagicMock( - return_value=mock_vm) - mock_vm.path_.return_value = self._FAKE_VM_PATH - return mock_vm - - def test_create_vm(self): - mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0] - mock_svc.DefineSystem.return_value = (None, self._FAKE_JOB_PATH, - self._FAKE_RET_VAL) - - self._vmutils._get_wmi_obj = mock.MagicMock() - mock_vm = self._vmutils._get_wmi_obj.return_value - - mock_s = mock.MagicMock() - mock_s.VirtualSystemType = self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED - mock_vm.associators.return_value = [mock_s] - - 
self._vmutils._set_vm_memory = mock.MagicMock() - self._vmutils._set_vm_vcpus = mock.MagicMock() - - self._vmutils.create_vm(self._FAKE_VM_NAME, self._FAKE_MEMORY_MB, - self._FAKE_VCPUS_NUM, False, - self._FAKE_DYNAMIC_MEMORY_RATIO) - - self.assertTrue(mock_svc.DefineSystem.called) - self._vmutils._set_vm_memory.assert_called_with( - mock_vm, mock_s, self._FAKE_MEMORY_MB, - self._FAKE_DYNAMIC_MEMORY_RATIO) - - self._vmutils._set_vm_vcpus.assert_called_with(mock_vm, mock_s, - self._FAKE_VCPUS_NUM, - False) - - def test_attach_ide_drive(self): - self._lookup_vm() - self._vmutils._get_vm_ide_controller = mock.MagicMock() - self._vmutils._get_new_resource_setting_data = mock.MagicMock() - self._vmutils._add_virt_resource = mock.MagicMock() - - self._vmutils.attach_ide_drive(self._FAKE_VM_NAME, - self._FAKE_CTRL_PATH, - self._FAKE_CTRL_ADDR, - self._FAKE_DRIVE_ADDR) - - self.assertTrue(self._vmutils._get_vm_ide_controller.called) - self.assertTrue(self._vmutils._get_new_resource_setting_data.called) - self.assertTrue(self._vmutils._add_virt_resource.called) - - def test_attach_volume_to_controller(self): - self._lookup_vm() - self._vmutils._add_virt_resource = mock.MagicMock() - - self._vmutils.attach_volume_to_controller(self._FAKE_VM_NAME, - self._FAKE_CTRL_PATH, - self._FAKE_CTRL_ADDR, - self._FAKE_MOUNTED_DISK_PATH) - - self.assertTrue(self._vmutils._add_virt_resource.called) - - def test_create_scsi_controller(self): - self._lookup_vm() - self._vmutils._add_virt_resource = mock.MagicMock() - - self._vmutils.create_scsi_controller(self._FAKE_VM_NAME) - - self.assertTrue(self._vmutils._add_virt_resource.called) - - def test_get_vm_storage_paths(self): - mock_vm = self._lookup_vm() - - mock_vmsettings = [mock.MagicMock()] - mock_vm.associators.return_value = mock_vmsettings - mock_sasds = [] - mock_sasd1 = mock.MagicMock() - mock_sasd1.ResourceSubType = self._vmutils._IDE_DISK_RES_SUB_TYPE - mock_sasd1.HostResource = [self._FAKE_VHD_PATH] - mock_sasd2 = 
mock.MagicMock() - mock_sasd2.ResourceSubType = self._vmutils._PHYS_DISK_RES_SUB_TYPE - mock_sasd2.HostResource = [self._FAKE_VOLUME_DRIVE_PATH] - mock_sasds.append(mock_sasd1) - mock_sasds.append(mock_sasd2) - mock_vmsettings[0].associators.return_value = mock_sasds - - storage = self._vmutils.get_vm_storage_paths(self._FAKE_VM_NAME) - (disk_files, volume_drives) = storage - - mock_vm.associators.assert_called_with( - wmi_result_class='Msvm_VirtualSystemSettingData') - mock_vmsettings[0].associators.assert_called_with( - wmi_result_class='Msvm_StorageAllocationSettingData') - self.assertEqual([self._FAKE_VHD_PATH], disk_files) - self.assertEqual([self._FAKE_VOLUME_DRIVE_PATH], volume_drives) - - def test_destroy(self): - self._lookup_vm() - + def test_modify_virt_resource(self): mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0] - mock_svc.DestroySystem.return_value = (self._FAKE_JOB_PATH, - self._FAKE_RET_VAL) - - self._vmutils.destroy_vm(self._FAKE_VM_NAME) - - mock_svc.DestroySystem.assert_called_with(self._FAKE_VM_PATH) - - def test_get_vm_state(self): - self._vmutils.get_vm_summary_info = mock.MagicMock( - return_value={'EnabledState': self._FAKE_ENABLED_STATE}) + mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH, + mock.MagicMock(), + self._FAKE_RET_VAL) + mock_res_setting_data = mock.MagicMock() + mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA - enabled_state = self._vmutils.get_vm_state(self._FAKE_VM_NAME) + self._vmutils._modify_virt_resource(mock_res_setting_data, + self._FAKE_VM_PATH) - self.assertEqual(self._FAKE_ENABLED_STATE, enabled_state) + mock_svc.ModifyResourceSettings.assert_called_with( + ResourceSettings=[self._FAKE_RES_DATA]) - def test_take_vm_snapshot(self): + @mock.patch.object(vmutilsv2, 'wmi', create=True) + @mock.patch.object(vmutilsv2.VMUtilsV2, 'check_ret_val') + def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi): self._lookup_vm() - mock_svc = 
self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0] + mock_svc = self._get_snapshot_service() mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL) - vmutilsv2.wmi = mock.MagicMock() self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME) @@ -174,70 +66,19 @@ def test_take_vm_snapshot(self): AffectedSystem=self._FAKE_VM_PATH, SnapshotType=self._vmutils._SNAPSHOT_FULL) - def test_remove_vm_snapshot(self): - mock_svc = self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0] - mock_svc.DestroySnapshot.return_value = (self._FAKE_JOB_PATH, - self._FAKE_RET_VAL) - - self._vmutils.remove_vm_snapshot(self._FAKE_SNAPSHOT_PATH) - - mock_svc.DestroySnapshot.assert_called_with(self._FAKE_SNAPSHOT_PATH) + mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL, + self._FAKE_JOB_PATH) - def test_set_nic_connection(self): + @mock.patch.object(vmutilsv2.VMUtilsV2, '_add_virt_resource') + @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_new_setting_data') + @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_nic_data_by_name') + def test_set_nic_connection(self, mock_get_nic_data, mock_get_new_sd, + mock_add_virt_res): self._lookup_vm() - - self._vmutils._get_nic_data_by_name = mock.MagicMock() - self._vmutils._add_virt_resource = mock.MagicMock() - - fake_eth_port = mock.MagicMock() - self._vmutils._get_new_setting_data = mock.MagicMock( - return_value=fake_eth_port) + fake_eth_port = mock_get_new_sd.return_value self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None) - - self._vmutils._add_virt_resource.assert_called_with(fake_eth_port, - self._FAKE_VM_PATH) - - def test_add_virt_resource(self): - mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0] - mock_svc.AddResourceSettings.return_value = (self._FAKE_JOB_PATH, - mock.MagicMock(), - self._FAKE_RET_VAL) - mock_res_setting_data = mock.MagicMock() - mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA - - 
self._vmutils._add_virt_resource(mock_res_setting_data, - self._FAKE_VM_PATH) - - mock_svc.AddResourceSettings.assert_called_with(self._FAKE_VM_PATH, - [self._FAKE_RES_DATA]) - - def test_modify_virt_resource(self): - mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0] - mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH, - mock.MagicMock(), - self._FAKE_RET_VAL) - mock_res_setting_data = mock.MagicMock() - mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA - - self._vmutils._modify_virt_resource(mock_res_setting_data, - self._FAKE_VM_PATH) - - mock_svc.ModifyResourceSettings.assert_called_with( - ResourceSettings=[self._FAKE_RES_DATA]) - - def test_remove_virt_resource(self): - mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0] - mock_svc.RemoveResourceSettings.return_value = (self._FAKE_JOB_PATH, - self._FAKE_RET_VAL) - mock_res_setting_data = mock.MagicMock() - mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH - - self._vmutils._remove_virt_resource(mock_res_setting_data, - self._FAKE_VM_PATH) - - mock_svc.RemoveResourceSettings.assert_called_with( - [self._FAKE_RES_PATH]) + mock_add_virt_res.assert_called_with(fake_eth_port, self._FAKE_VM_PATH) @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks') def test_enable_vm_metrics_collection(self, mock_get_vm_disks): @@ -266,3 +107,14 @@ def test_enable_vm_metrics_collection(self, mock_get_vm_disks): MetricCollectionEnabled=self._vmutils._METRIC_ENABLED)) mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True) + + def _get_snapshot_service(self): + return self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0] + + def _assert_add_resources(self, mock_svc): + getattr(mock_svc, self._ADD_RESOURCE).assert_called_with( + self._FAKE_VM_PATH, [self._FAKE_RES_DATA]) + + def _assert_remove_resources(self, mock_svc): + getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with( + [self._FAKE_RES_PATH]) From 
fa2f139e22ed1317f4afe01faaf7ee3943444715 Mon Sep 17 00:00:00 2001 From: Robert Li Date: Fri, 11 Jul 2014 14:26:28 -0400 Subject: [PATCH 292/486] makes sure correct PCI device allocation With this patch, on the compute node, a stats pool will be associated with a list of devices that belongs to the pool. This makes sure that PCI devices are allocated out of the same stats pools that are used by the nova scheduler to satisfy the PCI requests. And therefore, stats pools on the compute nodes will be kept in sync with their counterparts in the nova scheduler. Change-Id: I2d97c6354215e2ac5ff659e3203c33771abe1c09 Closes-bug: #1288809 --- nova/pci/pci_manager.py | 46 +------------------ nova/pci/pci_stats.py | 54 +++++++++++++++++++++- nova/tests/pci/test_pci_manager.py | 74 +++++++++++++++--------------- nova/tests/pci/test_pci_stats.py | 21 +++++++-- 4 files changed, 108 insertions(+), 87 deletions(-) diff --git a/nova/pci/pci_manager.py b/nova/pci/pci_manager.py index f5f57af11c..a56ab42152 100644 --- a/nova/pci/pci_manager.py +++ b/nova/pci/pci_manager.py @@ -26,7 +26,6 @@ from nova.pci import pci_device from nova.pci import pci_request from nova.pci import pci_stats -from nova.pci import pci_utils LOG = logging.getLogger(__name__) @@ -74,46 +73,6 @@ def _initial_instance_usage(self): elif dev['status'] == 'available': self.stats.add_device(dev) - def _filter_devices_for_spec(self, request_spec, pci_devs): - return [p for p in pci_devs - if pci_utils.pci_device_prop_match(p, request_spec)] - - def _get_free_devices_for_request(self, pci_request, pci_devs): - count = pci_request.get('count', 1) - spec = pci_request.get('spec', []) - devs = self._filter_devices_for_spec(spec, pci_devs) - if len(devs) < count: - return None - else: - return devs[:count] - - @property - def free_devs(self): - return [dev for dev in self.pci_devs if dev.status == 'available'] - - def get_free_devices_for_requests(self, pci_requests): - """Select free pci devices for requests - - 
Pci_requests is a list of pci_request dictionaries. Each dictionary - has three keys: - count: number of pci devices required, default 1 - spec: the pci properties that the devices should meet - alias_name: alias the pci_request is translated from, optional - - If any single pci_request cannot find any free devices, then the - entire request list will fail. - """ - alloc = [] - - for request in pci_requests: - available = self._get_free_devices_for_request( - request, - [p for p in self.free_devs if p not in alloc]) - if not available: - return [] - alloc.extend(available) - return alloc - @property def all_devs(self): return self.pci_devs @@ -162,7 +121,7 @@ def set_hvdevs(self, devices): else: # Note(yjiang5): no need to update stats if an assigned # device is hot removed. - self.stats.consume_device(existed) + self.stats.remove_device(existed) else: new_value = next((dev for dev in devices if dev['address'] == existed['address'])) @@ -196,12 +155,11 @@ def _claim_instance(self, instance, prefix=''): instance, prefix) if not pci_requests: return None - devs = self.get_free_devices_for_requests(pci_requests) + devs = self.stats.consume_requests(pci_requests) if not devs: raise exception.PciDeviceRequestFailed(pci_requests) for dev in devs: pci_device.claim(dev, instance) - self.stats.consume_device(dev) return devs def _allocate_instance(self, instance, devs): diff --git a/nova/pci/pci_stats.py b/nova/pci/pci_stats.py index ae9454253f..e83ec22528 100644 --- a/nova/pci/pci_stats.py +++ b/nova/pci/pci_stats.py @@ -17,6 +17,7 @@ import copy from nova import exception +from nova.i18n import _LE from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.pci import pci_utils @@ -70,8 +71,10 @@ def add_device(self, dev): if not pool: pool = dict((k, dev.get(k)) for k in self.pool_keys) pool['count'] = 0 + pool['devices'] = [] self.pools.append(pool) pool['count'] += 1 + pool['devices'].append(dev) @staticmethod def 
_decrease_pool_count(pool_list, pool, count=1): @@ -87,14 +90,56 @@ def _decrease_pool_count(pool_list, pool, count=1): pool_list.remove(pool) return count - def consume_device(self, dev): + def remove_device(self, dev): """Remove one device from the first pool that it matches.""" pool = self._get_first_pool(dev) if not pool: raise exception.PciDevicePoolEmpty( compute_node_id=dev.compute_node_id, address=dev.address) + pool['devices'].remove(dev) self._decrease_pool_count(self.pools, pool) + def get_free_devs(self): + free_devs = [] + for pool in self.pools: + free_devs.extend(pool['devices']) + return free_devs + + def consume_requests(self, pci_requests): + alloc_devices = [] + for request in pci_requests: + count = request.get('count', 1) + spec = request.get('spec', []) + # For now, keep the same algorithm as during scheduling: + # a spec may be able to match multiple pools. + pools = self._filter_pools_for_spec(self.pools, spec) + # Failed to allocate the required number of devices + # Return the devices already allocated back to their pools + if sum([pool['count'] for pool in pools]) < count: + LOG.error(_LE("Failed to allocate PCI devices for instance." + " Unassigning devices back to pools." 
+ " This should not happen, since the scheduler" + " should have accurate information, and allocation" + " during claims is controlled via a hold" + " on the compute node semaphore")) + for d in range(len(alloc_devices)): + self.add_device(alloc_devices.pop()) + raise exception.PciDeviceRequestFailed(requests=pci_requests) + + for pool in pools: + if pool['count'] >= count: + num_alloc = count + else: + num_alloc = pool['count'] + count -= num_alloc + pool['count'] -= num_alloc + for d in range(num_alloc): + pci_dev = pool['devices'].pop() + alloc_devices.append(pci_dev) + if count == 0: + break + return alloc_devices + @staticmethod def _filter_pools_for_spec(pools, request_specs): return [pool for pool in pools @@ -134,7 +179,12 @@ def apply_requests(self, requests): raise exception.PciDeviceRequestFailed(requests=requests) def __iter__(self): - return iter(self.pools) + # 'devices' shouldn't be part of stats + pools = [] + for pool in self.pools: + tmp = dict((k, v) for k, v in pool.iteritems() if k != 'devices') + pools.append(tmp) + return iter(pools) def clear(self): """Clear all the stats maintained.""" diff --git a/nova/tests/pci/test_pci_manager.py b/nova/tests/pci/test_pci_manager.py index 734e89d07a..19caf2cb3b 100644 --- a/nova/tests/pci/test_pci_manager.py +++ b/nova/tests/pci/test_pci_manager.py @@ -104,7 +104,8 @@ def setUp(self): def test_pcidev_tracker_create(self): self.assertEqual(len(self.tracker.pci_devs), 3) - self.assertEqual(len(self.tracker.free_devs), 3) + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(len(free_devs), 3) self.assertEqual(self.tracker.stale.keys(), []) self.assertEqual(len(self.tracker.stats.pools), 2) self.assertEqual(self.tracker.node_id, 1) @@ -113,23 +114,6 @@ def test_pcidev_tracker_create_no_nodeid(self): self.tracker = pci_manager.PciDevTracker() self.assertEqual(len(self.tracker.pci_devs), 0) - def test_get_free_devices_for_requests(self): - devs = 
self.tracker.get_free_devices_for_requests(fake_pci_requests) - self.assertEqual(len(devs), 2) - self.assertEqual(set([dev['vendor_id'] for dev in devs]), - set(['v1', 'v'])) - - def test_get_free_devices_for_requests_empty(self): - devs = self.tracker.get_free_devices_for_requests([]) - self.assertEqual(len(devs), 0) - - def test_get_free_devices_for_requests_meet_partial(self): - requests = copy.deepcopy(fake_pci_requests) - requests[1]['count'] = 2 - requests[1]['spec'][0]['vendor_id'] = 'v' - devs = self.tracker.get_free_devices_for_requests(requests) - self.assertEqual(len(devs), 0) - def test_set_hvdev_new_dev(self): fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2') fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1), @@ -172,8 +156,9 @@ def test_set_hvdev_changed_stal(self): def test_update_pci_for_instance_active(self): self.pci_requests = fake_pci_requests self.tracker.update_pci_for_instance(self.inst) - self.assertEqual(len(self.tracker.free_devs), 1) - self.assertEqual(self.tracker.free_devs[0]['vendor_id'], 'v') + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(len(free_devs), 1) + self.assertEqual(free_devs[0]['vendor_id'], 'v') def test_update_pci_for_instance_fail(self): self.pci_requests = copy.deepcopy(fake_pci_requests) @@ -185,10 +170,12 @@ def test_update_pci_for_instance_fail(self): def test_update_pci_for_instance_deleted(self): self.pci_requests = fake_pci_requests self.tracker.update_pci_for_instance(self.inst) - self.assertEqual(len(self.tracker.free_devs), 1) + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(len(free_devs), 1) self.inst.vm_state = vm_states.DELETED self.tracker.update_pci_for_instance(self.inst) - self.assertEqual(len(self.tracker.free_devs), 3) + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(len(free_devs), 3) self.assertEqual(set([dev['vendor_id'] for dev in self.tracker.pci_devs]), set(['v', 'v1'])) @@ -196,15 +183,18 
@@ def test_update_pci_for_instance_deleted(self): def test_update_pci_for_instance_resize_source(self): self.pci_requests = fake_pci_requests self.tracker.update_pci_for_instance(self.inst) - self.assertEqual(len(self.tracker.free_devs), 1) + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(len(free_devs), 1) self.inst.task_state = task_states.RESIZE_MIGRATED self.tracker.update_pci_for_instance(self.inst) - self.assertEqual(len(self.tracker.free_devs), 3) + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(len(free_devs), 3) def test_update_pci_for_instance_resize_dest(self): self.pci_requests = fake_pci_requests self.tracker.update_pci_for_migration(self.inst) - self.assertEqual(len(self.tracker.free_devs), 1) + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(len(free_devs), 1) self.assertEqual(len(self.tracker.claims['fake-inst-uuid']), 2) self.assertNotIn('fake-inst-uuid', self.tracker.allocations) self.inst.task_state = task_states.RESIZE_FINISH @@ -215,14 +205,16 @@ def test_update_pci_for_instance_resize_dest(self): def test_update_pci_for_migration_in(self): self.pci_requests = fake_pci_requests self.tracker.update_pci_for_migration(self.inst) - self.assertEqual(len(self.tracker.free_devs), 1) - self.assertEqual(self.tracker.free_devs[0]['vendor_id'], 'v') + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(len(free_devs), 1) + self.assertEqual(free_devs[0]['vendor_id'], 'v') def test_update_pci_for_migration_out(self): self.pci_requests = fake_pci_requests self.tracker.update_pci_for_migration(self.inst) self.tracker.update_pci_for_migration(self.inst, sign=-1) - self.assertEqual(len(self.tracker.free_devs), 3) + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(len(free_devs), 3) self.assertEqual(set([dev['vendor_id'] for dev in self.tracker.pci_devs]), set(['v', 'v1'])) @@ -276,13 +268,15 @@ def test_clean_usage(self): 
self.tracker.update_pci_for_instance(self.inst) self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}] self.tracker.update_pci_for_instance(inst_2) - self.assertEqual(len(self.tracker.free_devs), 1) - self.assertEqual(self.tracker.free_devs[0]['vendor_id'], 'v') + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(len(free_devs), 1) + self.assertEqual(free_devs[0]['vendor_id'], 'v') self.tracker.clean_usage([self.inst], [migr], [orph]) - self.assertEqual(len(self.tracker.free_devs), 2) + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(len(free_devs), 2) self.assertEqual( - set([dev['vendor_id'] for dev in self.tracker.free_devs]), + set([dev['vendor_id'] for dev in free_devs]), set(['v', 'v1'])) def test_clean_usage_claims(self): @@ -295,11 +289,13 @@ def test_clean_usage_claims(self): self.tracker.update_pci_for_instance(self.inst) self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}] self.tracker.update_pci_for_migration(inst_2) - self.assertEqual(len(self.tracker.free_devs), 1) + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(len(free_devs), 1) self.tracker.clean_usage([self.inst], [migr], [orph]) - self.assertEqual(len(self.tracker.free_devs), 2) + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(len(free_devs), 2) self.assertEqual( - set([dev['vendor_id'] for dev in self.tracker.free_devs]), + set([dev['vendor_id'] for dev in free_devs]), set(['v', 'v1'])) def test_clean_usage_no_request_match_no_claims(self): @@ -308,11 +304,13 @@ def test_clean_usage_no_request_match_no_claims(self): # calls clean_usage. 
self.pci_requests = None self.tracker.update_pci_for_migration(instance=self.inst, sign=1) - self.assertEqual(3, len(self.tracker.free_devs)) + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(3, len(free_devs)) self.tracker.clean_usage([], [], []) - self.assertEqual(3, len(self.tracker.free_devs)) + free_devs = self.tracker.pci_stats.get_free_devs() + self.assertEqual(3, len(free_devs)) self.assertEqual( - set([dev['address'] for dev in self.tracker.free_devs]), + set([dev['address'] for dev in free_devs]), set(['0000:00:00.1', '0000:00:00.2', '0000:00:00.3'])) diff --git a/nova/tests/pci/test_pci_stats.py b/nova/tests/pci/test_pci_stats.py index bf27a68114..9a81c58e11 100644 --- a/nova/tests/pci/test_pci_stats.py +++ b/nova/tests/pci/test_pci_stats.py @@ -71,15 +71,15 @@ def test_add_device(self): set([1, 2])) def test_remove_device(self): - self.pci_stats.consume_device(self.fake_dev_2) + self.pci_stats.remove_device(self.fake_dev_2) self.assertEqual(len(self.pci_stats.pools), 1) self.assertEqual(self.pci_stats.pools[0]['count'], 2) self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1') def test_remove_device_exception(self): - self.pci_stats.consume_device(self.fake_dev_2) + self.pci_stats.remove_device(self.fake_dev_2) self.assertRaises(exception.PciDevicePoolEmpty, - self.pci_stats.consume_device, + self.pci_stats.remove_device, self.fake_dev_2) def test_json_creat(self): @@ -116,3 +116,18 @@ def test_apply_requests_failed(self): self.assertRaises(exception.PciDeviceRequestFailed, self.pci_stats.apply_requests, pci_requests_multiple) + + def test_consume_requests(self): + devs = self.pci_stats.consume_requests(pci_requests) + self.assertEqual(2, len(devs)) + self.assertEqual(set(['v1', 'v2']), + set([dev['vendor_id'] for dev in devs])) + + def test_consume_requests_empty(self): + devs = self.pci_stats.consume_requests([]) + self.assertEqual(0, len(devs)) + + def test_consume_requests_failed(self): + 
self.assertRaises(exception.PciDeviceRequestFailed, + self.pci_stats.consume_requests, + pci_requests_multiple) From 04aeabe7c028d9c071348ad3ee78ae5da13c5bc5 Mon Sep 17 00:00:00 2001 From: Eiichi Aikawa Date: Thu, 23 Jan 2014 14:37:31 +0900 Subject: [PATCH 293/486] Add API schema for v2.1/v3 config_drive extension By defining the API schema, it is possible to separate the validation code from the API method. The API method can be more simple. In addition, a response of API validation error can be consistent for the whole Nova API. Partially implements blueprint v3-api-schema Change-Id: Ia0681941c5449051c90425348466bd7dc44c2e45 --- .../compute/plugins/v3/config_drive.py | 5 ++ .../compute/schemas/v3/config_drive.py | 19 +++++ .../compute/plugins/v3/test_config_drive.py | 69 ++++++++----------- 3 files changed, 51 insertions(+), 42 deletions(-) create mode 100644 nova/api/openstack/compute/schemas/v3/config_drive.py diff --git a/nova/api/openstack/compute/plugins/v3/config_drive.py b/nova/api/openstack/compute/plugins/v3/config_drive.py index b8c4986cd9..345c30f2d9 100644 --- a/nova/api/openstack/compute/plugins/v3/config_drive.py +++ b/nova/api/openstack/compute/plugins/v3/config_drive.py @@ -15,6 +15,8 @@ """Config Drive extension.""" +from nova.api.openstack.compute.schemas.v3 import config_drive as \ + schema_config_drive from nova.api.openstack import extensions from nova.api.openstack import wsgi @@ -68,3 +70,6 @@ def get_resources(self): def server_create(self, server_dict, create_kwargs): create_kwargs['config_drive'] = server_dict.get(ATTRIBUTE_NAME) + + def get_server_create_schema(self): + return schema_config_drive.server_create diff --git a/nova/api/openstack/compute/schemas/v3/config_drive.py b/nova/api/openstack/compute/schemas/v3/config_drive.py new file mode 100644 index 0000000000..b67a9ead0d --- /dev/null +++ b/nova/api/openstack/compute/schemas/v3/config_drive.py @@ -0,0 +1,19 @@ +# Copyright 2014 NEC Corporation. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.api.validation import parameter_types + +server_create = { + 'os-config-drive:config_drive': parameter_types.boolean, +} diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py b/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py index ac4295dc6f..3ab3cf7018 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py @@ -25,6 +25,7 @@ from nova.compute import api as compute_api from nova.compute import flavors from nova import db +from nova import exception from nova.network import manager from nova.openstack.common import jsonutils from nova import test @@ -202,7 +203,7 @@ def create(*args, **kwargs): self._test_create_extra(params, override_controller=self.no_config_drive_controller) - def test_create_instance_with_config_drive(self): + def _create_instance_body_of_config_drive(self, param): def create(*args, **kwargs): self.assertIn('config_drive', kwargs) return old_create(*args, **kwargs) @@ -220,7 +221,7 @@ def create(*args, **kwargs): 'hello': 'world', 'open': 'stack', }, - config_drive.ATTRIBUTE_NAME: "true", + config_drive.ATTRIBUTE_NAME: param, }, } @@ -228,55 +229,39 @@ def create(*args, **kwargs): req.method = 'POST' req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" + + return req, body + + def test_create_instance_with_config_drive(self): + 
param = True + req, body = self._create_instance_body_of_config_drive(param) res = self.controller.create(req, body=body).obj + server = res['server'] + self.assertEqual(FAKE_UUID, server['id']) + def test_create_instance_with_config_drive_as_boolean_string(self): + param = 'false' + req, body = self._create_instance_body_of_config_drive(param) + res = self.controller.create(req, body=body).obj server = res['server'] self.assertEqual(FAKE_UUID, server['id']) def test_create_instance_with_bad_config_drive(self): - image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' - flavor_ref = 'http://localhost/v3/flavors/3' - body = { - 'server': { - 'name': 'config_drive_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, - 'metadata': { - 'hello': 'world', - 'open': 'stack', - }, - config_drive.ATTRIBUTE_NAME: image_href, - }, - } - - req = fakes.HTTPRequestV3.blank('/servers') - req.method = 'POST' - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" - - self.assertRaises(webob.exc.HTTPBadRequest, + param = 12345 + req, body = self._create_instance_body_of_config_drive(param) + self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) def test_create_instance_without_config_drive(self): - image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' - flavor_ref = 'http://localhost/v3/flavors/3' - body = { - 'server': { - 'name': 'config_drive_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, - 'metadata': { - 'hello': 'world', - 'open': 'stack', - }, - }, - } - - req = fakes.HTTPRequestV3.blank('/servers') - req.method = 'POST' - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" + param = True + req, body = self._create_instance_body_of_config_drive(param) + del body['server'][config_drive.ATTRIBUTE_NAME] res = self.controller.create(req, body=body).obj - server = res['server'] self.assertEqual(FAKE_UUID, server['id']) + + def 
test_create_instance_with_empty_config_drive(self): + param = '' + req, body = self._create_instance_body_of_config_drive(param) + self.assertRaises(exception.ValidationError, + self.controller.create, req, body=body) From b26cbdd4874c672c07092a5e72833b119aebca3b Mon Sep 17 00:00:00 2001 From: Eiichi Aikawa Date: Mon, 27 Jan 2014 19:28:57 +0900 Subject: [PATCH 294/486] Add API schema for v2.1/v3 security_groups extension By defining the API schema, it is possible to separate the validation code from the API method. The API method can be more simple. In addition, a response of API validation error can be consistent for the whole Nova API. Partially implements blueprint v3-api-schema Change-Id: Ic240b877d8e7afeb32adf3dc3899a55396cf7210 --- .../compute/plugins/v3/security_groups.py | 5 ++++ .../compute/schemas/v3/security_groups.py | 28 +++++++++++++++++++ .../plugins/v3/test_security_groups.py | 15 ++++++++++ 3 files changed, 48 insertions(+) create mode 100644 nova/api/openstack/compute/schemas/v3/security_groups.py diff --git a/nova/api/openstack/compute/plugins/v3/security_groups.py b/nova/api/openstack/compute/plugins/v3/security_groups.py index 9d807a5298..08aa949d04 100644 --- a/nova/api/openstack/compute/plugins/v3/security_groups.py +++ b/nova/api/openstack/compute/plugins/v3/security_groups.py @@ -18,6 +18,8 @@ import json +from nova.api.openstack.compute.schemas.v3 import security_groups as \ + schema_security_groups from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova import compute @@ -128,6 +130,9 @@ def server_create(self, server_dict, create_kwargs): create_kwargs['security_group'] = list( set(create_kwargs['security_group'])) + def get_server_create_schema(self): + return schema_security_groups.server_create + class NativeSecurityGroupExceptions(object): @staticmethod diff --git a/nova/api/openstack/compute/schemas/v3/security_groups.py b/nova/api/openstack/compute/schemas/v3/security_groups.py new file mode 100644 
index 0000000000..aafd296e68 --- /dev/null +++ b/nova/api/openstack/compute/schemas/v3/security_groups.py @@ -0,0 +1,28 @@ +# Copyright 2014 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.api.validation import parameter_types + +server_create = { + 'os-security-groups:security_groups': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': parameter_types.name, + }, + 'additionalProperties': False, + } + }, +} diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_security_groups.py b/nova/tests/api/openstack/compute/plugins/v3/test_security_groups.py index a055ffb561..38cb07dd87 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_security_groups.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_security_groups.py @@ -332,3 +332,18 @@ def create(*args, **kwargs): self.stubs.Set(compute_api.API, 'create', create) self._test_create_extra(params, override_controller=self.no_security_groups_controller) + + def test_create_with_invalid_key_security_group(self): + param = {security_groups.ATTRIBUTE_NAME: [{'invalid': 'group'}]} + self.assertRaises(exception.ValidationError, + self._test_create_extra, param) + + def test_create_with_no_string_value_security_group(self): + param = {security_groups.ATTRIBUTE_NAME: [{'name': 12345}]} + self.assertRaises(exception.ValidationError, + self._test_create_extra, param) + + def test_create_with_too_long_value_security_group(self): + 
param = {security_groups.ATTRIBUTE_NAME: [{'name': ('a' * 260)}]} + self.assertRaises(exception.ValidationError, + self._test_create_extra, param) From dac0ce979ed7871539c8bcf19fcc73a2ab1390a3 Mon Sep 17 00:00:00 2001 From: Haiwei Xu Date: Tue, 25 Feb 2014 02:36:51 +0900 Subject: [PATCH 295/486] Catch NeutronClientException when showing a network When a network id can't be found, neutronclient raise NetworkNotFoundClient exception, but this exception is not handled by nova. This will cause a 500 error. This patch fixes this bug. Closes-Bug: #1286969 Change-Id: Ia96c9668c74374476d4dccdbdb281e99d91b0088 --- nova/network/neutronv2/api.py | 5 ++++- nova/tests/network/test_neutronv2.py | 13 +++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index 16a7d94392..850d3dff0b 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -829,7 +829,10 @@ def get_all(self, context): def get(self, context, network_uuid): """Get specific network for client.""" client = neutronv2.get_client(context) - network = client.show_network(network_uuid).get('network') or {} + try: + network = client.show_network(network_uuid).get('network') or {} + except neutron_client_exc.NetworkNotFoundClient: + raise exception.NetworkNotFound(network_id=network_uuid) network['label'] = network['name'] return network diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index 809336e606..da742f0cd7 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -2369,6 +2369,19 @@ def test_create_port_for_instance_mac_address_in_use(self, # Assert the calls. 
create_port_mock.assert_called_once_with(port_req_body) + def test_get_network_detail_not_found(self): + api = neutronapi.API() + expected_exc = exceptions.NetworkNotFoundClient() + network_uuid = '02cacbca-7d48-4a2c-8011-43eecf8a9786' + with mock.patch.object(client.Client, 'show_network', + side_effect=expected_exc) as ( + fake_show_network): + self.assertRaises(exception.NetworkNotFound, + api.get, + self.context, + network_uuid) + fake_show_network.assert_called_once_with(network_uuid) + class TestNeutronv2ModuleMethods(test.TestCase): From 8ee037a71fd0e37c94243beeda0273eacc3ba42d Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Thu, 7 Aug 2014 16:20:03 +0930 Subject: [PATCH 296/486] Makes versions API output deterministic Makes the order of available versions that the API version information is returned to clients deterministic. This is necessary to ensure that with PYTHONHASHSEED to fixed to 0 the unittests will consistently pass. Change-Id: I5b7f6b7f061424adc2263c485562e72c7cc29255 Partial-Bug: #1350287 --- nova/api/openstack/compute/views/versions.py | 2 +- .../api/openstack/compute/test_versions.py | 72 +++++++++---------- 2 files changed, 37 insertions(+), 37 deletions(-) diff --git a/nova/api/openstack/compute/views/versions.py b/nova/api/openstack/compute/views/versions.py index 242d93f1ad..572d73bd64 100644 --- a/nova/api/openstack/compute/views/versions.py +++ b/nova/api/openstack/compute/views/versions.py @@ -32,7 +32,7 @@ def __init__(self, base_url): def build_choices(self, VERSIONS, req): version_objs = [] - for version in VERSIONS: + for version in sorted(VERSIONS): version = VERSIONS[version] version_objs.append({ "id": version['id'], diff --git a/nova/tests/api/openstack/compute/test_versions.py b/nova/tests/api/openstack/compute/test_versions.py index de4f0097a5..bd0a7d6bd7 100644 --- a/nova/tests/api/openstack/compute/test_versions.py +++ b/nova/tests/api/openstack/compute/test_versions.py @@ -328,23 +328,6 @@ def 
test_multi_choice_image(self): expected = { "choices": [ - { - "id": "v3.0", - "status": "EXPERIMENTAL", - "links": [ - { - "href": "http://localhost/v3/images/1", - "rel": "self", - }, - ], - "media-types": [ - { - "base": "application/json", - "type": - "application/vnd.openstack.compute+json;version=3", - } - ], - }, { "id": "v2.0", "status": "CURRENT", @@ -367,6 +350,23 @@ def test_multi_choice_image(self): }, ], }, + { + "id": "v3.0", + "status": "EXPERIMENTAL", + "links": [ + { + "href": "http://localhost/v3/images/1", + "rel": "self", + }, + ], + "media-types": [ + { + "base": "application/json", + "type": + "application/vnd.openstack.compute+json;version=3", + } + ], + }, ], } self.assertThat(jsonutils.loads(res.body), @@ -384,7 +384,7 @@ def test_multi_choice_image_xml(self): versions = root.xpath('ns:version', namespaces=NS) self.assertEqual(len(versions), 2) - version = versions[1] + version = versions[0] self.assertEqual(version.get('id'), 'v2.0') self.assertEqual(version.get('status'), 'CURRENT') media_types = version.xpath('ns:media-types/ns:media-type', @@ -398,7 +398,7 @@ def test_multi_choice_image_xml(self): self.assertTrue(common.compare_links(links, [{'rel': 'self', 'href': 'http://localhost/v2/images/1'}])) - version = versions[0] + version = versions[1] self.assertEqual(version.get('id'), 'v3.0') self.assertEqual(version.get('status'), 'EXPERIMENTAL') media_types = version.xpath('ns:media-types/ns:media-type', @@ -432,23 +432,6 @@ def test_multi_choice_server(self): expected = { "choices": [ - { - "id": "v3.0", - "status": "EXPERIMENTAL", - "links": [ - { - "href": "http://localhost/v3/servers/" + uuid, - "rel": "self", - }, - ], - "media-types": [ - { - "base": "application/json", - "type": - "application/vnd.openstack.compute+json;version=3", - } - ], - }, { "id": "v2.0", "status": "CURRENT", @@ -471,6 +454,23 @@ def test_multi_choice_server(self): }, ], }, + { + "id": "v3.0", + "status": "EXPERIMENTAL", + "links": [ + { + "href": 
"http://localhost/v3/servers/" + uuid, + "rel": "self", + }, + ], + "media-types": [ + { + "base": "application/json", + "type": + "application/vnd.openstack.compute+json;version=3", + } + ], + }, ], } self.assertThat(jsonutils.loads(res.body), From 5de983be86fc05224d04fe48e12540b6914ce73f Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Thu, 7 Aug 2014 03:02:05 -0700 Subject: [PATCH 297/486] Return 404 when floating IP pool not found The FloatingIPPoolNotFound exception is raised only by the Neutron integration API. When that happens, the exception handling code should ensure a 404 response code is returned in order to guarantee consistency with nova-network. Change-Id: Id8ef7ee2c0d85f8b784b8d1d46c7120331f1e01a Closes-Bug: 1353936 --- nova/api/openstack/compute/contrib/floating_ips.py | 2 +- nova/tests/api/openstack/compute/contrib/test_floating_ips.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/compute/contrib/floating_ips.py b/nova/api/openstack/compute/contrib/floating_ips.py index 57626e30e1..bb6c342f11 100644 --- a/nova/api/openstack/compute/contrib/floating_ips.py +++ b/nova/api/openstack/compute/contrib/floating_ips.py @@ -171,7 +171,7 @@ def create(self, req, body=None): msg = _("IP allocation over quota.") raise webob.exc.HTTPForbidden(explanation=msg) except exception.FloatingIpPoolNotFound as e: - raise webob.exc.HTTPBadRequest(explanation=e.format_message()) + raise webob.exc.HTTPNotFound(explanation=e.format_message()) return _translate_floating_ip_view(ip) diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py index 7b96a8f513..27fc434a3c 100644 --- a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py @@ -348,7 +348,7 @@ def test_floating_ip_allocate_quota_exceed_in_pool(self, allocate_mock): side_effect=exception.FloatingIpPoolNotFound()) 
def test_floating_ip_create_with_unknown_pool(self, allocate_mock): req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips') - ex = self.assertRaises(webob.exc.HTTPBadRequest, + ex = self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, {'pool': 'non_existent_pool'}) self.assertIn('Floating ip pool not found.', ex.explanation) From 1b0e4725ae0efa52de76ec211bba9d061eb04140 Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Thu, 7 Aug 2014 18:54:25 +0800 Subject: [PATCH 298/486] Return 404 instead of 501 for unsupported actions Change unsupported actions return code to 404 from 501 to ensure consistency with other unsupported actions in the REST API. Change-Id: Ibf40ef9108f209b70ffbab6e2387d2d469f7373e --- nova/api/openstack/compute/consoles.py | 4 ---- nova/api/openstack/compute/contrib/attach_interfaces.py | 5 ----- nova/api/openstack/compute/ips.py | 6 ------ nova/api/openstack/compute/plugins/v3/ips.py | 6 ------ 4 files changed, 21 deletions(-) diff --git a/nova/api/openstack/compute/consoles.py b/nova/api/openstack/compute/consoles.py index 67e3a7ad86..40b4aceabe 100644 --- a/nova/api/openstack/compute/consoles.py +++ b/nova/api/openstack/compute/consoles.py @@ -111,10 +111,6 @@ def show(self, req, server_id, id): raise exc.HTTPNotFound() return _translate_detail_keys(console) - def update(self, req, server_id, id, body): - """You can't update a console.""" - raise exc.HTTPNotImplemented() - def delete(self, req, server_id, id): """Deletes a console.""" try: diff --git a/nova/api/openstack/compute/contrib/attach_interfaces.py b/nova/api/openstack/compute/contrib/attach_interfaces.py index 01f22c00b5..dfcc364465 100644 --- a/nova/api/openstack/compute/contrib/attach_interfaces.py +++ b/nova/api/openstack/compute/contrib/attach_interfaces.py @@ -129,11 +129,6 @@ def create(self, req, server_id, body): return self.show(req, server_id, vif['id']) - def update(self, req, server_id, id, body): - """Update a interface attachment. 
We don't currently support this.""" - msg = _("Attachments update is not supported") - raise exc.HTTPNotImplemented(explanation=msg) - def delete(self, req, server_id, id): """Detach an interface from an instance.""" context = req.environ['nova.context'] diff --git a/nova/api/openstack/compute/ips.py b/nova/api/openstack/compute/ips.py index a537f21dcb..7474145b95 100644 --- a/nova/api/openstack/compute/ips.py +++ b/nova/api/openstack/compute/ips.py @@ -68,12 +68,6 @@ def _get_instance(self, context, server_id): raise exc.HTTPNotFound(explanation=msg) return instance - def create(self, req, server_id, body): - raise exc.HTTPNotImplemented() - - def delete(self, req, server_id, id): - raise exc.HTTPNotImplemented() - @wsgi.serializers(xml=AddressesTemplate) def index(self, req, server_id): context = req.environ["nova.context"] diff --git a/nova/api/openstack/compute/plugins/v3/ips.py b/nova/api/openstack/compute/plugins/v3/ips.py index 194a093ea7..cfdbacf44c 100644 --- a/nova/api/openstack/compute/plugins/v3/ips.py +++ b/nova/api/openstack/compute/plugins/v3/ips.py @@ -42,12 +42,6 @@ def _get_instance(self, context, server_id): raise exc.HTTPNotFound(explanation=msg) return instance - def create(self, req, server_id, body): - raise exc.HTTPNotImplemented() - - def delete(self, req, server_id, id): - raise exc.HTTPNotImplemented() - def index(self, req, server_id): context = req.environ["nova.context"] instance = self._get_instance(context, server_id) From 4113bc37984e7552c92e22c63d15f6f30d7c7d29 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Sun, 27 Jul 2014 17:15:21 -0400 Subject: [PATCH 299/486] Convert glance unit tests to not use stubs Takes unit tests that were testing some helper functions in the nova.image.glance module out of the TestGlanceImageService test case class and puts them in separate test classes that do not use the nova.tests.glance.stubs module -- instead, I converted them to just use the simpler mock library and only test the specific things 
that the unit tests were supposed to test. Change-Id: I2c19268ce6a20eaaed99eda97f63cd1babd5e380 --- nova/tests/image/test_glance.py | 279 ++++++++++++++------------------ 1 file changed, 122 insertions(+), 157 deletions(-) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 3ae970425c..46f50b825a 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -37,13 +37,101 @@ import nova.virt.libvirt.utils as lv_utils CONF = cfg.CONF +NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000" -class NullWriter(object): - """Used to test ImageService.get which takes a writer object.""" +class tzinfo(datetime.tzinfo): + @staticmethod + def utcoffset(*args, **kwargs): + return datetime.timedelta() - def write(self, *arg, **kwargs): - pass +NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo()) + + +class TestConversions(test.NoDBTestCase): + def test_convert_timestamps_to_datetimes(self): + fixture = {'name': None, + 'properties': {}, + 'status': None, + 'is_public': None, + 'created_at': NOW_GLANCE_FORMAT, + 'updated_at': NOW_GLANCE_FORMAT, + 'deleted_at': NOW_GLANCE_FORMAT} + result = glance._convert_timestamps_to_datetimes(fixture) + self.assertEqual(result['created_at'], NOW_DATETIME) + self.assertEqual(result['updated_at'], NOW_DATETIME) + self.assertEqual(result['deleted_at'], NOW_DATETIME) + + def _test_extracting_missing_attributes(self, include_locations): + # Verify behavior from glance objects that are missing attributes + class MyFakeGlanceImage(glance_stubs.FakeImage): + def __init__(self, metadata): + IMAGE_ATTRIBUTES = ['size', 'owner', 'id', 'created_at', + 'updated_at', 'status', 'min_disk', + 'min_ram', 'is_public'] + raw = dict.fromkeys(IMAGE_ATTRIBUTES) + raw.update(metadata) + self.__dict__['raw'] = raw + + metadata = { + 'id': 1, + 'created_at': NOW_DATETIME, + 'updated_at': NOW_DATETIME, + } + image = MyFakeGlanceImage(metadata) + observed = glance._extract_attributes( + image, 
include_locations=include_locations) + expected = { + 'id': 1, + 'name': None, + 'is_public': None, + 'size': None, + 'min_disk': None, + 'min_ram': None, + 'disk_format': None, + 'container_format': None, + 'checksum': None, + 'created_at': NOW_DATETIME, + 'updated_at': NOW_DATETIME, + 'deleted_at': None, + 'deleted': None, + 'status': None, + 'properties': {}, + 'owner': None + } + if include_locations: + expected['locations'] = None + expected['direct_url'] = None + self.assertEqual(expected, observed) + + def test_extracting_missing_attributes_include_locations(self): + self._test_extracting_missing_attributes(include_locations=True) + + def test_extracting_missing_attributes_exclude_locations(self): + self._test_extracting_missing_attributes(include_locations=False) + + +class TestExceptionTranslations(test.NoDBTestCase): + + def test_client_forbidden_to_imagenotauthed(self): + in_exc = glanceclient.exc.Forbidden('123') + out_exc = glance._translate_image_exception('123', in_exc) + self.assertIsInstance(out_exc, exception.ImageNotAuthorized) + + def test_client_httpforbidden_converts_to_imagenotauthed(self): + in_exc = glanceclient.exc.HTTPForbidden('123') + out_exc = glance._translate_image_exception('123', in_exc) + self.assertIsInstance(out_exc, exception.ImageNotAuthorized) + + def test_client_notfound_converts_to_imagenotfound(self): + in_exc = glanceclient.exc.NotFound('123') + out_exc = glance._translate_image_exception('123', in_exc) + self.assertIsInstance(out_exc, exception.ImageNotFound) + + def test_client_httpnotfound_converts_to_imagenotfound(self): + in_exc = glanceclient.exc.HTTPNotFound('123') + out_exc = glance._translate_image_exception('123', in_exc) + self.assertIsInstance(out_exc, exception.ImageNotFound) class TestGlanceSerializer(test.NoDBTestCase): @@ -82,6 +170,36 @@ def test_serialize(self): self.assertEqual(glance._convert_from_string(converted), metadata) +class TestGetImageService(test.NoDBTestCase): + 
@mock.patch.object(glance.GlanceClientWrapper, '__init__', + return_value=None) + def test_get_remote_service_from_id(self, gcwi_mocked): + id_or_uri = '123' + _ignored, image_id = glance.get_remote_image_service( + mock.sentinel.ctx, id_or_uri) + self.assertEqual(id_or_uri, image_id) + gcwi_mocked.assert_called_once_with() + + @mock.patch.object(glance.GlanceClientWrapper, '__init__', + return_value=None) + def test_get_remote_service_from_href(self, gcwi_mocked): + id_or_uri = 'http://127.0.0.1/123' + _ignored, image_id = glance.get_remote_image_service( + mock.sentinel.ctx, id_or_uri) + self.assertEqual('123', image_id) + gcwi_mocked.assert_called_once_with(context=mock.sentinel.ctx, + host='127.0.0.1', + port=80, + use_ssl=False) + + +class NullWriter(object): + """Used to test ImageService.get which takes a writer object.""" + + def write(self, *arg, **kwargs): + pass + + class TestGlanceImageService(test.NoDBTestCase): """Tests the Glance image service. @@ -94,15 +212,6 @@ class TestGlanceImageService(test.NoDBTestCase): APIs (OpenStack, EC2) """ - NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22" - NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000" - - class tzinfo(datetime.tzinfo): - @staticmethod - def utcoffset(*args, **kwargs): - return datetime.timedelta() - - NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo()) def setUp(self): super(TestGlanceImageService, self).setUp() @@ -137,34 +246,6 @@ def _fake_create_glance_client(context, host, port, use_ssl, version): 'fake', 'fake_host', 9292) return glance.GlanceImageService(client=client_wrapper) - @staticmethod - def _make_fixture(**kwargs): - fixture = {'name': None, - 'properties': {}, - 'status': None, - 'is_public': None} - fixture.update(kwargs) - return fixture - - def _make_datetime_fixture(self): - return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT, - updated_at=self.NOW_GLANCE_FORMAT, - deleted_at=self.NOW_GLANCE_FORMAT) - - def test_show_makes_datetimes(self): - fixture 
= self._make_datetime_fixture() - image_id = self.service.create(self.context, fixture)['id'] - image_meta = self.service.show(self.context, image_id) - self.assertEqual(image_meta['created_at'], self.NOW_DATETIME) - self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME) - - def test_detail_makes_datetimes(self): - fixture = self._make_datetime_fixture() - self.service.create(self.context, fixture) - image_meta = self.service.detail(self.context)[0] - self.assertEqual(image_meta['created_at'], self.NOW_DATETIME) - self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME) - def test_page_size(self): with mock.patch.object(glance.GlanceClientWrapper, 'call') as a_mock: self.service.detail(self.context, page_size=5) @@ -380,122 +461,6 @@ def data(self, image_id): self.assertTrue(client.data_called) self.assertFalse(mock_copy_image.called) - def test_client_forbidden_converts_to_imagenotauthed(self): - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - """A client that raises a Forbidden exception.""" - def get(self, image_id): - raise glanceclient.exc.Forbidden(image_id) - - client = MyGlanceStubClient() - service = self._create_image_service(client) - image_id = 1 # doesn't matter - self.assertRaises(exception.ImageNotAuthorized, service.download, - self.context, image_id, dst_path=os.devnull) - - def test_client_httpforbidden_converts_to_imagenotauthed(self): - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - """A client that raises a HTTPForbidden exception.""" - def get(self, image_id): - raise glanceclient.exc.HTTPForbidden(image_id) - - client = MyGlanceStubClient() - service = self._create_image_service(client) - image_id = 1 # doesn't matter - self.assertRaises(exception.ImageNotAuthorized, service.download, - self.context, image_id, dst_path=os.devnull) - - def test_client_notfound_converts_to_imagenotfound(self): - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - """A client that raises a NotFound exception.""" - def 
get(self, image_id): - raise glanceclient.exc.NotFound(image_id) - - client = MyGlanceStubClient() - service = self._create_image_service(client) - image_id = 1 # doesn't matter - self.assertRaises(exception.ImageNotFound, service.download, - self.context, image_id, dst_path=os.devnull) - - def test_client_httpnotfound_converts_to_imagenotfound(self): - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - """A client that raises a HTTPNotFound exception.""" - def get(self, image_id): - raise glanceclient.exc.HTTPNotFound(image_id) - - client = MyGlanceStubClient() - service = self._create_image_service(client) - image_id = 1 # doesn't matter - self.assertRaises(exception.ImageNotFound, service.download, - self.context, image_id, dst_path=os.devnull) - - def test_glance_client_image_id(self): - fixture = self._make_fixture(name='test image') - image_id = self.service.create(self.context, fixture)['id'] - (service, same_id) = glance.get_remote_image_service( - self.context, image_id) - self.assertEqual(same_id, image_id) - - def test_glance_client_image_ref(self): - fixture = self._make_fixture(name='test image') - image_id = self.service.create(self.context, fixture)['id'] - image_url = 'http://something-less-likely/%s' % image_id - (service, same_id) = glance.get_remote_image_service( - self.context, image_url) - self.assertEqual(same_id, image_id) - self.assertEqual(service._client.host, 'something-less-likely') - - def _test_extracting_missing_attributes(self, include_locations): - """Verify behavior from glance objects that are missing attributes - - This fakes the image class and is missing attribute as the client can - return if they're not set in the database. 
- """ - class MyFakeGlanceImage(glance_stubs.FakeImage): - def __init__(self, metadata): - IMAGE_ATTRIBUTES = ['size', 'owner', 'id', 'created_at', - 'updated_at', 'status', 'min_disk', - 'min_ram', 'is_public'] - raw = dict.fromkeys(IMAGE_ATTRIBUTES) - raw.update(metadata) - self.__dict__['raw'] = raw - - metadata = { - 'id': 1, - 'created_at': self.NOW_DATETIME, - 'updated_at': self.NOW_DATETIME, - } - image = MyFakeGlanceImage(metadata) - observed = glance._extract_attributes( - image, include_locations=include_locations) - expected = { - 'id': 1, - 'name': None, - 'is_public': None, - 'size': None, - 'min_disk': None, - 'min_ram': None, - 'disk_format': None, - 'container_format': None, - 'checksum': None, - 'created_at': self.NOW_DATETIME, - 'updated_at': self.NOW_DATETIME, - 'deleted_at': None, - 'deleted': None, - 'status': None, - 'properties': {}, - 'owner': None - } - if include_locations: - expected['locations'] = None - expected['direct_url'] = None - self.assertEqual(expected, observed) - - def test_extracting_missing_attributes_include_locations(self): - self._test_extracting_missing_attributes(include_locations=True) - - def test_extracting_missing_attributes_exclude_locations(self): - self._test_extracting_missing_attributes(include_locations=False) - def _create_failing_glance_client(info): class MyGlanceStubClient(glance_stubs.StubGlanceClient): From e779c049105a4fbd76a9207e9a42934f87c816dc Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Sun, 27 Jul 2014 17:28:39 -0400 Subject: [PATCH 300/486] Remove duplicate test of passing glance params Simply removes a duplicate test of the passing of page size parameters from the TestGlanceImageService and adds the page_size parameter and check to the similar test in TestDetail.test_detail_params_passed() test method. 
Change-Id: I2f7fafbb5e33e0f81fd8d493babc0d47966c6791 --- nova/tests/image/test_glance.py | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 46f50b825a..2effafa67a 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -246,14 +246,6 @@ def _fake_create_glance_client(context, host, port, use_ssl, version): 'fake', 'fake_host', 9292) return glance.GlanceImageService(client=client_wrapper) - def test_page_size(self): - with mock.patch.object(glance.GlanceClientWrapper, 'call') as a_mock: - self.service.detail(self.context, page_size=5) - self.assertEqual(a_mock.called, True) - a_mock.assert_called_with(self.context, 1, 'list', - filters={'is_public': 'none'}, - page_size=5) - def test_download_with_retries(self): tries = [0] @@ -777,20 +769,22 @@ def test_detail_success_unavailable(self, is_avail_mock, trans_from_mock, self.assertFalse(trans_from_mock.called) self.assertEqual([], images) - @mock.patch('nova.image.glance._extract_query_params') @mock.patch('nova.image.glance._translate_from_glance') @mock.patch('nova.image.glance._is_image_available') - def test_detail_params_passed(self, is_avail_mock, _trans_from_mock, - ext_query_mock): - params = dict(limit=10) - ext_query_mock.return_value = params + def test_detail_params_passed(self, is_avail_mock, _trans_from_mock): client = mock.MagicMock() client.call.return_value = [mock.sentinel.images_0] ctx = mock.sentinel.ctx service = glance.GlanceImageService(client) - service.detail(ctx, **params) + service.detail(ctx, page_size=5, limit=10) - client.call.assert_called_once_with(ctx, 1, 'list', limit=10) + expected_filters = { + 'is_public': 'none' + } + client.call.assert_called_once_with(ctx, 1, 'list', + filters=expected_filters, + page_size=5, + limit=10) @mock.patch('nova.image.glance._reraise_translated_exception') @mock.patch('nova.image.glance._extract_query_params') 
From 6132f991bdc8515aa665db16fef260ff71a618e6 Mon Sep 17 00:00:00 2001 From: Burt Holzman Date: Fri, 11 Jul 2014 16:31:57 -0500 Subject: [PATCH 301/486] Enable terminate for EC2 InstanceInitiatedShutdownBehavior The EC2 API supports an instance attribute called InstanceInitiatedShutdownBehavior (IISB) which can be set to 'stop' or 'terminate' (default: 'stop'). When the instance initiates its own shutdown, this determines whether or not the instance hangs around in the Shutoff state or is terminated by the system. In nova, this is handled by the shutdown_terminate boolean. IISB = stop => shutdown_terminate = False IISB = terminate => shutdown_terminate = True sync_instance_power_state now invokes compute_api.delete if shutdown_terminate = True and we detect the instance power state has gone from Running to Shutdown. Closes-Bug: #1131395 Change-Id: I284ae7a84384f19131703c4ad44e0e5f5b03f5d4 --- nova/api/ec2/cloud.py | 6 +++++- nova/compute/api.py | 28 +++++++++----------------- nova/compute/manager.py | 5 ++++- nova/tests/api/ec2/test_cloud.py | 6 ++++-- nova/tests/compute/test_compute_api.py | 3 --- nova/tests/compute/test_compute_mgr.py | 19 +++++++++++++---- 6 files changed, 38 insertions(+), 29 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 555e9b6ef5..0048919bb7 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -1340,6 +1340,9 @@ def run_instances(self, context, **kwargs): msg = _('Image must be available') raise exception.ImageNotActive(message=msg) + iisb = kwargs.get('instance_initiated_shutdown_behavior', 'stop') + shutdown_terminate = (iisb == 'terminate') + flavor = objects.Flavor.get_by_name(context, kwargs.get('instance_type', None)) @@ -1355,7 +1358,8 @@ def run_instances(self, context, **kwargs): security_group=kwargs.get('security_group'), availability_zone=kwargs.get('placement', {}).get( 'availability_zone'), - block_device_mapping=kwargs.get('block_device_mapping', {})) + 
block_device_mapping=kwargs.get('block_device_mapping', {}), + shutdown_terminate=shutdown_terminate) instances = self._format_run_instances(context, resv_id) if instances: diff --git a/nova/compute/api.py b/nova/compute/api.py index b75f701256..7f9495b666 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -801,7 +801,7 @@ def _build_filter_properties(self, context, scheduler_hints, forced_host, def _provision_instances(self, context, instance_type, min_count, max_count, base_options, boot_meta, security_groups, - block_device_mapping): + block_device_mapping, shutdown_terminate): # Reserve quotas num_instances, quotas = self._check_num_instances_quota( context, instance_type, min_count, max_count) @@ -814,7 +814,7 @@ def _provision_instances(self, context, instance_type, min_count, instance = self.create_db_entry_for_new_instance( context, instance_type, boot_meta, instance, security_groups, block_device_mapping, - num_instances, i) + num_instances, i, shutdown_terminate) instances.append(instance) # send a state update notification for the initial create to @@ -925,7 +925,7 @@ def _create_instance(self, context, instance_type, requested_networks, config_drive, block_device_mapping, auto_disk_config, reservation_id=None, scheduler_hints=None, - legacy_bdm=True): + legacy_bdm=True, shutdown_terminate=False): """Verify all the input parameters regardless of the provisioning strategy being performed and schedule the instance(s) for creation. 
@@ -985,7 +985,7 @@ def _create_instance(self, context, instance_type, instances = self._provision_instances(context, instance_type, min_count, max_count, base_options, boot_meta, security_groups, - block_device_mapping) + block_device_mapping, shutdown_terminate) filter_properties = self._build_filter_properties(context, scheduler_hints, forced_host, forced_node, instance_type) @@ -1151,15 +1151,6 @@ def _subsequent_list(l): if num_local > max_local: raise exception.InvalidBDMLocalsLimit() - def _populate_instance_shutdown_terminate(self, instance, image, - block_device_mapping): - """Populate instance shutdown_terminate information.""" - image_properties = image.get('properties', {}) - if (block_device_mapping or - image_properties.get('mappings') or - image_properties.get('block_device_mapping')): - instance.shutdown_terminate = False - def _populate_instance_names(self, instance, num_instances): """Populate instance display_name and hostname.""" display_name = instance.get('display_name') @@ -1226,7 +1217,7 @@ def _populate_instance_for_create(self, instance, image, # the compute api. That should probably be cleaned up, though. def create_db_entry_for_new_instance(self, context, instance_type, image, instance, security_group, block_device_mapping, num_instances, - index): + index, shutdown_terminate=False): """Create an entry in the DB for this new instance, including any related table updates (such as security group, etc). 
@@ -1239,8 +1230,7 @@ def create_db_entry_for_new_instance(self, context, instance_type, image, self._populate_instance_names(instance, num_instances) - self._populate_instance_shutdown_terminate(instance, image, - block_device_mapping) + instance.shutdown_terminate = shutdown_terminate self.security_group_api.ensure_default(context) instance.create(context) @@ -1320,7 +1310,8 @@ def create(self, context, instance_type, injected_files=None, admin_password=None, block_device_mapping=None, access_ip_v4=None, access_ip_v6=None, requested_networks=None, config_drive=None, - auto_disk_config=None, scheduler_hints=None, legacy_bdm=True): + auto_disk_config=None, scheduler_hints=None, legacy_bdm=True, + shutdown_terminate=False): """Provision instances, sending instance information to the scheduler. The scheduler will determine where the instance(s) go and will handle creating the DB entries. @@ -1349,7 +1340,8 @@ def create(self, context, instance_type, requested_networks, config_drive, block_device_mapping, auto_disk_config, scheduler_hints=scheduler_hints, - legacy_bdm=legacy_bdm) + legacy_bdm=legacy_bdm, + shutdown_terminate=shutdown_terminate) def trigger_provider_fw_rules_refresh(self, context): """Called when a rule is added/removed from a provider firewall.""" diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 78ce537967..955ebc23d1 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -5536,7 +5536,10 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state, # Note(maoy): here we call the API instead of # brutally updating the vm_state in the database # to allow all the hooks and checks to be performed. 
- self.compute_api.stop(context, db_instance) + if db_instance.shutdown_terminate: + self.compute_api.delete(context, db_instance) + else: + self.compute_api.stop(context, db_instance) except Exception: # Note(maoy): there is no need to propagate the error # because the same power_state will be retrieved next diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 8c80885c2d..bf1fd6338f 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -2832,9 +2832,9 @@ def fake_show(self, context, id_, **kwargs): 'container_format': 'ami', 'status': 'active'} - # NOTE(yamahata): create ami-3 ... ami-6 + # NOTE(yamahata): create ami-3 ... ami-7 # ami-1 and ami-2 is already created by setUp() - for i in range(3, 7): + for i in range(3, 8): db.s3_image_create(self.context, 'ami-%d' % i) self.stubs.Set(fake._FakeImageService, 'show', fake_show) @@ -2843,6 +2843,8 @@ def fake_show(self, context, id_, **kwargs): test_dia_iisb('stop', image_id='ami-4') test_dia_iisb('stop', image_id='ami-5') test_dia_iisb('stop', image_id='ami-6') + test_dia_iisb('terminate', image_id='ami-7', + instance_initiated_shutdown_behavior='terminate') def test_create_delete_tags(self): diff --git a/nova/tests/compute/test_compute_api.py b/nova/tests/compute/test_compute_api.py index 1e5873bee7..b1d004558f 100644 --- a/nova/tests/compute/test_compute_api.py +++ b/nova/tests/compute/test_compute_api.py @@ -2084,8 +2084,6 @@ def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot): @mock.patch.object(objects.Instance, 'create') @mock.patch.object(compute_api.SecurityGroupAPI, 'ensure_default') - @mock.patch.object(compute_api.API, - '_populate_instance_shutdown_terminate') @mock.patch.object(compute_api.API, '_populate_instance_names') @mock.patch.object(compute_api.API, '_populate_instance_for_create') @mock.patch.object(cinder.API, 'get', @@ -2093,7 +2091,6 @@ def test_validate_bdm_with_cinder_down(self, mock_get, 
mock_get_snapshot): def test_create_db_entry_for_new_instancewith_cinder_down(self, mock_get, mock_create, mock_names, - mock_terminate, mock_ensure, mock_inst_create): instance = self._create_instance_obj() diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index d5ffb32056..fbb1d6dc6e 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -799,13 +799,15 @@ def fake_get(*a, **k): self.mox.ReplayAll() self.compute._instance_usage_audit(self.context) - def _get_sync_instance(self, power_state, vm_state, task_state=None): + def _get_sync_instance(self, power_state, vm_state, task_state=None, + shutdown_terminate=False): instance = objects.Instance() instance.uuid = 'fake-uuid' instance.power_state = power_state instance.vm_state = vm_state instance.host = self.compute.host instance.task_state = task_state + instance.shutdown_terminate = shutdown_terminate self.mox.StubOutWithMock(instance, 'refresh') self.mox.StubOutWithMock(instance, 'save') return instance @@ -829,13 +831,17 @@ def test_sync_instance_power_state_running_stopped(self): self.assertEqual(instance.power_state, power_state.SHUTDOWN) def _test_sync_to_stop(self, power_state, vm_state, driver_power_state, - stop=True, force=False): - instance = self._get_sync_instance(power_state, vm_state) + stop=True, force=False, shutdown_terminate=False): + instance = self._get_sync_instance( + power_state, vm_state, shutdown_terminate=shutdown_terminate) instance.refresh(use_slave=False) instance.save() self.mox.StubOutWithMock(self.compute.compute_api, 'stop') + self.mox.StubOutWithMock(self.compute.compute_api, 'delete') self.mox.StubOutWithMock(self.compute.compute_api, 'force_stop') - if stop: + if shutdown_terminate: + self.compute.compute_api.delete(self.context, instance) + elif stop: if force: self.compute.compute_api.force_stop(self.context, instance) else: @@ -858,6 +864,11 @@ def 
test_sync_instance_power_state_to_stop(self): self._test_sync_to_stop(power_state.SHUTDOWN, vm_states.STOPPED, power_state.RUNNING, force=True) + def test_sync_instance_power_state_to_terminate(self): + self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, + power_state.SHUTDOWN, + force=False, shutdown_terminate=True) + def test_sync_instance_power_state_to_no_stop(self): for ps in (power_state.PAUSED, power_state.NOSTATE): self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps, From 59f2fe47e3bac6288f2e2db26cdb89c483379aa3 Mon Sep 17 00:00:00 2001 From: Yunhong Jiang Date: Mon, 16 Jun 2014 15:06:18 -0700 Subject: [PATCH 302/486] Add ListOfDictOfNullableString field type A new field type is added, it will be a list, with each item in the list is a dict of string. Implements: blueprint make-resource-tracker-use-objects Change-Id: I4d3a310800afe344bc18c755658fc1dc3e792de8 --- nova/objects/fields.py | 4 ++++ nova/tests/objects/test_fields.py | 23 +++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/nova/objects/fields.py b/nova/objects/fields.py index 27eb12fd96..ecdcbde361 100644 --- a/nova/objects/fields.py +++ b/nova/objects/fields.py @@ -571,6 +571,10 @@ class ListOfStringsField(AutoTypedField): AUTO_TYPE = List(String()) +class ListOfDictOfNullableStringsField(AutoTypedField): + AUTO_TYPE = List(Dict(String(), nullable=True)) + + class ObjectField(AutoTypedField): def __init__(self, objtype, **kwargs): self.AUTO_TYPE = Object(objtype) diff --git a/nova/tests/objects/test_fields.py b/nova/tests/objects/test_fields.py index fd437a2207..7fc6aeceb2 100644 --- a/nova/tests/objects/test_fields.py +++ b/nova/tests/objects/test_fields.py @@ -222,6 +222,29 @@ def test_stringify(self): 'key': 'val'})) +class TestListOfDictOfNullableStringsField(TestField): + def setUp(self): + super(TestListOfDictOfNullableStringsField, self).setUp() + self.field = fields.ListOfDictOfNullableStringsField() + self.coerce_good_values = [([{'f': 'b', 
'f1': 'b1'}, {'f2': 'b2'}], + [{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}]), + ([{'f': 1}, {'f1': 'b1'}], + [{'f': '1'}, {'f1': 'b1'}]), + ([{'foo': None}], [{'foo': None}])] + self.coerce_bad_values = [[{1: 'a'}], ['ham', 1], ['eggs']] + self.to_primitive_values = [([{'f': 'b'}, {'f1': 'b1'}, {'f2': None}], + [{'f': 'b'}, {'f1': 'b1'}, {'f2': None}])] + self.from_primitive_values = [([{'f': 'b'}, {'f1': 'b1'}, + {'f2': None}], + [{'f': 'b'}, {'f1': 'b1'}, + {'f2': None}])] + + def test_stringify(self): + self.assertEqual("[{f=None,f1='b1'},{f2='b2'}]", + self.field.stringify( + [{'f': None, 'f1': 'b1'}, {'f2': 'b2'}])) + + class TestList(TestField): def setUp(self): super(TestList, self).setUp() From 9ca93225e07cb62255efacf769cfda445833a8c6 Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Wed, 25 Jun 2014 11:12:02 +0930 Subject: [PATCH 303/486] Standardize logging for v2 api extensions - add log translation hints for warning, error and info levels - remove pointless debug message Change-Id: I67c5e77e62f8ef89204b67426b88856ef91dfc26 --- .../compute/contrib/admin_actions.py | 21 ++++++++++--------- .../openstack/compute/contrib/aggregates.py | 2 -- .../compute/contrib/floating_ips_bulk.py | 2 -- .../api/openstack/compute/contrib/multinic.py | 3 ++- .../compute/contrib/os_tenant_networks.py | 7 ++++--- nova/api/openstack/compute/servers.py | 3 +-- 6 files changed, 18 insertions(+), 20 deletions(-) diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py index c7f7f40381..8921aeb944 100644 --- a/nova/api/openstack/compute/contrib/admin_actions.py +++ b/nova/api/openstack/compute/contrib/admin_actions.py @@ -25,6 +25,7 @@ from nova.compute import vm_states from nova import exception from nova.i18n import _ +from nova.i18n import _LE from nova.openstack.common import log as logging from nova.openstack.common import strutils @@ -64,7 +65,7 @@ def _pause(self, req, id, body): raise exc.HTTPNotFound(explanation=msg) 
except Exception: readable = traceback.format_exc() - LOG.exception(_("Compute.api::pause %s"), readable) + LOG.exception(_LE("Compute.api::pause %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @@ -86,7 +87,7 @@ def _unpause(self, req, id, body): raise exc.HTTPNotFound(explanation=msg) except Exception: readable = traceback.format_exc() - LOG.exception(_("Compute.api::unpause %s"), readable) + LOG.exception(_LE("Compute.api::unpause %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @@ -108,7 +109,7 @@ def _suspend(self, req, id, body): raise exc.HTTPNotFound(explanation=msg) except Exception: readable = traceback.format_exc() - LOG.exception(_("compute.api::suspend %s"), readable) + LOG.exception(_LE("compute.api::suspend %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @@ -130,7 +131,7 @@ def _resume(self, req, id, body): raise exc.HTTPNotFound(explanation=msg) except Exception: readable = traceback.format_exc() - LOG.exception(_("compute.api::resume %s"), readable) + LOG.exception(_LE("compute.api::resume %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @@ -156,7 +157,7 @@ def _migrate(self, req, id, body): except exception.NoValidHost as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except Exception as e: - LOG.exception(_("Error in migrate %s"), e) + LOG.exception(_LE("Error in migrate %s"), e) raise exc.HTTPBadRequest() return webob.Response(status_int=202) @@ -175,7 +176,7 @@ def _reset_network(self, req, id, body): raise exc.HTTPConflict(explanation=e.format_message()) except Exception: readable = traceback.format_exc() - LOG.exception(_("Compute.api::reset_network %s"), readable) + LOG.exception(_LE("Compute.api::reset_network %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @@ -194,7 +195,7 @@ def _inject_network_info(self, req, id, body): 
raise exc.HTTPConflict(explanation=e.format_message()) except Exception: readable = traceback.format_exc() - LOG.exception(_("Compute.api::inject_network_info %s"), readable) + LOG.exception(_LE("Compute.api::inject_network_info %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @@ -211,7 +212,7 @@ def _lock(self, req, id, body): raise exc.HTTPNotFound(explanation=msg) except Exception: readable = traceback.format_exc() - LOG.exception(_("Compute.api::lock %s"), readable) + LOG.exception(_LE("Compute.api::lock %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @@ -230,7 +231,7 @@ def _unlock(self, req, id, body): raise exc.HTTPNotFound(explanation=msg) except Exception: readable = traceback.format_exc() - LOG.exception(_("Compute.api::unlock %s"), readable) + LOG.exception(_LE("Compute.api::unlock %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @@ -388,7 +389,7 @@ def _reset_state(self, req, id, body): raise exc.HTTPNotFound(explanation=msg) except Exception: readable = traceback.format_exc() - LOG.exception(_("Compute.api::resetState %s"), readable) + LOG.exception(_LE("Compute.api::resetState %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) diff --git a/nova/api/openstack/compute/contrib/aggregates.py b/nova/api/openstack/compute/contrib/aggregates.py index e6a29a8e1b..852f4be935 100644 --- a/nova/api/openstack/compute/contrib/aggregates.py +++ b/nova/api/openstack/compute/contrib/aggregates.py @@ -23,10 +23,8 @@ from nova.compute import api as compute_api from nova import exception from nova.i18n import _ -from nova.openstack.common import log as logging from nova import utils -LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('compute', 'aggregates') diff --git a/nova/api/openstack/compute/contrib/floating_ips_bulk.py b/nova/api/openstack/compute/contrib/floating_ips_bulk.py 
index 4a6facba13..6b75e89e9f 100644 --- a/nova/api/openstack/compute/contrib/floating_ips_bulk.py +++ b/nova/api/openstack/compute/contrib/floating_ips_bulk.py @@ -85,8 +85,6 @@ def create(self, req, body): raise webob.exc.HTTPUnprocessableEntity() params = body['floating_ips_bulk_create'] - LOG.debug(params) - if 'ip_range' not in params: raise webob.exc.HTTPUnprocessableEntity() ip_range = params['ip_range'] diff --git a/nova/api/openstack/compute/contrib/multinic.py b/nova/api/openstack/compute/contrib/multinic.py index 65937783f8..40a61a778e 100644 --- a/nova/api/openstack/compute/contrib/multinic.py +++ b/nova/api/openstack/compute/contrib/multinic.py @@ -23,6 +23,7 @@ from nova import compute from nova import exception from nova.i18n import _ +from nova.i18n import _LE from nova.openstack.common import log as logging @@ -81,7 +82,7 @@ def _remove_fixed_ip(self, req, id, body): try: self.compute_api.remove_fixed_ip(context, instance, address) except exception.FixedIpNotFoundForSpecificInstance: - LOG.exception(_("Unable to find address %r") % address, + LOG.exception(_LE("Unable to find address %r"), address, instance=instance) raise exc.HTTPBadRequest() diff --git a/nova/api/openstack/compute/contrib/os_tenant_networks.py b/nova/api/openstack/compute/contrib/os_tenant_networks.py index 71556b7cf5..5fdfe1a723 100644 --- a/nova/api/openstack/compute/contrib/os_tenant_networks.py +++ b/nova/api/openstack/compute/contrib/os_tenant_networks.py @@ -23,6 +23,7 @@ from nova import context as nova_context from nova import exception from nova.i18n import _ +from nova.i18n import _LE from nova.i18n import _LI import nova.network from nova.openstack.common import log as logging @@ -81,7 +82,7 @@ def _refresh_default_networks(self): try: self._default_networks = self._get_default_networks() except Exception: - LOG.exception(_("Failed to get default networks")) + LOG.exception(_LE("Failed to get default networks")) def _get_default_networks(self): project_id = 
CONF.neutron_default_tenant_id @@ -120,8 +121,8 @@ def delete(self, req, id): reservation = QUOTAS.reserve(context, networks=-1) except Exception: reservation = None - LOG.exception(_("Failed to update usages deallocating " - "network.")) + LOG.exception(_LE("Failed to update usages deallocating " + "network.")) LOG.info(_LI("Deleting network with id %s"), id) diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index dbe7290fa5..c5cc30b0a6 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -606,8 +606,7 @@ def _get_servers(self, req, is_detail): msg = _('marker [%s] not found') % marker raise exc.HTTPBadRequest(explanation=msg) except exception.FlavorNotFound: - log_msg = _("Flavor '%s' could not be found ") - LOG.debug(log_msg, search_opts['flavor']) + LOG.debug("Flavor '%s' could not be found", search_opts['flavor']) # TODO(mriedem): Move to ObjectListBase.__init__ for empty lists. instance_list = objects.InstanceList(objects=[]) From 8c8c5f7c8548523f09d45ea3346b7a0754f3224e Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Thu, 26 Jun 2014 14:54:49 +0930 Subject: [PATCH 304/486] Standardize logging for v3 api extensions - add log translation hints - remove unneeded logging imports Change-Id: I4845ee70c23336b61e5aac70b9b5021e114f8ac5 --- nova/api/openstack/compute/plugins/v3/access_ips.py | 2 -- nova/api/openstack/compute/plugins/v3/admin_actions.py | 2 -- nova/api/openstack/compute/plugins/v3/aggregates.py | 2 -- nova/api/openstack/compute/plugins/v3/create_backup.py | 2 -- nova/api/openstack/compute/plugins/v3/evacuate.py | 2 -- nova/api/openstack/compute/plugins/v3/lock_server.py | 2 -- nova/api/openstack/compute/plugins/v3/migrate_server.py | 2 -- nova/api/openstack/compute/plugins/v3/multinic.py | 2 -- nova/api/openstack/compute/plugins/v3/pause_server.py | 2 -- 9 files changed, 18 deletions(-) diff --git a/nova/api/openstack/compute/plugins/v3/access_ips.py 
b/nova/api/openstack/compute/plugins/v3/access_ips.py index d7378d3a2a..847ed594d1 100644 --- a/nova/api/openstack/compute/plugins/v3/access_ips.py +++ b/nova/api/openstack/compute/plugins/v3/access_ips.py @@ -17,11 +17,9 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.i18n import _ -from nova.openstack.common import log as logging from nova import utils ALIAS = "os-access-ips" -LOG = logging.getLogger(__name__) authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS) diff --git a/nova/api/openstack/compute/plugins/v3/admin_actions.py b/nova/api/openstack/compute/plugins/v3/admin_actions.py index 3759cc5939..a5939be545 100644 --- a/nova/api/openstack/compute/plugins/v3/admin_actions.py +++ b/nova/api/openstack/compute/plugins/v3/admin_actions.py @@ -23,9 +23,7 @@ from nova import compute from nova.compute import vm_states from nova import exception -from nova.openstack.common import log as logging -LOG = logging.getLogger(__name__) ALIAS = "os-admin-actions" # States usable in resetState action diff --git a/nova/api/openstack/compute/plugins/v3/aggregates.py b/nova/api/openstack/compute/plugins/v3/aggregates.py index 9df2f87b80..7aa3cc4550 100644 --- a/nova/api/openstack/compute/plugins/v3/aggregates.py +++ b/nova/api/openstack/compute/plugins/v3/aggregates.py @@ -26,11 +26,9 @@ from nova.compute import api as compute_api from nova import exception from nova.i18n import _ -from nova.openstack.common import log as logging from nova import utils ALIAS = "os-aggregates" -LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('compute', "v3:" + ALIAS) diff --git a/nova/api/openstack/compute/plugins/v3/create_backup.py b/nova/api/openstack/compute/plugins/v3/create_backup.py index 07f8759e06..2ca40eb11d 100644 --- a/nova/api/openstack/compute/plugins/v3/create_backup.py +++ b/nova/api/openstack/compute/plugins/v3/create_backup.py @@ -24,9 +24,7 @@ from nova.api import validation from 
nova import compute from nova import exception -from nova.openstack.common import log as logging -LOG = logging.getLogger(__name__) ALIAS = "os-create-backup" authorize = extensions.extension_authorizer('compute', "v3:" + ALIAS) diff --git a/nova/api/openstack/compute/plugins/v3/evacuate.py b/nova/api/openstack/compute/plugins/v3/evacuate.py index 26fc12fdcd..6bb616a757 100644 --- a/nova/api/openstack/compute/plugins/v3/evacuate.py +++ b/nova/api/openstack/compute/plugins/v3/evacuate.py @@ -24,7 +24,6 @@ from nova import compute from nova import exception from nova.i18n import _ -from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova import utils @@ -32,7 +31,6 @@ CONF.import_opt('enable_instance_password', 'nova.api.openstack.compute.servers') -LOG = logging.getLogger(__name__) ALIAS = "os-evacuate" authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS) diff --git a/nova/api/openstack/compute/plugins/v3/lock_server.py b/nova/api/openstack/compute/plugins/v3/lock_server.py index 4df6df24f7..662e1b2baf 100644 --- a/nova/api/openstack/compute/plugins/v3/lock_server.py +++ b/nova/api/openstack/compute/plugins/v3/lock_server.py @@ -19,9 +19,7 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova import compute -from nova.openstack.common import log as logging -LOG = logging.getLogger(__name__) ALIAS = "os-lock-server" diff --git a/nova/api/openstack/compute/plugins/v3/migrate_server.py b/nova/api/openstack/compute/plugins/v3/migrate_server.py index 179d776e27..87e9a939e4 100644 --- a/nova/api/openstack/compute/plugins/v3/migrate_server.py +++ b/nova/api/openstack/compute/plugins/v3/migrate_server.py @@ -23,10 +23,8 @@ from nova.api import validation from nova import compute from nova import exception -from nova.openstack.common import log as logging from nova.openstack.common import strutils -LOG = logging.getLogger(__name__) ALIAS = "os-migrate-server" diff --git 
a/nova/api/openstack/compute/plugins/v3/multinic.py b/nova/api/openstack/compute/plugins/v3/multinic.py index b321862456..d7c1296c88 100644 --- a/nova/api/openstack/compute/plugins/v3/multinic.py +++ b/nova/api/openstack/compute/plugins/v3/multinic.py @@ -25,10 +25,8 @@ from nova.api import validation from nova import compute from nova import exception -from nova.openstack.common import log as logging -LOG = logging.getLogger(__name__) ALIAS = "os-multinic" authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS) diff --git a/nova/api/openstack/compute/plugins/v3/pause_server.py b/nova/api/openstack/compute/plugins/v3/pause_server.py index dbbead7c07..1e832c96a7 100644 --- a/nova/api/openstack/compute/plugins/v3/pause_server.py +++ b/nova/api/openstack/compute/plugins/v3/pause_server.py @@ -22,9 +22,7 @@ from nova import compute from nova import exception from nova.i18n import _ -from nova.openstack.common import log as logging -LOG = logging.getLogger(__name__) ALIAS = "os-pause-server" From 5a607c814bb329daf2180c9aaa71f107e7aa6e09 Mon Sep 17 00:00:00 2001 From: shuangtai Date: Tue, 5 Aug 2014 14:10:57 +0800 Subject: [PATCH 305/486] add log exception hints in some modules Add hints for log exception messages Change-Id: I51b30d58bcb4b9fa8e51b29af1cfff33c0b432aa --- nova/image/s3.py | 18 +++++++++--------- nova/objects/base.py | 5 ++--- nova/objects/instance.py | 4 ++-- nova/objects/instance_info_cache.py | 6 +++--- nova/servicegroup/drivers/db.py | 4 ++-- nova/servicegroup/drivers/mc.py | 4 ++-- nova/servicegroup/drivers/zk.py | 10 +++++----- 7 files changed, 25 insertions(+), 26 deletions(-) diff --git a/nova/image/s3.py b/nova/image/s3.py index fa7278e257..5d5c4edff0 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -31,7 +31,7 @@ from nova.api.ec2 import ec2utils import nova.cert.rpcapi from nova import exception -from nova.i18n import _ +from nova.i18n import _, _LE from nova.image import glance from nova.openstack.common import log as 
logging from nova.openstack.common import processutils @@ -328,8 +328,8 @@ def _update_image_data(context, image_uuid, image_data): shutil.copyfileobj(part, combined) except Exception: - LOG.exception(_("Failed to download %(image_location)s " - "to %(image_path)s"), log_vars) + LOG.exception(_LE("Failed to download %(image_location)s " + "to %(image_path)s"), log_vars) _update_image_state(context, image_uuid, 'failed_download') return @@ -345,8 +345,8 @@ def _update_image_data(context, image_uuid, image_data): self._decrypt_image(context, enc_filename, encrypted_key, encrypted_iv, dec_filename) except Exception: - LOG.exception(_("Failed to decrypt %(image_location)s " - "to %(image_path)s"), log_vars) + LOG.exception(_LE("Failed to decrypt %(image_location)s " + "to %(image_path)s"), log_vars) _update_image_state(context, image_uuid, 'failed_decrypt') return @@ -356,8 +356,8 @@ def _update_image_data(context, image_uuid, image_data): unz_filename = self._untarzip_image(image_path, dec_filename) except Exception: - LOG.exception(_("Failed to untar %(image_location)s " - "to %(image_path)s"), log_vars) + LOG.exception(_LE("Failed to untar %(image_location)s " + "to %(image_path)s"), log_vars) _update_image_state(context, image_uuid, 'failed_untar') return @@ -366,8 +366,8 @@ def _update_image_data(context, image_uuid, image_data): with open(unz_filename) as image_file: _update_image_data(context, image_uuid, image_file) except Exception: - LOG.exception(_("Failed to upload %(image_location)s " - "to %(image_path)s"), log_vars) + LOG.exception(_LE("Failed to upload %(image_location)s " + "to %(image_path)s"), log_vars) _update_image_state(context, image_uuid, 'failed_upload') return diff --git a/nova/objects/base.py b/nova/objects/base.py index 63a2705aaa..f6a8d505d9 100644 --- a/nova/objects/base.py +++ b/nova/objects/base.py @@ -24,7 +24,7 @@ from nova import context from nova import exception -from nova.i18n import _ +from nova.i18n import _, _LE from nova import 
objects from nova.objects import fields from nova.openstack.common import log as logging @@ -83,8 +83,7 @@ def setter(self, value, name=name, field=field): return setattr(self, attrname, field_value) except Exception: attr = "%s.%s" % (self.obj_name(), name) - LOG.exception(_('Error setting %(attr)s') % - {'attr': attr}) + LOG.exception(_LE('Error setting %(attr)s'), {'attr': attr}) raise setattr(cls, name, property(getter, setter)) diff --git a/nova/objects/instance.py b/nova/objects/instance.py index 275e7db89d..a0ea3ba9ec 100644 --- a/nova/objects/instance.py +++ b/nova/objects/instance.py @@ -17,7 +17,7 @@ from nova.compute import flavors from nova import db from nova import exception -from nova.i18n import _ +from nova.i18n import _LE from nova import notifications from nova import objects from nova.objects import base @@ -428,7 +428,7 @@ def _handle_cell_update_from_api(): try: getattr(self, '_save_%s' % field)(context) except AttributeError: - LOG.exception(_('No save handler for %s') % field, + LOG.exception(_LE('No save handler for %s'), field, instance=self) elif field in changes: updates[field] = self[field] diff --git a/nova/objects/instance_info_cache.py b/nova/objects/instance_info_cache.py index 10b128e0e8..9d7011937f 100644 --- a/nova/objects/instance_info_cache.py +++ b/nova/objects/instance_info_cache.py @@ -16,7 +16,7 @@ from nova.cells import rpcapi as cells_rpcapi from nova import db from nova import exception -from nova.i18n import _ +from nova.i18n import _LE from nova.objects import base from nova.objects import fields from nova.openstack.common import log as logging @@ -79,8 +79,8 @@ def _info_cache_cells_update(ctxt, info_cache): try: cells_api.instance_info_cache_update_at_top(ctxt, info_cache) except Exception: - LOG.exception(_("Failed to notify cells of instance info " - "cache update")) + LOG.exception(_LE("Failed to notify cells of instance info " + "cache update")) @base.remotable def save(self, context, update_cells=True): diff 
--git a/nova/servicegroup/drivers/db.py b/nova/servicegroup/drivers/db.py index bf45d1ada5..70b7e132ba 100644 --- a/nova/servicegroup/drivers/db.py +++ b/nova/servicegroup/drivers/db.py @@ -18,7 +18,7 @@ from nova import conductor from nova import context -from nova.i18n import _ +from nova.i18n import _, _LE from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova.servicegroup import api @@ -107,4 +107,4 @@ def _report_state(self, service): except Exception: # pylint: disable=W0702 if not getattr(service, 'model_disconnected', False): service.model_disconnected = True - LOG.exception(_('model server went away')) + LOG.exception(_LE('model server went away')) diff --git a/nova/servicegroup/drivers/mc.py b/nova/servicegroup/drivers/mc.py index 3d643bb20c..636dec8aa9 100644 --- a/nova/servicegroup/drivers/mc.py +++ b/nova/servicegroup/drivers/mc.py @@ -21,7 +21,7 @@ from nova import conductor from nova import context -from nova.i18n import _ +from nova.i18n import _, _LE from nova.openstack.common import log as logging from nova.openstack.common import memorycache from nova.openstack.common import timeutils @@ -102,4 +102,4 @@ def _report_state(self, service): except Exception: # pylint: disable=W0702 if not getattr(service, 'model_disconnected', False): service.model_disconnected = True - LOG.exception(_('model server went away')) + LOG.exception(_LE('model server went away')) diff --git a/nova/servicegroup/drivers/zk.py b/nova/servicegroup/drivers/zk.py index 9ba3ae64f9..a2dc3c83c1 100644 --- a/nova/servicegroup/drivers/zk.py +++ b/nova/servicegroup/drivers/zk.py @@ -20,7 +20,7 @@ from oslo.config import cfg from nova import exception -from nova.i18n import _ +from nova.i18n import _, _LE from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import loopingcall @@ -87,10 +87,10 @@ def join(self, member_id, group, service=None): try: member = 
membership.Membership(self._session, path, member_id) except RuntimeError: - LOG.exception(_("Unable to join. It is possible that either " - "another node exists with the same name, or " - "this node just restarted. We will try " - "again in a short while to make sure.")) + LOG.exception(_LE("Unable to join. It is possible that either" + " another node exists with the same name, or" + " this node just restarted. We will try " + "again in a short while to make sure.")) eventlet.sleep(CONF.zookeeper.sg_retry_interval) member = membership.Membership(self._session, path, member_id) self._memberships[(group, member_id)] = member From af44b50b6b8187c559c56b9d3f7dc047fc5be407 Mon Sep 17 00:00:00 2001 From: Facundo Farias Date: Mon, 31 Mar 2014 16:26:51 -0300 Subject: [PATCH 306/486] Correct returned HTTP status code (Use 403 instead of 413) The exception HTTPRequestEntityTooLarge should not be used as an exception response based on RFC2616. Because of that, change the returned response to HTTPForbidden. Restore commit for Iab090c40c632a76b0528df8145ad0897c8b649bf And add compute V3 changes. 
Related Tempest commit to match this change: If376eda0a7929ba2baa4ac4acbb457883bcfc96d DocImpact: corrects HTTP return code from 413 to 403 for quota-related limit faults Closes-Bug: #1298131 Change-Id: I2bb8a60ef254afbfed514cfeebe75355d0de4475 --- nova/api/openstack/compute/plugins/v3/servers.py | 8 ++++---- nova/api/openstack/compute/servers.py | 4 ++-- .../openstack/compute/plugins/v3/test_server_actions.py | 2 +- .../api/openstack/compute/plugins/v3/test_servers.py | 4 ++-- nova/tests/api/openstack/compute/test_server_actions.py | 2 +- nova/tests/api/openstack/compute/test_servers.py | 4 ++-- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index ac78ee1e8f..1009556941 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -429,7 +429,7 @@ def show(self, req, id): req.cache_db_instance(instance) return self._view_builder.show(req, instance) - @extensions.expected_errors((400, 409, 413)) + @extensions.expected_errors((400, 403, 409, 413)) @wsgi.response(202) @validation.schema(schema_server_create) def create(self, req, body): @@ -509,7 +509,7 @@ def create(self, req, body): **create_kwargs) except (exception.QuotaError, exception.PortLimitExceeded) as error: - raise exc.HTTPRequestEntityTooLarge( + raise exc.HTTPForbidden( explanation=error.format_message(), headers={'Retry-After': 0}) except exception.InvalidMetadataSize as error: @@ -728,7 +728,7 @@ def _resize(self, req, instance_id, flavor_id, **kwargs): try: self.compute_api.resize(context, instance, flavor_id, **kwargs) except exception.QuotaError as error: - raise exc.HTTPRequestEntityTooLarge( + raise exc.HTTPForbidden( explanation=error.format_message(), headers={'Retry-After': 0}) except exception.FlavorNotFound: @@ -810,7 +810,7 @@ def _flavor_id_from_req_data(self, data): return common.get_id_from_href(flavor_ref) - 
@extensions.expected_errors((400, 401, 404, 409, 413)) + @extensions.expected_errors((400, 401, 403, 404, 409)) @wsgi.response(202) @wsgi.action('resize') def _action_resize(self, req, id, body): diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index dbe7290fa5..9b54907960 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -958,7 +958,7 @@ def create(self, req, body): legacy_bdm=legacy_bdm) except (exception.QuotaError, exception.PortLimitExceeded) as error: - raise exc.HTTPRequestEntityTooLarge( + raise exc.HTTPForbidden( explanation=error.format_message(), headers={'Retry-After': 0}) except exception.InvalidMetadataSize as error: @@ -1167,7 +1167,7 @@ def _resize(self, req, instance_id, flavor_id, **kwargs): try: self.compute_api.resize(context, instance, flavor_id, **kwargs) except exception.QuotaError as error: - raise exc.HTTPRequestEntityTooLarge( + raise exc.HTTPForbidden( explanation=error.format_message(), headers={'Retry-After': 0}) except exception.FlavorNotFound: diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py index 3f03cb084a..ec037b7aac 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py @@ -676,7 +676,7 @@ def fake_resize(*args, **kwargs): self.stubs.Set(compute_api.API, 'resize', fake_resize) req = fakes.HTTPRequestV3.blank(self.url) - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.assertRaises(webob.exc.HTTPForbidden, self.controller._action_resize, req, FAKE_UUID, body) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index 6368cb649b..c1b62d7fd0 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ 
b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -2267,7 +2267,7 @@ def test_create_instance_too_much_metadata(self): self.body['server']['image_ref'] = image_href self.body['server']['metadata']['vote'] = 'fiddletown' self.req.body = jsonutils.dumps(self.body) - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, self.req, body=self.body) def test_create_instance_metadata_key_too_long(self): @@ -2431,7 +2431,7 @@ def _do_test_create_instance_above_quota(self, resource, allowed, quota, try: self.controller.create(self.req, body=self.body).obj['server'] self.fail('expected quota to be exceeded') - except webob.exc.HTTPRequestEntityTooLarge as e: + except webob.exc.HTTPForbidden as e: self.assertEqual(e.explanation, expected_msg) def test_create_instance_above_quota_instances(self): diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py index 4a37d11ee1..741c699a50 100644 --- a/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/tests/api/openstack/compute/test_server_actions.py @@ -860,7 +860,7 @@ def fake_resize(*args, **kwargs): self.stubs.Set(compute_api.API, 'resize', fake_resize) req = fakes.HTTPRequest.blank(self.url) - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.assertRaises(webob.exc.HTTPForbidden, self.controller._action_resize, req, FAKE_UUID, body) diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index 777c63c9ff..34c3a35d4a 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -2317,7 +2317,7 @@ def test_create_instance_too_much_metadata(self): self.body['server']['imageRef'] = image_href self.body['server']['metadata']['vote'] = 'fiddletown' self.req.body = jsonutils.dumps(self.body) - 
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, self.req, self.body) def test_create_instance_metadata_key_too_long(self): @@ -3263,7 +3263,7 @@ def _do_test_create_instance_above_quota(self, resource, allowed, quota, try: self.controller.create(self.req, self.body).obj['server'] self.fail('expected quota to be exceeded') - except webob.exc.HTTPRequestEntityTooLarge as e: + except webob.exc.HTTPForbidden as e: self.assertEqual(e.explanation, expected_msg) def test_create_instance_above_quota_instances(self): From cee35213783bcc25b6f1bc1dd48da7f40f547d94 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Fri, 18 Jul 2014 11:47:04 +1200 Subject: [PATCH 307/486] Add a missing instance=instance in compute/mgr This can be logged without context at the moment, so we should really include the instance parameter as normal. Change-Id: I6062f32ae906d958e808c8f1574e0c16b687d856 --- nova/compute/manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index f71f7e4c84..ecd834af15 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1905,7 +1905,8 @@ def do_build_and_run_instance(context, instance, image, request_spec, retry = filter_properties.get('retry', None) if not retry: # no retry information, do not reschedule. 
- LOG.debug("Retry info not present, will not reschedule") + LOG.debug("Retry info not present, will not reschedule", + instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) self._set_instance_error_state(context, instance.uuid) From eca4286e955861e8e1547a8aabf2c4b5c4aad075 Mon Sep 17 00:00:00 2001 From: Robert Pothier Date: Tue, 5 Aug 2014 17:44:59 -0400 Subject: [PATCH 308/486] Enhance PCI whitelist While keeping the existing PCI passthrough functionality intact, this patch makes the following enhancements: * allows aggregated declaration of PCI devices by using '*' and '.' * allows tags to be associated with PCI devices. A whitelist entry is defined as: ["device_id": "",] ["product_id": "",] ["address": "[[[[]:]]:][][.[]]" | "devname": "Ethernet Interface Name",] "physical_network":"name string of the physical network" can be a '*' or a valid device/product id as displayed by the linux utility lspci. The address uses the same syntax as it's in lspci. Refer to lspci's manual for its description about the '-s' switch. The devname can be a valid PCI device name. The only device names that are supported in this specification are those that are displayed by the linux utility ifconfig -a and correspond to either a PF or a VF on a vNIC. There may be 0 or more tags associated with an entry. If the device defined by the address or devname corresponds to a SR-IOV PF, all the VFs under the PF will match the entry. For SR-IOV networking, a pre-defined tag "physical_network" is used to define the physical network that the devices are attached to. Multiple whitelist entries per host are supported as they already are. The fields device_id, product_id, and address or devname will be matched against PCI devices that are returned as a result of querying libvirt. 
Change-Id: I96e3e0174fa79ef9dd0ffbec172ba1c0420a37f8 Partially Implements: blueprint pci-passthrough-sriov --- nova/exception.py | 11 ++ nova/pci/pci_devspec.py | 179 +++++++++++++++++++++++++++++ nova/pci/pci_utils.py | 55 +++++++++ nova/tests/pci/test_pci_devspec.py | 179 +++++++++++++++++++++++++++++ 4 files changed, 424 insertions(+) create mode 100755 nova/pci/pci_devspec.py create mode 100644 nova/tests/pci/test_pci_devspec.py diff --git a/nova/exception.py b/nova/exception.py index 0bc8bff259..fe761b0fab 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -1495,6 +1495,17 @@ class PciDeviceWrongAddressFormat(NovaException): msg_fmt = _("The PCI address %(address)s has an incorrect format.") +class PciDeviceInvalidAddressField(NovaException): + msg_fmt = _("Invalid PCI Whitelist: " + "The PCI address %(address)s has an invalid %(field)s.") + + +class PciDeviceInvalidDeviceName(NovaException): + msg_fmt = _("Invalid PCI Whitelist: " + "The PCI whitelist can specify devname or address," + " but not both") + + class PciDeviceNotFoundById(NotFound): msg_fmt = _("PCI device %(id)s not found") diff --git a/nova/pci/pci_devspec.py b/nova/pci/pci_devspec.py new file mode 100755 index 0000000000..c228e4a15d --- /dev/null +++ b/nova/pci/pci_devspec.py @@ -0,0 +1,179 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import ast +import re + +from nova import exception +from nova.openstack.common import jsonutils +from nova.pci import pci_utils + +MAX_VENDOR_ID = 0xFFFF +MAX_PRODUCT_ID = 0xFFFF +MAX_FUNC = 0x7 +MAX_DOMAIN = 0xFFFF +MAX_BUS = 0xFF +MAX_SLOT = 0x1F +ANY = '*' +VIRTFN_RE = re.compile("virtfn\d+") + + +def get_value(v): + return ast.literal_eval("0x" + v) + + +def get_pci_dev_info(pci_obj, property, max, hex_value): + a = getattr(pci_obj, property) + if a == ANY: + return + v = get_value(a) + if v > max: + raise exception.PciConfigInvalidWhitelist( + reason = "invalid %s %s" % (property, a)) + setattr(pci_obj, property, hex_value % v) + + +class PciAddress(object): + """Manages the address fields of the whitelist. + + This class checks the address fields of the pci_passthrough_whitelist + configuration option, validating the address fields. + Example config are: + pci_passthrough_whitelist = {"address":"*:0a:00.*", + "physical_network":"physnet1"} + pci_passthrough_whitelist = {"address":":0a:00.", + This function class will validate the address fields, check for wildcards, + and insert wildcards where the field is left blank. 
+ """ + def __init__(self, pci_addr, is_physical_function): + self.domain = ANY + self.bus = ANY + self.slot = ANY + self.func = ANY + self.is_physical_function = is_physical_function + self._init_address_fields(pci_addr) + + def _check_physical_function(self): + if ANY in (self.domain, self.bus, self.slot, self.func): + return + self.is_physical_function = pci_utils.is_physical_function(self) + + def _init_address_fields(self, pci_addr): + if self.is_physical_function: + (self.domain, self.bus, self.slot, + self.func) = pci_utils.get_pci_address_fields(pci_addr) + return + dbs, sep, func = pci_addr.partition('.') + if func: + fstr = func.strip() + if fstr != ANY: + try: + f = get_value(fstr) + except SyntaxError: + raise exception.PciDeviceWrongAddressFormat( + address=pci_addr) + if f > MAX_FUNC: + raise exception.PciDeviceInvalidAddressField( + address=pci_addr, field="function") + self.func = "%1x" % f + if dbs: + dbs_fields = dbs.split(':') + if len(dbs_fields) > 3: + raise exception.PciDeviceWrongAddressFormat(address=pci_addr) + # If we got a partial address like ":00.", we need to to turn this + # into a domain of ANY, a bus of ANY, and a slot of 00. 
This code + # allows the address bus and/or domain to be left off + dbs_all = [ANY for x in range(3 - len(dbs_fields))] + dbs_all.extend(dbs_fields) + dbs_checked = [s.strip() or ANY for s in dbs_all] + self.domain, self.bus, self.slot = dbs_checked + get_pci_dev_info(self, 'domain', MAX_DOMAIN, '%04x') + get_pci_dev_info(self, 'bus', MAX_BUS, '%02x') + get_pci_dev_info(self, 'slot', MAX_SLOT, '%02x') + self._check_physical_function() + + def match(self, pci_addr, pci_phys_addr): + # Assume this is called given pci_add and pci_phys_addr from libvirt, + # no attempt is made to verify pci_addr is a VF of pci_phys_addr + if self.is_physical_function: + if not pci_phys_addr: + return False + domain, bus, slot, func = ( + pci_utils.get_pci_address_fields(pci_phys_addr)) + return (self.domain == domain and self.bus == bus and + self.slot == slot and self.func == func) + else: + domain, bus, slot, func = ( + pci_utils.get_pci_address_fields(pci_addr)) + conditions = [ + self.domain in (ANY, domain), + self.bus in (ANY, bus), + self.slot in (ANY, slot), + self.func in (ANY, func) + ] + return all(conditions) + + +class PciDeviceSpec(object): + def __init__(self, dev_spec): + self.dev_spec = dev_spec + self._init_dev_details() + self.dev_count = 0 + + def _init_dev_details(self): + details = jsonutils.loads(self.dev_spec) + self.vendor_id = details.pop("vendor_id", ANY) + self.product_id = details.pop("product_id", ANY) + self.address = details.pop("address", None) + self.dev_name = details.pop("devname", None) + + self.vendor_id = self.vendor_id.strip() + get_pci_dev_info(self, 'vendor_id', MAX_VENDOR_ID, '%04x') + get_pci_dev_info(self, 'product_id', MAX_PRODUCT_ID, '%04x') + + pf = False + if self.address and self.dev_name: + raise exception.PciDeviceInvalidDeviceName() + if not self.address: + if self.dev_name: + self.address, pf = pci_utils.get_function_by_ifname( + self.dev_name) + if not self.address: + raise exception.PciDeviceNotFoundById(id=self.dev_name) + else: 
+ self.address = "*:*:*.*" + + self.address = PciAddress(self.address, pf) + self.tags = details + + def match(self, dev_dict): + conditions = [ + self.vendor_id in (ANY, dev_dict['vendor_id']), + self.product_id in (ANY, dev_dict['product_id']), + self.address.match(dev_dict['address'], + dev_dict.get('phys_function')) + ] + return all(conditions) + + def match_pci_obj(self, pci_obj): + if pci_obj.extra_info: + phy_func = pci_obj.extra_info.get('phys_function') + else: + phy_func = None + return self.match({'vendor_id': pci_obj.vendor_id, + 'product_id': pci_obj.product_id, + 'address': pci_obj.address, + 'phys_function': phy_func}) + + def get_tags(self): + return self.tags diff --git a/nova/pci/pci_utils.py b/nova/pci/pci_utils.py index a9282ff15e..fbdec9effc 100644 --- a/nova/pci/pci_utils.py +++ b/nova/pci/pci_utils.py @@ -15,10 +15,14 @@ # under the License. +import os import re from nova import exception +from nova.i18n import _LE +from nova.openstack.common import log as logging +LOG = logging.getLogger(__name__) PCI_VENDOR_PATTERN = "^(hex{4})$".replace("hex", "[\da-fA-F]") _PCI_ADDRESS_PATTERN = ("^(hex{4}):(hex{2}):(hex{2}).(oct{1})$". @@ -26,6 +30,8 @@ replace("oct", "[0-7]")) _PCI_ADDRESS_REGEX = re.compile(_PCI_ADDRESS_PATTERN) +_VIRTFN_RE = re.compile("virtfn\d+") + def pci_device_prop_match(pci_dev, specs): """Check if the pci_dev meet spec requirement @@ -53,3 +59,52 @@ def parse_address(address): if not m: raise exception.PciDeviceWrongAddressFormat(address=address) return m.groups() + + +def get_pci_address_fields(pci_addr): + dbs, sep, func = pci_addr.partition('.') + domain, bus, slot = dbs.split(':') + return (domain, bus, slot, func) + + +def get_function_by_ifname(ifname): + """Given the device name, returns the PCI address of a an device + and returns True if the address in a physical function. 
+ """ + try: + dev_path = "/sys/class/net/%s/device" % ifname + dev_info = os.listdir(dev_path) + for dev_file in dev_info: + if _VIRTFN_RE.match(dev_file): + return os.readlink(dev_path).strip("./"), True + else: + return os.readlink(dev_path).strip("./"), False + except Exception: + LOG.error(_LE("PCI device %s not found") % ifname) + return None, False + + +def is_physical_function(PciAddress): + dev_path = "/sys/bus/pci/devices/%(d)s:%(b)s:%(s)s.%(f)s/" % { + "d": PciAddress.domain, "b": PciAddress.bus, + "s": PciAddress.slot, "f": PciAddress.func} + try: + dev_info = os.listdir(dev_path) + for dev_file in dev_info: + if _VIRTFN_RE.match(dev_file): + return True + else: + return False + except Exception: + LOG.error(_LE("PCI device %s not found") % dev_path) + return False + + +def get_ifname_by_pci_address(pci_addr): + dev_path = "/sys/bus/pci/devices/%s/net" % (pci_addr) + try: + dev_info = os.listdir(dev_path) + return dev_info.pop() + except Exception: + LOG.error(_LE("PCI device %s not found") % pci_addr) + return None diff --git a/nova/tests/pci/test_pci_devspec.py b/nova/tests/pci/test_pci_devspec.py new file mode 100644 index 0000000000..79c4f4ebc2 --- /dev/null +++ b/nova/tests/pci/test_pci_devspec.py @@ -0,0 +1,179 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import mock + +from nova import exception +from nova.objects import pci_device +from nova.pci import pci_devspec +from nova import test + +dev = {"vendor_id": "8086", + "product_id": "5057", + "address": "1234:5678:8988.5", + "phys_function": "0000:0a:00.0"} + + +class PciAddressTestCase(test.NoDBTestCase): + def test_wrong_address(self): + pci_info = ('{"vendor_id": "8086", "address": "*: *: *.6",' + + '"product_id": "5057", "physical_network": "hr_net"}') + pci = pci_devspec.PciDeviceSpec(pci_info) + self.assertFalse(pci.match(dev)) + + def test_address_too_big(self): + pci_info = ('{"address": "0000:0a:0b:00.5", ' + + '"physical_network": "hr_net"}') + self.assertRaises(exception.PciDeviceWrongAddressFormat, + pci_devspec.PciDeviceSpec, pci_info) + + def test_address_invalid_character(self): + pci_info = '{"address": "0000:h4.12:6", "physical_network": "hr_net"}' + self.assertRaises(exception.PciDeviceWrongAddressFormat, + pci_devspec.PciDeviceSpec, pci_info) + + def test_max_func(self): + pci_info = (('{"address": "0000:0a:00.%s", ' + + '"physical_network": "hr_net"}') % + (pci_devspec.MAX_FUNC + 1)) + exc = self.assertRaises(exception.PciDeviceInvalidAddressField, + pci_devspec.PciDeviceSpec, pci_info) + msg = ('Invalid PCI Whitelist: ' + 'The PCI address 0000:0a:00.%s has an invalid function.' 
+ % (pci_devspec.MAX_FUNC + 1)) + self.assertEqual(msg, unicode(exc)) + + def test_max_domain(self): + pci_info = ('{"address": "%x:0a:00.5", "physical_network":"hr_net"}' + % (pci_devspec.MAX_DOMAIN + 1)) + exc = self.assertRaises(exception.PciConfigInvalidWhitelist, + pci_devspec.PciDeviceSpec, pci_info) + msg = ('Invalid PCI devices Whitelist config invalid domain %x' + % (pci_devspec.MAX_DOMAIN + 1)) + self.assertEqual(msg, unicode(exc)) + + def test_max_bus(self): + pci_info = ('{"address": "0000:%x:00.5", "physical_network":"hr_net"}' + % (pci_devspec.MAX_BUS + 1)) + exc = self.assertRaises(exception.PciConfigInvalidWhitelist, + pci_devspec.PciDeviceSpec, pci_info) + msg = ('Invalid PCI devices Whitelist config invalid bus %x' + % (pci_devspec.MAX_BUS + 1)) + self.assertEqual(msg, unicode(exc)) + + def test_max_slot(self): + pci_info = ('{"address": "0000:0a:%x.5", "physical_network":"hr_net"}' + % (pci_devspec.MAX_SLOT + 1)) + exc = self.assertRaises(exception.PciConfigInvalidWhitelist, + pci_devspec.PciDeviceSpec, pci_info) + msg = ('Invalid PCI devices Whitelist config invalid slot %x' + % (pci_devspec.MAX_SLOT + 1)) + self.assertEqual(msg, unicode(exc)) + + def test_address_is_undefined(self): + pci_info = '{"vendor_id":"8086", "product_id":"5057"}' + pci = pci_devspec.PciDeviceSpec(pci_info) + self.assertTrue(pci.match(dev)) + + def test_partial_address(self): + pci_info = '{"address":":0a:00.", "physical_network":"hr_net"}' + pci = pci_devspec.PciDeviceSpec(pci_info) + dev = {"vendor_id": "1137", + "product_id": "0071", + "address": "0000:0a:00.5", + "phys_function": "0000:0a:00.0"} + self.assertTrue(pci.match(dev)) + + @mock.patch('nova.pci.pci_utils.is_physical_function', return_value = True) + def test_address_is_pf(self, mock_is_physical_function): + pci_info = '{"address":"0000:0a:00.0", "physical_network":"hr_net"}' + pci = pci_devspec.PciDeviceSpec(pci_info) + self.assertTrue(pci.match(dev)) + + +class PciDevSpecTestCase(test.NoDBTestCase): + def 
setUp(self): + super(PciDevSpecTestCase, self).setUp() + + def test_spec_match(self): + pci_info = ('{"vendor_id": "8086","address": "*: *: *.5",' + + '"product_id": "5057", "physical_network": "hr_net"}') + pci = pci_devspec.PciDeviceSpec(pci_info) + self.assertTrue(pci.match(dev)) + + def test_invalid_vendor_id(self): + pci_info = ('{"vendor_id": "8087","address": "*: *: *.5", ' + + '"product_id": "5057", "physical_network": "hr_net"}') + pci = pci_devspec.PciDeviceSpec(pci_info) + self.assertFalse(pci.match(dev)) + + def test_vendor_id_out_of_range(self): + pci_info = ('{"vendor_id": "80860", "address": "*:*:*.5", ' + + '"product_id": "5057", "physical_network": "hr_net"}') + exc = self.assertRaises(exception.PciConfigInvalidWhitelist, + pci_devspec.PciDeviceSpec, pci_info) + self.assertEqual("Invalid PCI devices Whitelist config " + "invalid vendor_id 80860", unicode(exc)) + + def test_invalid_product_id(self): + pci_info = ('{"vendor_id": "8086","address": "*: *: *.5", ' + + '"product_id": "5056", "physical_network": "hr_net"}') + pci = pci_devspec.PciDeviceSpec(pci_info) + self.assertFalse(pci.match(dev)) + + def test_product_id_out_of_range(self): + pci_info = ('{"vendor_id": "8086","address": "*:*:*.5", ' + + '"product_id": "50570", "physical_network": "hr_net"}') + exc = self.assertRaises(exception.PciConfigInvalidWhitelist, + pci_devspec.PciDeviceSpec, pci_info) + self.assertEqual("Invalid PCI devices Whitelist config " + "invalid product_id 50570", unicode(exc)) + + def test_devname_and_address(self): + pci_info = ('{"devname": "eth0", "vendor_id":"8086", ' + + '"address":"*:*:*.5", "physical_network": "hr_net"}') + self.assertRaises(exception.PciDeviceInvalidDeviceName, + pci_devspec.PciDeviceSpec, pci_info) + + @mock.patch('nova.pci.pci_utils.get_function_by_ifname', + return_value = ("0000:0a:00.0", True)) + def test_by_name(self, mock_get_function_by_ifname): + pci_info = '{"devname": "eth0", "physical_network": "hr_net"}' + pci = 
pci_devspec.PciDeviceSpec(pci_info) + self.assertTrue(pci.match(dev)) + + @mock.patch('nova.pci.pci_utils.get_function_by_ifname', + return_value = (None, False)) + def test_invalid_name(self, mock_get_function_by_ifname): + pci_info = '{"devname": "lo", "physical_network": "hr_net"}' + exc = self.assertRaises(exception.PciDeviceNotFoundById, + pci_devspec.PciDeviceSpec, pci_info) + self.assertEqual('PCI device lo not found', unicode(exc)) + + def test_pci_obj(self): + pci_info = ('{"vendor_id": "8086","address": "*:*:*.5", ' + + '"product_id": "5057", "physical_network": "hr_net"}') + + pci = pci_devspec.PciDeviceSpec(pci_info) + pci_dev = { + 'compute_node_id': 1, + 'address': '0000:00:00.5', + 'product_id': '5057', + 'vendor_id': '8086', + 'status': 'available', + 'extra_k1': 'v1', + } + + pci_obj = pci_device.PciDevice.create(pci_dev) + self.assertTrue(pci.match_pci_obj(pci_obj)) From 5d96724a164c3202b576e92276320b035cf100ba Mon Sep 17 00:00:00 2001 From: pkholkin Date: Fri, 8 Aug 2014 13:29:15 +0400 Subject: [PATCH 309/486] Optimize instance_floating_address_get_all Removed one extra round-trip to a database and iteration through 'fixed_ip_ids' list Change-Id: I65febddbc48350da93179aee8ff3c4130a6671d4 --- nova/db/sqlalchemy/api.py | 17 +++++------------ nova/tests/db/test_db_api.py | 4 ++++ 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 6bd1db5ca4..7a7d615f05 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -2170,19 +2170,12 @@ def instance_floating_address_get_all(context, instance_uuid): if not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) - fixed_ip_ids = model_query(context, models.FixedIp.id, - base_model=models.FixedIp).\ - filter_by(instance_uuid=instance_uuid).\ - all() - if not fixed_ip_ids: - raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid) - - fixed_ip_ids = [fixed_ip_id.id for 
fixed_ip_id in fixed_ip_ids] - - floating_ips = model_query(context, models.FloatingIp.address, + floating_ips = model_query(context, + models.FloatingIp.address, base_model=models.FloatingIp).\ - filter(models.FloatingIp.fixed_ip_id.in_(fixed_ip_ids)).\ - all() + join(models.FloatingIp.fixed_ip).\ + filter_by(instance_uuid=instance_uuid) + return [floating_ip.address for floating_ip in floating_ips] diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py index 90c5c002c6..a65cf15dbe 100644 --- a/nova/tests/db/test_db_api.py +++ b/nova/tests/db/test_db_api.py @@ -2000,6 +2000,10 @@ def test_instance_floating_address_get_all(self): db.instance_floating_address_get_all(ctxt, instance_uuids[2]) self.assertEqual(set([float_addresses[2]]), set(real_float_addresses)) + self.assertRaises(exception.InvalidUUID, + db.instance_floating_address_get_all, + ctxt, 'invalid_uuid') + def test_instance_stringified_ips(self): instance = self.create_instance_with_args() instance = db.instance_update( From 3ea14e8a70a946dbb162ecafa848e4f2fa29772a Mon Sep 17 00:00:00 2001 From: Jeegn Chen Date: Sun, 8 Jun 2014 16:23:36 +0800 Subject: [PATCH 310/486] Fix live-migration failure in FC multipath case Currently, /dev/dm- instead of /dev/mapper/ is used to access multipath FC volumes by Compute Node and multipath_id in connection_info is not maintained properly and may be lost during connection refreshing. This implementation will make source Compute Node and destination Compute Node fail to disconnect/connect to volumes properly and result in live-migration failure. To fix it, /dev/mapper will be used instead of /dev/dm- to access multipath devices, just like iSCSI multipath implementation, and logic to preserve the unique (across Compute Nodes) multipath_id is also added. 
Change-Id: I17f15852c098af88afd270084c62eb87693c60d4 Closes-Bug: #1327497 --- nova/compute/manager.py | 2 +- nova/storage/linuxscsi.py | 15 ++++++++++----- nova/tests/test_linuxscsi.py | 8 +++++--- nova/tests/virt/libvirt/test_volume.py | 16 ++++++++++++---- nova/tests/virt/test_block_device.py | 4 ++-- nova/virt/block_device.py | 11 +++++++++++ nova/virt/libvirt/volume.py | 6 +++++- 7 files changed, 46 insertions(+), 16 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 660785e1ad..2567a2f268 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -4783,7 +4783,7 @@ def _post_live_migration(self, ctxt, instance, # Cleanup source host post live-migration block_device_info = self._get_instance_block_device_info( - ctxt, instance, bdms) + ctxt, instance, bdms=bdms) self.driver.post_live_migration(ctxt, instance, block_device_info, migrate_data) diff --git a/nova/storage/linuxscsi.py b/nova/storage/linuxscsi.py index f261094a13..08577b0f6d 100644 --- a/nova/storage/linuxscsi.py +++ b/nova/storage/linuxscsi.py @@ -14,7 +14,7 @@ """Generic linux scsi subsystem utilities.""" -from nova.i18n import _ +from nova.i18n import _LW from nova.openstack.common import log as logging from nova.openstack.common import loopingcall from nova.openstack.common import processutils @@ -97,7 +97,7 @@ def find_multipath_device(device): (out, err) = utils.execute('multipath', '-l', device, run_as_root=True) except processutils.ProcessExecutionError as exc: - LOG.warn(_("Multipath call failed exit (%(code)s)") + LOG.warn(_LW("Multipath call failed exit (%(code)s)") % {'code': exc.exit_code}) return None @@ -110,15 +110,15 @@ def find_multipath_device(device): # device line output is different depending # on /etc/multipath.conf settings. 
if info[1][:2] == "dm": - mdev = "/dev/%s" % info[1] mdev_id = info[0] + mdev = '/dev/mapper/%s' % mdev_id elif info[2][:2] == "dm": - mdev = "/dev/%s" % info[2] mdev_id = info[1].replace('(', '') mdev_id = mdev_id.replace(')', '') + mdev = '/dev/mapper/%s' % mdev_id if mdev is None: - LOG.warn(_("Couldn't find multipath device %s"), line) + LOG.warn(_LW("Couldn't find multipath device %s"), line) return None LOG.debug("Found multipath device = %s", mdev) @@ -126,6 +126,11 @@ def find_multipath_device(device): for dev_line in device_lines: if dev_line.find("policy") != -1: continue + if '#' in dev_line: + LOG.warn(_LW('Skip faulty line "%(dev_line)s" of' + ' multipath device %(mdev)s') + % {'mdev': mdev, 'dev_line': dev_line}) + continue dev_line = dev_line.lstrip(' |-`') dev_info = dev_line.split() diff --git a/nova/tests/test_linuxscsi.py b/nova/tests/test_linuxscsi.py index c291e45908..8b1a26a546 100644 --- a/nova/tests/test_linuxscsi.py +++ b/nova/tests/test_linuxscsi.py @@ -59,7 +59,7 @@ def fake_execute2(*cmd, **kwargs): info = linuxscsi.find_multipath_device('/dev/sde') LOG.error("info = %s" % info) - self.assertEqual("/dev/dm-3", info["device"]) + self.assertEqual("/dev/mapper/350002ac20398383d", info["device"]) self.assertEqual("/dev/sde", info['devices'][0]['device']) self.assertEqual("0", info['devices'][0]['host']) self.assertEqual("0", info['devices'][0]['id']) @@ -90,7 +90,8 @@ def fake_execute(*cmd, **kwargs): info = linuxscsi.find_multipath_device('/dev/sde') LOG.error("info = %s" % info) - self.assertEqual("/dev/dm-2", info["device"]) + self.assertEqual("/dev/mapper/36005076da00638089c000000000004d5", + info["device"]) self.assertEqual("/dev/sde", info['devices'][0]['device']) self.assertEqual("6", info['devices'][0]['host']) self.assertEqual("0", info['devices'][0]['channel']) @@ -118,7 +119,8 @@ def fake_execute(*cmd, **kwargs): info = linuxscsi.find_multipath_device('/dev/sdd') LOG.error("info = %s" % info) - self.assertEqual("/dev/dm-2", 
info["device"]) + self.assertEqual("/dev/mapper/36005076303ffc48e0000000000000101", + info["device"]) self.assertEqual("/dev/sdd", info['devices'][0]['device']) self.assertEqual("6", info['devices'][0]['host']) self.assertEqual("0", info['devices'][0]['channel']) diff --git a/nova/tests/virt/libvirt/test_volume.py b/nova/tests/virt/libvirt/test_volume.py index c203159b84..bbd7cef3da 100644 --- a/nova/tests/virt/libvirt/test_volume.py +++ b/nova/tests/virt/libvirt/test_volume.py @@ -951,14 +951,22 @@ def test_libvirt_fibrechan_driver(self): mount_device = "vde" conf = libvirt_driver.connect_volume(connection_info, self.disk_info) + self.assertEqual('1234567890', + connection_info['data']['multipath_id']) tree = conf.format_dom() - self.assertEqual(tree.get('type'), 'block') - self.assertEqual(tree.find('./source').get('dev'), - multipath_devname) + self.assertEqual('block', tree.get('type')) + self.assertEqual(multipath_devname, + tree.find('./source').get('dev')) + # Test the scenario where multipath_id is returned + libvirt_driver.disconnect_volume(connection_info, mount_device) + expected_commands = [] + self.assertEqual(expected_commands, self.executes) + # Test the scenario where multipath_id is not returned connection_info["data"]["devices"] = devices["devices"] + del connection_info["data"]["multipath_id"] libvirt_driver.disconnect_volume(connection_info, mount_device) expected_commands = [] - self.assertEqual(self.executes, expected_commands) + self.assertEqual(expected_commands, self.executes) # Should not work for anything other than string, unicode, and list connection_info = self.fibrechan_connection(self.vol, diff --git a/nova/tests/virt/test_block_device.py b/nova/tests/virt/test_block_device.py index af727b5d55..278b653fba 100644 --- a/nova/tests/virt/test_block_device.py +++ b/nova/tests/virt/test_block_device.py @@ -436,8 +436,8 @@ def test_refresh_connection(self): instance = {'id': 'fake_id', 'uuid': 'fake_uuid'} connector = {'ip': 'fake_ip', 
'host': 'fake_host'} - connection_info = {'data': {}} - expected_conn_info = {'data': {}, + connection_info = {'data': {'multipath_id': 'fake_multipath_id'}} + expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'}, 'serial': 'fake-volume-id-2'} self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save') diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py index 0022d23311..4da7f85027 100644 --- a/nova/virt/block_device.py +++ b/nova/virt/block_device.py @@ -17,6 +17,7 @@ from nova import block_device from nova.i18n import _ +from nova.i18n import _LI from nova import objects from nova.objects import base as obj_base from nova.openstack.common import excutils @@ -208,6 +209,14 @@ def _transform(self): except TypeError: self['connection_info'] = None + def _preserve_multipath_id(self, connection_info): + if self['connection_info'] and 'data' in self['connection_info']: + if 'multipath_id' in self['connection_info']['data']: + connection_info['data']['multipath_id'] =\ + self['connection_info']['data']['multipath_id'] + LOG.info(_LI('preserve multipath_id %s'), + connection_info['data']['multipath_id']) + @update_db def attach(self, context, instance, volume_api, virt_driver, do_check_attach=True, do_driver_attach=False): @@ -224,6 +233,7 @@ def attach(self, context, instance, volume_api, virt_driver, connector) if 'serial' not in connection_info: connection_info['serial'] = self.volume_id + self._preserve_multipath_id(connection_info) # If do_driver_attach is False, we will attach a volume to an instance # at boot time. So actual attach is done by instance creation code. 
@@ -267,6 +277,7 @@ def refresh_connection_info(self, context, instance, connector) if 'serial' not in connection_info: connection_info['serial'] = self.volume_id + self._preserve_multipath_id(connection_info) self['connection_info'] = connection_info def save(self, context): diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py index 261216b7b0..3daf3554d7 100644 --- a/nova/virt/libvirt/volume.py +++ b/nova/virt/libvirt/volume.py @@ -980,7 +980,6 @@ def disconnect_volume(self, connection_info, mount_device): """Detach the volume from instance_name.""" super(LibvirtFibreChannelVolumeDriver, self).disconnect_volume(connection_info, mount_device) - devices = connection_info['data']['devices'] # If this is a multipath device, we need to search again # and make sure we remove all the devices. Some of them @@ -990,6 +989,11 @@ def disconnect_volume(self, connection_info, mount_device): mdev_info = linuxscsi.find_multipath_device(multipath_id) devices = mdev_info['devices'] LOG.debug("devices to remove = %s", devices) + else: + # only needed when multipath-tools work improperly + devices = connection_info['data'].get('devices', []) + LOG.warn(_LW("multipath-tools probably work improperly. " + "devices to remove = %s.") % devices) # There may have been more than 1 device mounted # by the kernel for this volume. We have to remove From c55736d9fc941ae3f00a29e945b8881be7813e52 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 8 Aug 2014 12:29:47 -0400 Subject: [PATCH 311/486] Use v1 as default for cinder_catalog_info In c5402ef4fc509047d513a715a1c14e9b4ba9674f we recently added support for the Cinder v2 client. This change modified the default value of the cinder_catalog_info config such that an end user who was previously using the Cinder V1 API via the default config setting (by not setting it) would have a broken Nova -> cinder configuration upon upgrade. 
We should hold off on changing the default cinder_catalog_info for one release to allow for proper deprecation. Change-Id: I040b2c87ad0a2be92f31264e293794d97c27c965 Closes-bug: #1354499 --- nova/tests/test_cinder.py | 5 +++-- nova/volume/cinder.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/tests/test_cinder.py b/nova/tests/test_cinder.py index d73aeafdf6..0781f62cc2 100644 --- a/nova/tests/test_cinder.py +++ b/nova/tests/test_cinder.py @@ -257,8 +257,6 @@ def setUp(self): "name": "cinder", "endpoints": [{"publicURL": "http://localhost:8776/v1/project_id"}] }] - cinder.CONF.set_override('cinder_catalog_info', - 'volume:cinder:publicURL') self.context = context.RequestContext('username', 'project_id', service_catalog=catalog) cinder.cinderclient(self.context) @@ -332,8 +330,11 @@ def setUp(self): "name": "cinderv2", "endpoints": [{"publicURL": "http://localhost:8776/v2/project_id"}] }] + cinder.CONF.set_override('cinder_catalog_info', + 'volumev2:cinder:publicURL') self.context = context.RequestContext('username', 'project_id', service_catalog=catalog) + cinder.cinderclient(self.context) self.api = cinder.API() diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py index 488ef1ae38..1ad9d1dd2f 100644 --- a/nova/volume/cinder.py +++ b/nova/volume/cinder.py @@ -36,7 +36,7 @@ cinder_opts = [ cfg.StrOpt('cinder_catalog_info', - default='volumev2:cinder:publicURL', + default='volume:cinder:publicURL', help='Info to match when looking for cinder in the service ' 'catalog. Format is: separated values of the form: ' '::'), From 1498bf94e2617269be5be59556b89a24d56e3e86 Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Mon, 30 Jun 2014 15:20:49 +0100 Subject: [PATCH 312/486] VMware: Remove ds_util.build_datastore_path() Convert all remaining users of build_datastore_path to use the DatastorePath class, and remove build_datastore_path() and its tests. 
Change-Id: I73a166a48c0038743213e4feaa95834f9ebc8fbf --- nova/tests/virt/vmwareapi/test_driver_api.py | 119 ++++++++++--------- nova/tests/virt/vmwareapi/test_ds_util.py | 6 - nova/virt/vmwareapi/ds_util.py | 6 - nova/virt/vmwareapi/vmops.py | 10 +- 4 files changed, 68 insertions(+), 73 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index c307ac2766..3e140066eb 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -618,12 +618,13 @@ def test_list_instance_uuids_invalid_uuid(self): self.assertEqual(len(uuids), 0) def _cached_files_exist(self, exists=True): - cache = ('[%s] vmware_base/%s/%s.vmdk' % - (self.ds, self.fake_image_uuid, self.fake_image_uuid)) + cache = ds_util.DatastorePath(self.ds, 'vmware_base', + self.fake_image_uuid, + '%s.vmdk' % self.fake_image_uuid) if exists: - self.assertTrue(vmwareapi_fake.get_file(cache)) + self.assertTrue(vmwareapi_fake.get_file(str(cache))) else: - self.assertFalse(vmwareapi_fake.get_file(cache)) + self.assertFalse(vmwareapi_fake.get_file(str(cache))) def test_instance_dir_disk_created(self): """Test image file is cached when even when use_linked_clone @@ -631,47 +632,49 @@ def test_instance_dir_disk_created(self): """ self._create_vm() - inst_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid) - self.assertTrue(vmwareapi_fake.get_file(inst_file_path)) + path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid) + self.assertTrue(vmwareapi_fake.get_file(str(path))) self._cached_files_exist() def test_cache_dir_disk_created(self): """Test image disk is cached when use_linked_clone is True.""" self.flags(use_linked_clone=True, group='vmware') self._create_vm() - file = '[%s] vmware_base/%s/%s.vmdk' % (self.ds, self.fake_image_uuid, - self.fake_image_uuid) - root = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds, - self.fake_image_uuid, - self.fake_image_uuid) - 
self.assertTrue(vmwareapi_fake.get_file(file)) - self.assertTrue(vmwareapi_fake.get_file(root)) + path = ds_util.DatastorePath(self.ds, 'vmware_base', + self.fake_image_uuid, + '%s.vmdk' % self.fake_image_uuid) + root = ds_util.DatastorePath(self.ds, 'vmware_base', + self.fake_image_uuid, + '%s.80.vmdk' % self.fake_image_uuid) + self.assertTrue(vmwareapi_fake.get_file(str(path))) + self.assertTrue(vmwareapi_fake.get_file(str(root))) def _iso_disk_type_created(self, instance_type='m1.large'): self.image['disk_format'] = 'iso' self._create_vm(instance_type=instance_type) - file = '[%s] vmware_base/%s/%s.iso' % (self.ds, self.fake_image_uuid, - self.fake_image_uuid) - self.assertTrue(vmwareapi_fake.get_file(file)) + path = ds_util.DatastorePath(self.ds, 'vmware_base', + self.fake_image_uuid, + '%s.iso' % self.fake_image_uuid) + self.assertTrue(vmwareapi_fake.get_file(str(path))) def test_iso_disk_type_created(self): self._iso_disk_type_created() - vmdk_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid) - self.assertTrue(vmwareapi_fake.get_file(vmdk_file_path)) + path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid) + self.assertTrue(vmwareapi_fake.get_file(str(path))) def test_iso_disk_type_created_with_root_gb_0(self): self._iso_disk_type_created(instance_type='m1.micro') - vmdk_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid) - self.assertFalse(vmwareapi_fake.get_file(vmdk_file_path)) + path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid) + self.assertFalse(vmwareapi_fake.get_file(str(path))) def test_iso_disk_cdrom_attach(self): - self.iso_path = ( - '[%s] vmware_base/%s/%s.iso' % (self.ds, self.fake_image_uuid, - self.fake_image_uuid)) + self.iso_path = ds_util.DatastorePath(self.ds, 'vmware_base', + self.fake_image_uuid, + '%s.iso' % self.fake_image_uuid) def fake_attach_cdrom(vm_ref, instance, data_store_ref, iso_uploaded_path): - self.assertEqual(iso_uploaded_path, self.iso_path) + 
self.assertEqual(iso_uploaded_path, str(self.iso_path)) self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm", fake_attach_cdrom) @@ -681,9 +684,10 @@ def fake_attach_cdrom(vm_ref, instance, data_store_ref, def test_iso_disk_cdrom_attach_with_config_drive(self): self.flags(force_config_drive=True) self.iso_path = [ - '[%s] vmware_base/%s/%s.iso' % - (self.ds, self.fake_image_uuid, self.fake_image_uuid), - '[%s] fake-config-drive' % self.ds] + ds_util.DatastorePath(self.ds, 'vmware_base', + self.fake_image_uuid, + '%s.iso' % self.fake_image_uuid), + ds_util.DatastorePath(self.ds, 'fake-config-drive')] self.iso_unit_nos = [0, 1] self.iso_index = 0 @@ -693,7 +697,8 @@ def fake_create_config_drive(instance, injected_files, password, def fake_attach_cdrom(vm_ref, instance, data_store_ref, iso_uploaded_path): - self.assertEqual(iso_uploaded_path, self.iso_path[self.iso_index]) + self.assertEqual(iso_uploaded_path, + str(self.iso_path[self.iso_index])) self.iso_index += 1 self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm", @@ -707,7 +712,8 @@ def fake_attach_cdrom(vm_ref, instance, data_store_ref, def test_cdrom_attach_with_config_drive(self): self.flags(force_config_drive=True) - self.iso_path = '[%s] fake-config-drive' % self.ds + + self.iso_path = ds_util.DatastorePath(self.ds, 'fake-config-drive') self.cd_attach_called = False def fake_create_config_drive(instance, injected_files, password, @@ -716,7 +722,7 @@ def fake_create_config_drive(instance, injected_files, password, def fake_attach_cdrom(vm_ref, instance, data_store_ref, iso_uploaded_path): - self.assertEqual(iso_uploaded_path, self.iso_path) + self.assertEqual(iso_uploaded_path, str(self.iso_path)) self.cd_attach_called = True self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm", @@ -833,12 +839,13 @@ def test_spawn_disk_extend(self): self._check_vm_info(info, power_state.RUNNING) def test_spawn_disk_extend_exists(self): - root = ('[%s] vmware_base/%s/%s.80.vmdk' % - (self.ds, self.fake_image_uuid, 
self.fake_image_uuid)) + root = ds_util.DatastorePath(self.ds, 'vmware_base', + self.fake_image_uuid, + '%s.80.vmdk' % self.fake_image_uuid) self.root = root def _fake_extend(instance, requested_size, name, dc_ref): - vmwareapi_fake._add_file(self.root) + vmwareapi_fake._add_file(str(self.root)) self.stubs.Set(self.conn._vmops, '_extend_virtual_disk', _fake_extend) @@ -847,7 +854,7 @@ def _fake_extend(instance, requested_size, name, dc_ref): info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) - self.assertTrue(vmwareapi_fake.get_file(root)) + self.assertTrue(vmwareapi_fake.get_file(str(root))) def test_spawn_disk_extend_sparse(self): self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties') @@ -1466,12 +1473,12 @@ def destroy_rescued(self, fake_method): ) as (fake_detach, fake_power_on): self.instance['vm_state'] = vm_states.RESCUED self.conn.destroy(self.context, self.instance, self.network_info) - inst_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid) - self.assertFalse(vmwareapi_fake.get_file(inst_path)) - rescue_file_path = '[%s] %s-rescue/%s-rescue.vmdk' % (self.ds, - self.uuid, - self.uuid) - self.assertFalse(vmwareapi_fake.get_file(rescue_file_path)) + inst_path = ds_util.DatastorePath(self.ds, self.uuid, + '%s.vmdk' % self.uuid) + self.assertFalse(vmwareapi_fake.get_file(str(inst_path))) + rescue_file_path = ds_util.DatastorePath( + self.ds, '%s-rescue' % self.uuid, '%s-rescue.vmdk' % self.uuid) + self.assertFalse(vmwareapi_fake.get_file(str(rescue_file_path))) # Unrescue does not power on with destroy self.assertFalse(fake_power_on.called) @@ -1564,7 +1571,8 @@ def fake_create_config_drive(instance, injected_files, password, data_store_name, folder, instance_uuid, cookies): self.assertTrue(uuidutils.is_uuid_like(instance['uuid'])) - return "[%s] %s/fake.iso" % (data_store_name, instance_uuid) + return str(ds_util.DatastorePath(data_store_name, + instance_uuid, 
'fake.iso')) self.stubs.Set(self.conn._vmops, '_create_config_drive', fake_create_config_drive) @@ -1609,12 +1617,13 @@ def _fake_http_write(host, data_center_name, datastore_name, def test_rescue(self): self._rescue() - inst_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid) - self.assertTrue(vmwareapi_fake.get_file(inst_file_path)) - rescue_file_path = '[%s] %s-rescue/%s-rescue.vmdk' % (self.ds, - self.uuid, - self.uuid) - self.assertTrue(vmwareapi_fake.get_file(rescue_file_path)) + inst_file_path = ds_util.DatastorePath(self.ds, self.uuid, + '%s.vmdk' % self.uuid) + self.assertTrue(vmwareapi_fake.get_file(str(inst_file_path))) + rescue_file_path = ds_util.DatastorePath(self.ds, + '%s-rescue' % self.uuid, + '%s-rescue.vmdk' % self.uuid) + self.assertTrue(vmwareapi_fake.get_file(str(rescue_file_path))) def test_rescue_with_config_drive(self): self.flags(force_config_drive=True) @@ -1984,13 +1993,13 @@ def _fake_get_timestamp_filename(fake): _fake_get_timestamp_filename) def _timestamp_file_exists(self, exists=True): - timestamp = ('[%s] vmware_base/%s/%s/' % - (self.ds, self.fake_image_uuid, - self._get_timestamp_filename())) + timestamp = ds_util.DatastorePath(self.ds, 'vmware_base', + self.fake_image_uuid, + self._get_timestamp_filename() + '/') if exists: - self.assertTrue(vmwareapi_fake.get_file(timestamp)) + self.assertTrue(vmwareapi_fake.get_file(str(timestamp))) else: - self.assertFalse(vmwareapi_fake.get_file(timestamp)) + self.assertFalse(vmwareapi_fake.get_file(str(timestamp))) def _image_aging_image_marked_for_deletion(self): self._create_vm(uuid=uuidutils.generate_uuid()) @@ -2017,9 +2026,9 @@ def test_timestamp_file_removed_spawn(self): def test_timestamp_file_removed_aging(self): self._timestamp_file_removed() ts = self._get_timestamp_filename() - ts_path = ('[%s] vmware_base/%s/%s/' % - (self.ds, self.fake_image_uuid, ts)) - vmwareapi_fake._add_file(ts_path) + ts_path = ds_util.DatastorePath(self.ds, 'vmware_base', + 
self.fake_image_uuid, ts + '/') + vmwareapi_fake._add_file(str(ts_path)) self._timestamp_file_exists() all_instances = [self.instance] self.conn.manage_image_cache(self.context, all_instances) diff --git a/nova/tests/virt/vmwareapi/test_ds_util.py b/nova/tests/virt/vmwareapi/test_ds_util.py index 4e158aeeaf..610fc9110c 100644 --- a/nova/tests/virt/vmwareapi/test_ds_util.py +++ b/nova/tests/virt/vmwareapi/test_ds_util.py @@ -37,12 +37,6 @@ def tearDown(self): super(DsUtilTestCase, self).tearDown() fake.reset() - def test_build_datastore_path(self): - path = ds_util.build_datastore_path('ds', 'folder') - self.assertEqual('[ds] folder', path) - path = ds_util.build_datastore_path('ds', 'folder/file') - self.assertEqual('[ds] folder/file', path) - def test_file_delete(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('DeleteDatastoreFile_Task', method) diff --git a/nova/virt/vmwareapi/ds_util.py b/nova/virt/vmwareapi/ds_util.py index 8db0e665b0..3d53ceccd3 100644 --- a/nova/virt/vmwareapi/ds_util.py +++ b/nova/virt/vmwareapi/ds_util.py @@ -168,12 +168,6 @@ def parse(cls, datastore_path): return cls(datastore_name, path.strip()) -# TODO(vui): remove after converting all callers to use Datastore.build_path() -def build_datastore_path(datastore_name, path): - """Build the datastore compliant path.""" - return str(DatastorePath(datastore_name, path)) - - # NOTE(mdbooth): this convenience function is temporarily duplicated in # vm_util. The correct fix is to handle paginated results as they are returned # from the relevant vim_util function. 
However, vim_util is currently diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 324137de24..97e674d0ba 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -152,8 +152,7 @@ def _delete_datastore_file(self, instance, datastore_path, dc_ref): exc_info=True) def _get_vmdk_path(self, ds_name, folder, name): - path = "%s/%s.vmdk" % (folder, name) - return ds_util.build_datastore_path(ds_name, path) + return str(ds_util.DatastorePath(ds_name, folder, '%s.vmdk' % name)) def _get_disk_format(self, image_meta): disk_format = image_meta.get('disk_format') @@ -547,13 +546,12 @@ def _get_image_properties(root_size): dc_info.name, instance.uuid, cookies) - uploaded_iso_path = ds_util.build_datastore_path( - datastore.name, - uploaded_iso_path) + uploaded_iso_path = ds_util.DatastorePath(datastore.name, + uploaded_iso_path) self._attach_cdrom_to_vm( vm_ref, instance, datastore.ref, - uploaded_iso_path) + str(uploaded_iso_path)) else: # Attach the root disk to the VM. From a55e87d5b4517106fcea22b7642b9690cde78b47 Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Tue, 8 Jul 2014 11:55:10 +0100 Subject: [PATCH 313/486] VMware: test_driver_api: Use local variables in closures Some closures were unecessarily storing constants used by closures in the test object. This change converts them to use local variables instead. Values which are modified remain in the test object. Additionally, 1 unused value is removed. 
TrivialFix Change-Id: I0164bf23f4d51505871cee8db170b73534410c4a --- nova/tests/virt/vmwareapi/test_driver_api.py | 21 +++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index 3e140066eb..6a3aefe248 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -668,13 +668,13 @@ def test_iso_disk_type_created_with_root_gb_0(self): self.assertFalse(vmwareapi_fake.get_file(str(path))) def test_iso_disk_cdrom_attach(self): - self.iso_path = ds_util.DatastorePath(self.ds, 'vmware_base', - self.fake_image_uuid, - '%s.iso' % self.fake_image_uuid) + iso_path = ds_util.DatastorePath(self.ds, 'vmware_base', + self.fake_image_uuid, + '%s.iso' % self.fake_image_uuid) def fake_attach_cdrom(vm_ref, instance, data_store_ref, iso_uploaded_path): - self.assertEqual(iso_uploaded_path, str(self.iso_path)) + self.assertEqual(iso_uploaded_path, str(iso_path)) self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm", fake_attach_cdrom) @@ -683,12 +683,11 @@ def fake_attach_cdrom(vm_ref, instance, data_store_ref, def test_iso_disk_cdrom_attach_with_config_drive(self): self.flags(force_config_drive=True) - self.iso_path = [ + iso_path = [ ds_util.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, '%s.iso' % self.fake_image_uuid), ds_util.DatastorePath(self.ds, 'fake-config-drive')] - self.iso_unit_nos = [0, 1] self.iso_index = 0 def fake_create_config_drive(instance, injected_files, password, @@ -697,8 +696,7 @@ def fake_create_config_drive(instance, injected_files, password, def fake_attach_cdrom(vm_ref, instance, data_store_ref, iso_uploaded_path): - self.assertEqual(iso_uploaded_path, - str(self.iso_path[self.iso_index])) + self.assertEqual(iso_uploaded_path, str(iso_path[self.iso_index])) self.iso_index += 1 self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm", @@ -713,7 +711,7 @@ def 
fake_attach_cdrom(vm_ref, instance, data_store_ref, def test_cdrom_attach_with_config_drive(self): self.flags(force_config_drive=True) - self.iso_path = ds_util.DatastorePath(self.ds, 'fake-config-drive') + iso_path = ds_util.DatastorePath(self.ds, 'fake-config-drive') self.cd_attach_called = False def fake_create_config_drive(instance, injected_files, password, @@ -722,7 +720,7 @@ def fake_create_config_drive(instance, injected_files, password, def fake_attach_cdrom(vm_ref, instance, data_store_ref, iso_uploaded_path): - self.assertEqual(iso_uploaded_path, str(self.iso_path)) + self.assertEqual(iso_uploaded_path, str(iso_path)) self.cd_attach_called = True self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm", @@ -842,10 +840,9 @@ def test_spawn_disk_extend_exists(self): root = ds_util.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, '%s.80.vmdk' % self.fake_image_uuid) - self.root = root def _fake_extend(instance, requested_size, name, dc_ref): - vmwareapi_fake._add_file(str(self.root)) + vmwareapi_fake._add_file(str(root)) self.stubs.Set(self.conn._vmops, '_extend_virtual_disk', _fake_extend) From 07c9c1ba3ae310b725112804dbc2398630ec27a5 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 31 Jul 2014 22:14:09 -0400 Subject: [PATCH 314/486] docs - Fix docstring issues in virt tree Fix the following errors in several files. ERROR: Unexpected indentation. ERROR: Unknown interpreted text role "paramref". WARNING: Block quote ends without a blank line; unexpected unindent. WARNING: Definition list ends without a blank line; unexpected unindent. WARNING: Enumerated list ends without a blank line; unexpected unindent. WARNING: Field list ends without a blank line; unexpected unindent. Specifically the "Unknown interpreted text role" was fixed by adding a separate import instead of importing 'text' directly. Others were indentation, new lines, or adding lists. 
Change-Id: I241ce1ca9831d9df00d022297ea622953a6fdc60 --- .../versions/010_add_preserve_ephemeral.py | 6 ++- nova/virt/baremetal/pxe.py | 2 +- nova/virt/baremetal/tilera.py | 11 ++--- nova/virt/driver.py | 29 +++++++------ nova/virt/hyperv/vhdutils.py | 30 ++++++------- nova/virt/libvirt/driver.py | 4 +- nova/virt/libvirt/imagebackend.py | 6 ++- nova/virt/vmwareapi/ds_util.py | 42 ++++++++++--------- nova/virt/vmwareapi/vm_util.py | 23 +++++----- nova/virt/vmwareapi/vmops.py | 31 ++++++++------ nova/virt/xenapi/client/objects.py | 2 +- nova/virt/xenapi/driver.py | 6 +-- nova/virt/xenapi/vm_utils.py | 4 +- 13 files changed, 108 insertions(+), 88 deletions(-) diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/010_add_preserve_ephemeral.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/010_add_preserve_ephemeral.py index c95c66d168..2cd5745327 100644 --- a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/010_add_preserve_ephemeral.py +++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/010_add_preserve_ephemeral.py @@ -13,7 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. 
-from sqlalchemy import Column, MetaData, Boolean, Table, text +from sqlalchemy import Column, MetaData, Boolean, Table +from sqlalchemy.sql import expression COLUMN_NAME = 'preserve_ephemeral' @@ -25,7 +26,8 @@ def upgrade(migrate_engine): meta.bind = migrate_engine t = Table(TABLE_NAME, meta, autoload=True) - default = text('0') if migrate_engine.name == 'sqlite' else text('false') + default = (expression.text('0') if migrate_engine.name == 'sqlite' + else expression.text('false')) preserve_ephemeral_col = Column(COLUMN_NAME, Boolean, server_default=default) t.create_column(preserve_ephemeral_col) diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py index 72d5a02169..0266d347ee 100644 --- a/nova/virt/baremetal/pxe.py +++ b/nova/virt/baremetal/pxe.py @@ -195,7 +195,7 @@ def get_tftp_image_info(instance, flavor): Raises NovaException if - instance does not contain kernel_id or ramdisk_id - deploy_kernel_id or deploy_ramdisk_id can not be read from - flavor['extra_specs'] and defaults are not set + flavor['extra_specs'] and defaults are not set """ image_info = { diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py index 6bd96c716a..fa324782bf 100644 --- a/nova/virt/baremetal/tilera.py +++ b/nova/virt/baremetal/tilera.py @@ -233,12 +233,13 @@ def activate_bootloader(self, context, node, instance, network_info): This method writes the instances config file, and then creates symlinks for each MAC address in the instance. 
- By default, the complete layout looks like this: + By default, the complete layout looks like this:: + + /tftpboot/ + ./{uuid}/ + kernel + ./fs_node_id/ - /tftpboot/ - ./{uuid}/ - kernel - ./fs_node_id/ """ get_tftp_image_info(instance) (root_mb, swap_mb) = get_partition_sizes(instance) diff --git a/nova/virt/driver.py b/nova/virt/driver.py index b64459e2ad..035714c780 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -1035,11 +1035,13 @@ def get_host_cpu_stats(self): """Get the currently known host CPU stats. :returns: a dict containing the CPU stat info, eg: - {'kernel': kern, - 'idle': idle, - 'user': user, - 'iowait': wait, - 'frequency': freq}, + + | {'kernel': kern, + | 'idle': idle, + | 'user': user, + | 'iowait': wait, + | 'frequency': freq}, + where kern and user indicate the cumulative CPU time (nanoseconds) spent by kernel and user processes respectively, idle indicates the cumulative idle CPU time @@ -1047,6 +1049,7 @@ def get_host_cpu_stats(self): time (nanoseconds), since the host is booting up; freq indicates the current CPU frequency (MHz). All values are long integers. + """ raise NotImplementedError() @@ -1135,13 +1138,15 @@ def dhcp_options_for_instance(self, instance): client API. 
:return: None, or a set of DHCP options, eg: - [{'opt_name': 'bootfile-name', - 'opt_value': '/tftpboot/path/to/config'}, - {'opt_name': 'server-ip-address', - 'opt_value': '1.2.3.4'}, - {'opt_name': 'tftp-server', - 'opt_value': '1.2.3.4'} - ] + + | [{'opt_name': 'bootfile-name', + | 'opt_value': '/tftpboot/path/to/config'}, + | {'opt_name': 'server-ip-address', + | 'opt_value': '1.2.3.4'}, + | {'opt_name': 'tftp-server', + | 'opt_value': '1.2.3.4'} + | ] + """ pass diff --git a/nova/virt/hyperv/vhdutils.py b/nova/virt/hyperv/vhdutils.py index af611efb6a..55c3a45658 100644 --- a/nova/virt/hyperv/vhdutils.py +++ b/nova/virt/hyperv/vhdutils.py @@ -118,21 +118,21 @@ def resize_vhd(self, vhd_path, new_max_size, is_file_max_size=True): def get_internal_vhd_size_by_file_size(self, vhd_path, new_vhd_file_size): """Fixed VHD size = Data Block size + 512 bytes - Dynamic_VHD_size = Dynamic Disk Header - + Copy of hard disk footer - + Hard Disk Footer - + Data Block - + BAT - Dynamic Disk header fields - Copy of hard disk footer (512 bytes) - Dynamic Disk Header (1024 bytes) - BAT (Block Allocation table) - Data Block 1 - Data Block 2 - Data Block n - Hard Disk Footer (512 bytes) - Default block size is 2M - BAT entry size is 4byte + | Dynamic_VHD_size = Dynamic Disk Header + | + Copy of hard disk footer + | + Hard Disk Footer + | + Data Block + | + BAT + | Dynamic Disk header fields + | Copy of hard disk footer (512 bytes) + | Dynamic Disk Header (1024 bytes) + | BAT (Block Allocation table) + | Data Block 1 + | Data Block 2 + | Data Block n + | Hard Disk Footer (512 bytes) + | Default block size is 2M + | BAT entry size is 4byte """ base_vhd_info = self.get_vhd_info(vhd_path) vhd_type = base_vhd_info['Type'] diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 1675a92c4a..7699a88231 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -1851,8 +1851,8 @@ def volume_snapshot_create(self, context, instance, volume_id, - 
snapshot_id : ID of snapshot - type : qcow2 / - new_file : qcow2 file created by Cinder which - becomes the VM's active image after - the snapshot is complete + becomes the VM's active image after + the snapshot is complete """ LOG.debug("volume_snapshot_create: create_info: %(c_info)s", diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index 184d7fe742..d13dc2a695 100644 --- a/nova/virt/libvirt/imagebackend.py +++ b/nova/virt/libvirt/imagebackend.py @@ -110,9 +110,10 @@ def create_image(self, prepare_template, base, size, *args, **kwargs): Contains specific behavior for each image type. :prepare_template: function, that creates template. - Should accept `target` argument. + Should accept `target` argument. :base: Template name :size: Size of created image in bytes + """ pass @@ -656,7 +657,8 @@ def image(self, instance, disk_name, image_type=None): :instance: Instance name. :name: Image name. :image_type: Image type. - Optional, is CONF.libvirt.images_type by default. + Optional, is CONF.libvirt.images_type by default. + """ backend = self.backend(image_type) return backend(instance=instance, disk_name=disk_name) diff --git a/nova/virt/vmwareapi/ds_util.py b/nova/virt/vmwareapi/ds_util.py index 8db0e665b0..1cf523a917 100644 --- a/nova/virt/vmwareapi/ds_util.py +++ b/nova/virt/vmwareapi/ds_util.py @@ -94,12 +94,14 @@ class DatastorePath(object): file path to a virtual disk. Note: - - Datastore path representations always uses forward slash as separator + + * Datastore path representations always uses forward slash as separator (hence the use of the posixpath module). - - Datastore names are enclosed in square brackets. - - Path part of datastore path is relative to the root directory + * Datastore names are enclosed in square brackets. + * Path part of datastore path is relative to the root directory of the datastore, and is always separated from the [ds_name] part with a single space. 
+ """ VMDK_EXTENSION = "vmdk" @@ -338,22 +340,24 @@ def file_move(session, dc_ref, src_file, dst_file): The list of possible faults that the server can return on error include: - - CannotAccessFile: Thrown if the source file or folder cannot be - moved because of insufficient permissions. - - FileAlreadyExists: Thrown if a file with the given name already - exists at the destination. - - FileFault: Thrown if there is a generic file error - - FileLocked: Thrown if the source file or folder is currently - locked or in use. - - FileNotFound: Thrown if the file or folder specified by sourceName - is not found. - - InvalidDatastore: Thrown if the operation cannot be performed on - the source or destination datastores. - - NoDiskSpace: Thrown if there is not enough space available on the - destination datastore. - - RuntimeFault: Thrown if any type of runtime fault is thrown that - is not covered by the other faults; for example, - a communication error. + + * CannotAccessFile: Thrown if the source file or folder cannot be + moved because of insufficient permissions. + * FileAlreadyExists: Thrown if a file with the given name already + exists at the destination. + * FileFault: Thrown if there is a generic file error + * FileLocked: Thrown if the source file or folder is currently + locked or in use. + * FileNotFound: Thrown if the file or folder specified by sourceName + is not found. + * InvalidDatastore: Thrown if the operation cannot be performed on + the source or destination datastores. + * NoDiskSpace: Thrown if there is not enough space available on the + destination datastore. + * RuntimeFault: Thrown if any type of runtime fault is thrown that + is not covered by the other faults; for example, + a communication error. 
+ """ LOG.debug("Moving file from %(src)s to %(dst)s.", {'src': src_file, 'dst': dst_file}) diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index 0f90ab8a44..868a8bf6ba 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -1048,9 +1048,9 @@ def propset_dict(propset): that are returned by the VMware API. You can read more about these at: - http://pubs.vmware.com/vsphere-51/index.jsp - #com.vmware.wssdk.apiref.doc/ - vmodl.query.PropertyCollector.ObjectContent.html + | http://pubs.vmware.com/vsphere-51/index.jsp + | #com.vmware.wssdk.apiref.doc/ + | vmodl.query.PropertyCollector.ObjectContent.html :param propset: a property "set" from ObjectContent :return: dictionary representing property set @@ -1186,14 +1186,15 @@ def get_dict_mor(session, list_obj): { value = "domain-1002", _type = "ClusterComputeResource" } Output data format: - dict_mors = { - 'respool-1001': { 'cluster_mor': clusterMor, - 'res_pool_mor': resourcePoolMor, - 'name': display_name }, - 'domain-1002': { 'cluster_mor': clusterMor, - 'res_pool_mor': resourcePoolMor, - 'name': display_name }, - } + | dict_mors = { + | 'respool-1001': { 'cluster_mor': clusterMor, + | 'res_pool_mor': resourcePoolMor, + | 'name': display_name }, + | 'domain-1002': { 'cluster_mor': clusterMor, + | 'res_pool_mor': resourcePoolMor, + | 'name': display_name }, + | } + """ dict_mors = {} for obj_ref, path in list_obj: diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 324137de24..15af1feb35 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -168,20 +168,25 @@ def spawn(self, context, instance, image_meta, injected_files, Steps followed are: - 1. Create a VM with no disk and the specifics in the instance object + #. Create a VM with no disk and the specifics in the instance object like RAM size. - 2. For flat disk - 2.1. Create a dummy vmdk of the size of the disk file that is to be - uploaded. 
This is required just to create the metadata file. - 2.2. Delete the -flat.vmdk file created in the above step and retain - the metadata .vmdk file. - 2.3. Upload the disk file. - 3. For sparse disk - 3.1. Upload the disk file to a -sparse.vmdk file. - 3.2. Copy/Clone the -sparse.vmdk file to a thin vmdk. - 3.3. Delete the -sparse.vmdk file. - 4. Attach the disk to the VM by reconfiguring the same. - 5. Power on the VM. + #. For flat disk + + #. Create a dummy vmdk of the size of the disk file that is to be + uploaded. This is required just to create the metadata file. + #. Delete the -flat.vmdk file created in the above step and retain + the metadata .vmdk file. + #. Upload the disk file. + + #. For sparse disk + + #. Upload the disk file to a -sparse.vmdk file. + #. Copy/Clone the -sparse.vmdk file to a thin vmdk. + #. Delete the -sparse.vmdk file. + + #. Attach the disk to the VM by reconfiguring the same. + #. Power on the VM. + """ ebs_root = False if block_device_info: diff --git a/nova/virt/xenapi/client/objects.py b/nova/virt/xenapi/client/objects.py index a358d41b8e..5cc91eb4c7 100644 --- a/nova/virt/xenapi/client/objects.py +++ b/nova/virt/xenapi/client/objects.py @@ -46,7 +46,7 @@ class XenAPISessionObject(object): to use get_all(), but this often leads to races as objects get deleted under your feet. It is preferable to use the undocumented: * vms = session.VM.get_all_records_where( - 'field "is_control_domain"="true"') + 'field "is_control_domain"="true"') """ diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py index 9cd6fd4be2..3019aad7ec 100644 --- a/nova/virt/xenapi/driver.py +++ b/nova/virt/xenapi/driver.py @@ -584,7 +584,8 @@ def post_live_migration_at_destination(self, context, instance, nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param network_info: instance network information - :param : block_migration: if true, post operation of block_migration. 
+ :param block_migration: if true, post operation of block_migration. + """ self._vmops.post_live_migration_at_destination(context, instance, network_info, block_device_info, block_device_info) @@ -677,7 +678,6 @@ def resume_state_on_host_boot(self, context, instance, network_info, def get_per_instance_usage(self): """Get information about instance resource usage. - :returns: dict of nova uuid => dict of usage - info + :returns: dict of nova uuid => dict of usage info """ return self._vmops.get_per_instance_usage() diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index d22d1e07ce..e7106197dc 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -1718,8 +1718,8 @@ def lookup_vm_vdis(session, vm_ref): def lookup(session, name_label, check_rescue=False): """Look the instance up and return it if available. - :param check_rescue: if True will return the 'name'-rescue vm if it - exists, instead of just 'name' + :param:check_rescue: if True will return the 'name'-rescue vm if it + exists, instead of just 'name' """ if check_rescue: result = lookup(session, name_label + '-rescue', False) From 6a9fe989e8d20ba43ed1a2bf318bc41b745f318e Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 31 Jul 2014 22:55:50 -0400 Subject: [PATCH 315/486] docs - Fix exception in docs generation We need to pass the flavor_id when creating the side_effect using FlavorNotFound exception Change-Id: Id5d0ac387d2dca2dc70dabba173fc53972751236 Closes-Bug: #1351127 --- .../api/openstack/compute/plugins/v3/test_server_actions.py | 3 ++- nova/tests/api/openstack/compute/test_server_actions.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py index 3f03cb084a..d4bd9369ed 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py +++ 
b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py @@ -690,7 +690,8 @@ def test_resize_raises_cannot_resize_disk(self, mock_resize): req, FAKE_UUID, body) @mock.patch('nova.compute.api.API.resize', - side_effect=exception.FlavorNotFound(reason='')) + side_effect=exception.FlavorNotFound(reason='', + flavor_id='fake_id')) def test_resize_raises_flavor_not_found(self, mock_resize): body = dict(resize=dict(flavor_ref="http://localhost/3")) req = fakes.HTTPRequestV3.blank(self.url) diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py index 4a37d11ee1..ec42686298 100644 --- a/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/tests/api/openstack/compute/test_server_actions.py @@ -843,7 +843,8 @@ def test_resize_raises_cannot_resize_disk(self, mock_resize): req, FAKE_UUID, body) @mock.patch('nova.compute.api.API.resize', - side_effect=exception.FlavorNotFound(reason='')) + side_effect=exception.FlavorNotFound(reason='', + flavor_id='fake_id')) def test_resize_raises_flavor_not_found(self, mock_resize): body = dict(resize=dict(flavorRef="http://localhost/3")) req = fakes.HTTPRequest.blank(self.url) From 11aaf21d9e239c0b017a89f82f678d264f0d403b Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Fri, 1 Aug 2014 10:33:41 -0400 Subject: [PATCH 316/486] docs - Fix errors,warnings from document generation SEVERE: Unexpected section title. ERROR: Unexpected indentation. WARNING: Block quote ends without a blank line; unexpected unindent. WARNING: Definition list ends without a blank line; unexpected unindent. WARNING: Field list ends without a blank line; unexpected unindent. WARNING: Inline emphasis start-string without end-string. WARNING: Inline interpreted text or phrase reference start-string without end-string. WARNING: Inline strong start-string without end-string. 
Partial-Bug: #1351350 Change-Id: I661e0e32519f8e4de3325efd10242824015ed03d --- nova/api/openstack/compute/contrib/hosts.py | 68 +++++++++--------- .../api/openstack/compute/plugins/v3/hosts.py | 69 ++++++++++--------- nova/compute/resource_tracker.py | 6 +- nova/db/sqlalchemy/api.py | 43 ++++++------ nova/db/sqlalchemy/utils.py | 6 +- nova/hooks.py | 32 ++++----- nova/image/api.py | 4 +- nova/keymgr/key_mgr.py | 4 +- nova/notifications.py | 13 ++-- nova/objects/base.py | 6 +- nova/objects/instance.py | 27 ++++---- nova/objects/pci_device.py | 14 ++-- nova/openstack/common/network_utils.py | 15 ++-- nova/openstack/common/report/report.py | 6 +- nova/pci/pci_request.py | 54 ++++++++------- .../filters/isolated_hosts_filter.py | 22 +++--- .../filters/pci_passthrough_filter.py | 14 ++-- nova/scheduler/filters/trusted_filter.py | 11 +-- .../compute/plugins/v3/test_servers.py | 18 ++--- .../api/openstack/compute/test_servers.py | 16 ++--- nova/tests/compute/test_resource_tracker.py | 5 +- nova/tests/db/test_migrations.py | 16 ++--- nova/tests/image_fixtures.py | 21 +++--- 23 files changed, 259 insertions(+), 231 deletions(-) diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py index cb2303799a..6a08836b71 100644 --- a/nova/api/openstack/compute/contrib/hosts.py +++ b/nova/api/openstack/compute/contrib/hosts.py @@ -97,39 +97,41 @@ def __init__(self): @wsgi.serializers(xml=HostIndexTemplate) def index(self, req): """Returns a dict in the format: - {'hosts': [{'host_name': 'some.host.name', - 'service': 'cells', - 'zone': 'internal'}, - {'host_name': 'some.other.host.name', - 'service': 'cells', - 'zone': 'internal'}, - {'host_name': 'some.celly.host.name', - 'service': 'cells', - 'zone': 'internal'}, - {'host_name': 'console1.host.com', - 'service': 'consoleauth', - 'zone': 'internal'}, - {'host_name': 'network1.host.com', - 'service': 'network', - 'zone': 'internal'}, - {'host_name': 'netwwork2.host.com', - 'service': 
'network', - 'zone': 'internal'}, - {'host_name': 'compute1.host.com', - 'service': 'compute', - 'zone': 'nova'}, - {'host_name': 'compute2.host.com', - 'service': 'compute', - 'zone': 'nova'}, - {'host_name': 'sched1.host.com', - 'service': 'scheduler', - 'zone': 'internal'}, - {'host_name': 'sched2.host.com', - 'service': 'scheduler', - 'zone': 'internal'}, - {'host_name': 'vol1.host.com', - 'service': 'volume'}, - 'zone': 'internal']} + + | {'hosts': [{'host_name': 'some.host.name', + | 'service': 'cells', + | 'zone': 'internal'}, + | {'host_name': 'some.other.host.name', + | 'service': 'cells', + | 'zone': 'internal'}, + | {'host_name': 'some.celly.host.name', + | 'service': 'cells', + | 'zone': 'internal'}, + | {'host_name': 'console1.host.com', + | 'service': 'consoleauth', + | 'zone': 'internal'}, + | {'host_name': 'network1.host.com', + | 'service': 'network', + | 'zone': 'internal'}, + | {'host_name': 'netwwork2.host.com', + | 'service': 'network', + | 'zone': 'internal'}, + | {'host_name': 'compute1.host.com', + | 'service': 'compute', + | 'zone': 'nova'}, + | {'host_name': 'compute2.host.com', + | 'service': 'compute', + | 'zone': 'nova'}, + | {'host_name': 'sched1.host.com', + | 'service': 'scheduler', + | 'zone': 'internal'}, + | {'host_name': 'sched2.host.com', + | 'service': 'scheduler', + | 'zone': 'internal'}, + | {'host_name': 'vol1.host.com', + | 'service': 'volume'}, + | 'zone': 'internal']} + """ context = req.environ['nova.context'] authorize(context) diff --git a/nova/api/openstack/compute/plugins/v3/hosts.py b/nova/api/openstack/compute/plugins/v3/hosts.py index 8990aa873a..e1b3399f75 100644 --- a/nova/api/openstack/compute/plugins/v3/hosts.py +++ b/nova/api/openstack/compute/plugins/v3/hosts.py @@ -39,41 +39,42 @@ def __init__(self): @extensions.expected_errors(()) def index(self, req): - """:returns: A dict in the format: + """Returns a dict in the format + + | {'hosts': [{'host_name': 'some.host.name', + | 'service': 'cells', + | 'zone': 
'internal'}, + | {'host_name': 'some.other.host.name', + | 'service': 'cells', + | 'zone': 'internal'}, + | {'host_name': 'some.celly.host.name', + | 'service': 'cells', + | 'zone': 'internal'}, + | {'host_name': 'console1.host.com', + | 'service': 'consoleauth', + | 'zone': 'internal'}, + | {'host_name': 'network1.host.com', + | 'service': 'network', + | 'zone': 'internal'}, + | {'host_name': 'netwwork2.host.com', + | 'service': 'network', + | 'zone': 'internal'}, + | {'host_name': 'compute1.host.com', + | 'service': 'compute', + | 'zone': 'nova'}, + | {'host_name': 'compute2.host.com', + | 'service': 'compute', + | 'zone': 'nova'}, + | {'host_name': 'sched1.host.com', + | 'service': 'scheduler', + | 'zone': 'internal'}, + | {'host_name': 'sched2.host.com', + | 'service': 'scheduler', + | 'zone': 'internal'}, + | {'host_name': 'vol1.host.com', + | 'service': 'volume'}, + | 'zone': 'internal']} - {'hosts': [{'host_name': 'some.host.name', - 'service': 'cells', - 'zone': 'internal'}, - {'host_name': 'some.other.host.name', - 'service': 'cells', - 'zone': 'internal'}, - {'host_name': 'some.celly.host.name', - 'service': 'cells', - 'zone': 'internal'}, - {'host_name': 'console1.host.com', - 'service': 'consoleauth', - 'zone': 'internal'}, - {'host_name': 'network1.host.com', - 'service': 'network', - 'zone': 'internal'}, - {'host_name': 'netwwork2.host.com', - 'service': 'network', - 'zone': 'internal'}, - {'host_name': 'compute1.host.com', - 'service': 'compute', - 'zone': 'nova'}, - {'host_name': 'compute2.host.com', - 'service': 'compute', - 'zone': 'nova'}, - {'host_name': 'sched1.host.com', - 'service': 'scheduler', - 'zone': 'internal'}, - {'host_name': 'sched2.host.com', - 'service': 'scheduler', - 'zone': 'internal'}, - {'host_name': 'vol1.host.com', - 'service': 'volume'}, - 'zone': 'internal']} """ context = req.environ['nova.context'] authorize(context) diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py index 
5dab024fef..680bf6c061 100644 --- a/nova/compute/resource_tracker.py +++ b/nova/compute/resource_tracker.py @@ -145,10 +145,10 @@ def resize_claim(self, context, instance, instance_type, limits=None): :param instance: instance object to reserve resources for :param instance_type: new instance_type being resized to :param limits: Dict of oversubscription limits for memory, disk, - and CPUs. + and CPUs :returns: A Claim ticket representing the reserved resources. This - should be turned into finalize a resource claim or free - resources after the compute operation is finished. + should be turned into finalize a resource claim or free + resources after the compute operation is finished. """ if self.disabled: # compute_driver doesn't support resource tracking, just diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 6bd1db5ca4..f92e498a68 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1844,32 +1844,33 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir, Depending on the name of a filter, matching for that filter is performed using either exact matching or as regular expression - matching. Exact matching is applied for the following filters: + matching. Exact matching is applied for the following filters:: - ['project_id', 'user_id', 'image_ref', - 'vm_state', 'instance_type_id', 'uuid', - 'metadata', 'host', 'system_metadata'] + | ['project_id', 'user_id', 'image_ref', + | 'vm_state', 'instance_type_id', 'uuid', + | 'metadata', 'host', 'system_metadata'] A third type of filter (also using exact matching), filters based on instance metadata tags when supplied under a special - key named 'filter'. 
- - filters = { - 'filter': [ - {'name': 'tag-key', 'value': ''}, - {'name': 'tag-value', 'value': ''}, - {'name': 'tag:', 'value': ''} - ] - } - - Special keys are used to tweek the query further: - - 'changes-since' - only return instances updated after - 'deleted' - only return (or exclude) deleted instances - 'soft_deleted' - modify behavior of 'deleted' to either - include or exclude instances whose - vm_state is SOFT_DELETED. + key named 'filter':: + + | filters = { + | 'filter': [ + | {'name': 'tag-key', 'value': ''}, + | {'name': 'tag-value', 'value': ''}, + | {'name': 'tag:', 'value': ''} + | ] + | } + + Special keys are used to tweek the query further:: + + | 'changes-since' - only return instances updated after + | 'deleted' - only return (or exclude) deleted instances + | 'soft_deleted' - modify behavior of 'deleted' to either + | include or exclude instances whose + | vm_state is SOFT_DELETED. + """ # NOTE(mriedem): If the limit is 0 there is no point in even going # to the database since nothing is going to be returned anyway. diff --git a/nova/db/sqlalchemy/utils.py b/nova/db/sqlalchemy/utils.py index fcb33922a2..eafe3c2481 100644 --- a/nova/db/sqlalchemy/utils.py +++ b/nova/db/sqlalchemy/utils.py @@ -93,10 +93,8 @@ def create_shadow_table(migrate_engine, table_name=None, table=None, :param table_name: Autoload table with this name and create shadow table :param table: Autoloaded table, so just create corresponding shadow table. :param col_name_col_instance: contains pair column_name=column_instance. - column_instance is instance of Column. These params - are required only for columns that have unsupported - types by sqlite. For example BigInteger. - + column_instance is instance of Column. These params are required only for + columns that have unsupported types by sqlite. For example BigInteger. :returns: The created shadow_table object. 
""" meta = MetaData(bind=migrate_engine) diff --git a/nova/hooks.py b/nova/hooks.py index 3c67bb3aed..735b89338b 100644 --- a/nova/hooks.py +++ b/nova/hooks.py @@ -23,22 +23,22 @@ Hook objects are loaded by HookLoaders. Each named hook may invoke multiple Hooks. -Example Hook object: - -class MyHook(object): - def pre(self, *args, **kwargs): - # do stuff before wrapped callable runs - - def post(self, rv, *args, **kwargs): - # do stuff after wrapped callable runs - -Example Hook object with function parameters: - -class MyHookWithFunction(object): - def pre(self, f, *args, **kwargs): - # do stuff with wrapped function info - def post(self, f, *args, **kwards): - # do stuff with wrapped function info +Example Hook object:: + + | class MyHook(object): + | def pre(self, *args, **kwargs): + | # do stuff before wrapped callable runs + | + | def post(self, rv, *args, **kwargs): + | # do stuff after wrapped callable runs + +Example Hook object with function parameters:: + + | class MyHookWithFunction(object): + | def pre(self, f, *args, **kwargs): + | # do stuff with wrapped function info + | def post(self, f, *args, **kwargs): + | # do stuff with wrapped function info """ diff --git a/nova/image/api.py b/nova/image/api.py index 1b5db39f85..a43fdb156a 100644 --- a/nova/image/api.py +++ b/nova/image/api.py @@ -61,8 +61,8 @@ def get_all(self, context, **kwargs): are owned by the requesting user in the ACTIVE status are returned. :param context: The `nova.context.Context` object for the request - :param **kwargs: A dictionary of filter and pagination values that - may be passed to the underlying image info driver. + :param kwargs: A dictionary of filter and pagination values that + may be passed to the underlying image info driver. 
""" session = self._get_session(context) return session.detail(context, **kwargs) diff --git a/nova/keymgr/key_mgr.py b/nova/keymgr/key_mgr.py index 4fb4f07bc0..c020ca2474 100644 --- a/nova/keymgr/key_mgr.py +++ b/nova/keymgr/key_mgr.py @@ -60,8 +60,10 @@ def copy_key(self, ctxt, key_id, **kwargs): the specified context does not permit copying keys, then a NotAuthorized error should be raised. - Implementation note: This method should behave identically to + Implementation note: This method should behave identically to:: + store_key(context, get_key(context, )) + although it is preferable to perform this operation within the key manager to avoid unnecessary handling of the key material. """ diff --git a/nova/notifications.py b/nova/notifications.py index 8c43e5959c..f0e302a920 100644 --- a/nova/notifications.py +++ b/nova/notifications.py @@ -319,10 +319,15 @@ def info_from_instance(context, instance_ref, network_info, """Get detailed instance information for an instance which is common to all notifications. - :param network_info: network_info provided if not None - :param system_metadata: system_metadata DB entries for the instance, - if not None. *NOTE*: Currently unused here in trunk, but needed for - potential custom modifications. + :param:network_info: network_info provided if not None + :param:system_metadata: system_metadata DB entries for the instance, + if not None + + .. note:: + + Currently unused here in trunk, but needed for potential custom + modifications. + """ def null_safe_str(s): diff --git a/nova/objects/base.py b/nova/objects/base.py index 63a2705aaa..7830601ec8 100644 --- a/nova/objects/base.py +++ b/nova/objects/base.py @@ -339,9 +339,9 @@ def obj_make_compatible(self, primitive, target_version): :param:primitive: The result of self.obj_to_primitive() :param:target_version: The version string requested by the recipient - of the object. - :param:raises: nova.exception.UnsupportedObjectError if conversion - is not possible for some reason. 
+ of the object + :raises: nova.exception.UnsupportedObjectError if conversion + is not possible for some reason """ pass diff --git a/nova/objects/instance.py b/nova/objects/instance.py index 275e7db89d..136e98eb1e 100644 --- a/nova/objects/instance.py +++ b/nova/objects/instance.py @@ -389,13 +389,15 @@ def save(self, context, expected_vm_state=None, self.what_changed(). If expected_task_state is provided, it will be checked against the in-database copy of the instance before updates are made. - :param context: Security context - :param expected_task_state: Optional tuple of valid task states - for the instance to be in. - :param expected_vm_state: Optional tuple of valid vm states - for the instance to be in. + + :param:context: Security context + :param:expected_task_state: Optional tuple of valid task states + for the instance to be in + :param:expected_vm_state: Optional tuple of valid vm states + for the instance to be in :param admin_state_reset: True if admin API is forcing setting - of task_state/vm_state. + of task_state/vm_state + """ cell_type = cells_opts.get_cell_type() @@ -680,14 +682,15 @@ def get_active_by_window_joined(cls, context, begin, end=None, expected_attrs=None): """Get instances and joins active during a certain time window. 
- :param context: nova request context - :param begin: datetime for the start of the time window - :param end: datetime for the end of the time window - :param project_id: used to filter instances by project - :param host: used to filter instances on a given compute host - :param expected_attrs: list of related fields that can be joined + :param:context: nova request context + :param:begin: datetime for the start of the time window + :param:end: datetime for the end of the time window + :param:project_id: used to filter instances by project + :param:host: used to filter instances on a given compute host + :param:expected_attrs: list of related fields that can be joined in the database layer when querying for instances :returns: InstanceList + """ # NOTE(mriedem): We have to convert the datetime objects to string # primitives for the remote call. diff --git a/nova/objects/pci_device.py b/nova/objects/pci_device.py index b6fa5a6435..32caadeebd 100644 --- a/nova/objects/pci_device.py +++ b/nova/objects/pci_device.py @@ -54,12 +54,14 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject): the device object is changed to deleted state and no longer synced with the DB. - Filed notes: - 'dev_id': - Hypervisor's identification for the device, the string format - is hypervisor specific - 'extra_info': - Device-specific properties like PF address, switch ip address etc. + Filed notes:: + + | 'dev_id': + | Hypervisor's identification for the device, the string format + | is hypervisor specific + | 'extra_info': + | Device-specific properties like PF address, switch ip address etc. 
+ """ # Version 1.0: Initial version diff --git a/nova/openstack/common/network_utils.py b/nova/openstack/common/network_utils.py index 88f4c9f638..331edcb411 100644 --- a/nova/openstack/common/network_utils.py +++ b/nova/openstack/common/network_utils.py @@ -113,16 +113,15 @@ def set_tcp_keepalive(sock, tcp_keepalive=True, This function configures tcp keepalive parameters if users wish to do so. - :param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are - not sure, this should be True, and default values will be used. - :param tcp_keepidle: time to wait before starting to send keepalive probes + :param:tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are + not sure, this should be True, and default values will be used + :param:tcp_keepidle: time to wait before starting to send keepalive probes + :param:tcp_keepalive_interval: time between successive probes, once the + initial wait time is over + :param:tcp_keepalive_count: number of probes to send before the connection + is killed - :param tcp_keepalive_interval: time between successive probes, once the - initial wait time is over - - :param tcp_keepalive_count: number of probes to send before the connection - is killed """ # NOTE(praneshp): Despite keepalive being a tcp concept, the level is diff --git a/nova/openstack/common/report/report.py b/nova/openstack/common/report/report.py index 7fca30c777..730ab4ac0c 100644 --- a/nova/openstack/common/report/report.py +++ b/nova/openstack/common/report/report.py @@ -89,9 +89,9 @@ class ReportSection(object): :func:`BasicReport.add_section` :param view: the top-level view for this section - :param generator: the generator for this section - (any callable object which takes - no parameters and returns a data model) + :param generator: the generator for this section which could be + any callable object which takes + no parameters and returns a data model """ def __init__(self, view, generator): diff --git a/nova/pci/pci_request.py 
b/nova/pci/pci_request.py index d07dfbd96d..cb031e5cc1 100644 --- a/nova/pci/pci_request.py +++ b/nova/pci/pci_request.py @@ -13,23 +13,27 @@ # License for the specific language governing permissions and limitations # under the License. -""" Example of a PCI alias: - pci_alias = '{ - "name": "QuicAssist", - "product_id": "0443", - "vendor_id": "8086", - "device_type": "ACCEL", - }' - - Aliases with the same name and the same device_type are OR operation: - pci_alias = '{ - "name": "QuicAssist", - "product_id": "0442", - "vendor_id": "8086", - "device_type": "ACCEL", - }' +""" Example of a PCI alias:: + + | pci_alias = '{ + | "name": "QuicAssist", + | "product_id": "0443", + | "vendor_id": "8086", + | "device_type": "ACCEL", + | }' + + Aliases with the same name and the same device_type are OR operation:: + + | pci_alias = '{ + | "name": "QuicAssist", + | "product_id": "0442", + | "vendor_id": "8086", + | "device_type": "ACCEL", + | }' + These 2 aliases define a device request meaning: vendor_id is "8086" and product id is "0442" or "0443". + """ import copy @@ -159,18 +163,20 @@ def get_pci_requests_from_flavor(flavor): optional 'alias_name' is the corresponding alias definition name. Example: - Assume alias configuration is: - {'vendor_id':'8086', - 'device_id':'1502', - 'name':'alias_1'} + Assume alias configuration is:: + + | {'vendor_id':'8086', + | 'device_id':'1502', + | 'name':'alias_1'} The flavor extra specs includes: 'pci_passthrough:alias': 'alias_1:2'. 
- The returned pci_requests are: - pci_requests = [{'count':2, - 'specs': [{'vendor_id':'8086', - 'device_id':'1502'}], - 'alias_name': 'alias_1'}] + The returned pci_requests are:: + + | pci_requests = [{'count':2, + | 'specs': [{'vendor_id':'8086', + | 'device_id':'1502'}], + | 'alias_name': 'alias_1'}] :param flavor: the flavor to be checked :returns: a list of pci requests diff --git a/nova/scheduler/filters/isolated_hosts_filter.py b/nova/scheduler/filters/isolated_hosts_filter.py index a15193a8f6..6d383dde09 100644 --- a/nova/scheduler/filters/isolated_hosts_filter.py +++ b/nova/scheduler/filters/isolated_hosts_filter.py @@ -41,18 +41,20 @@ class IsolatedHostsFilter(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): """Result Matrix with 'restrict_isolated_hosts_to_isolated_images' set - to True: - | isolated_image | non_isolated_image - -------------+----------------+------------------- - iso_host | True | False - non_iso_host | False | True + to True:: + + | | isolated_image | non_isolated_image + | -------------+----------------+------------------- + | iso_host | True | False + | non_iso_host | False | True Result Matrix with 'restrict_isolated_hosts_to_isolated_images' set - to False: - | isolated_image | non_isolated_image - -------------+----------------+------------------- - iso_host | True | True - non_iso_host | False | True + to False:: + + | | isolated_image | non_isolated_image + | -------------+----------------+------------------- + | iso_host | True | True + | non_iso_host | False | True """ # If the configuration does not list any hosts, the filter will always diff --git a/nova/scheduler/filters/pci_passthrough_filter.py b/nova/scheduler/filters/pci_passthrough_filter.py index 0726d22148..5855649f24 100644 --- a/nova/scheduler/filters/pci_passthrough_filter.py +++ b/nova/scheduler/filters/pci_passthrough_filter.py @@ -26,13 +26,17 @@ class PciPassthroughFilter(filters.BaseHostFilter): to meet the device requests in 
the 'extra_specs' for the flavor. PCI resource tracker provides updated summary information about the - PCI devices for each host, like: - [{"count": 5, "vendor_id": "8086", "product_id": "1520", - "extra_info":'{}'}], - and VM requests PCI devices via PCI requests, like: - [{"count": 1, "vendor_id": "8086", "product_id": "1520",}]. + PCI devices for each host, like:: + + | [{"count": 5, "vendor_id": "8086", "product_id": "1520", + | "extra_info":'{}'}], + + and VM requests PCI devices via PCI requests, like:: + + | [{"count": 1, "vendor_id": "8086", "product_id": "1520",}]. The filter checks if the host passes or not based on this information. + """ def host_passes(self, host_state, filter_properties): diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py index d15290a2a7..4ace9c8d02 100644 --- a/nova/scheduler/filters/trusted_filter.py +++ b/nova/scheduler/filters/trusted_filter.py @@ -18,14 +18,14 @@ Filter to add support for Trusted Computing Pools. Filter that only schedules tasks on a host if the integrity (trust) -of that host matches the trust requested in the `extra_specs' for the -flavor. The `extra_specs' will contain a key/value pair where the -key is `trust'. The value of this pair (`trusted'/`untrusted') must +of that host matches the trust requested in the ``extra_specs`` for the +flavor. The ``extra_specs`` will contain a key/value pair where the +key is ``trust``. The value of this pair (``trusted``/``untrusted``) must match the integrity of that host (obtained from the Attestation service) before the task can be scheduled on that host. Note that the parameters to control access to the Attestation Service -are in the `nova.conf' file in a separate `trust' section. For example, +are in the ``nova.conf`` file in a separate ``trust`` section. 
For example, the config file will look something like: [DEFAULT] @@ -34,7 +34,8 @@ [trust] server=attester.mynetwork.com -Details on the specific parameters can be found in the file `trust_attest.py'. +Details on the specific parameters can be found in the file +``trust_attest.py``. Details on setting up and using an Attestation Service can be found at the Open Attestation project at: diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index 6368cb649b..b46c37a9d2 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -3024,15 +3024,15 @@ class ServersAllExtensionsTestCase(test.TestCase): an exception because of a malformed request before the core API gets a chance to validate the request and return a 422 response. - For example, AccessIPsController extends servers.Controller: - - @wsgi.extends - def create(self, req, resp_obj, body): - context = req.environ['nova.context'] - if authorize(context) and 'server' in resp_obj.obj: - resp_obj.attach(xml=AccessIPTemplate()) - server = resp_obj.obj['server'] - self._extend_server(req, server) + For example, AccessIPsController extends servers.Controller:: + + | @wsgi.extends + | def create(self, req, resp_obj, body): + | context = req.environ['nova.context'] + | if authorize(context) and 'server' in resp_obj.obj: + | resp_obj.attach(xml=AccessIPTemplate()) + | server = resp_obj.obj['server'] + | self._extend_server(req, server) we want to ensure that the extension isn't barfing on an invalid body. 
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index 777c63c9ff..0edab4760a 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -4838,14 +4838,14 @@ class ServersAllExtensionsTestCase(test.TestCase): an exception because of a malformed request before the core API gets a chance to validate the request and return a 422 response. - For example, ServerDiskConfigController extends servers.Controller: - - @wsgi.extends - def create(self, req, body): - if 'server' in body: - self._set_disk_config(body['server']) - resp_obj = (yield) - self._show(req, resp_obj) + For example, ServerDiskConfigController extends servers.Controller:: + + | @wsgi.extends + | def create(self, req, body): + | if 'server' in body: + | self._set_disk_config(body['server']) + | resp_obj = (yield) + | self._show(req, resp_obj) we want to ensure that the extension isn't barfing on an invalid body. diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py index 0a5a1c0653..ee284d122f 100644 --- a/nova/tests/compute/test_resource_tracker.py +++ b/nova/tests/compute/test_resource_tracker.py @@ -1077,9 +1077,10 @@ def test_set_instance_host_and_node(self): class NoInstanceTypesInSysMetadata(ResizeClaimTestCase): """Make sure we handle the case where the following are true: - 1) Compute node C gets upgraded to code that looks for instance types in + + #) Compute node C gets upgraded to code that looks for instance types in system metadata. AND - 2) C already has instances in the process of migrating that do not have + #) C already has instances in the process of migrating that do not have stashed instance types. 
bug 1164110 diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py index f0cf594b60..603d5fcee6 100644 --- a/nova/tests/db/test_migrations.py +++ b/nova/tests/db/test_migrations.py @@ -31,14 +31,14 @@ 'openstack_citest' on localhost. The test will then use that db and u/p combo to run the tests. -For postgres on Ubuntu this can be done with the following commands: - -sudo -u postgres psql -postgres=# create user openstack_citest with createdb login password - 'openstack_citest'; -postgres=# create database openstack_citest with owner openstack_citest; -postgres=# create database openstack_baremetal_citest with owner - openstack_citest; +For postgres on Ubuntu this can be done with the following commands:: + +| sudo -u postgres psql +| postgres=# create user openstack_citest with createdb login password +| 'openstack_citest'; +| postgres=# create database openstack_citest with owner openstack_citest; +| postgres=# create database openstack_baremetal_citest with owner +| openstack_citest; """ diff --git a/nova/tests/image_fixtures.py b/nova/tests/image_fixtures.py index 771b5e1a3f..9ab09b989a 100644 --- a/nova/tests/image_fixtures.py +++ b/nova/tests/image_fixtures.py @@ -26,16 +26,17 @@ def get_image_fixtures(): start at 123 and go to 131, with the following brief summary of image attributes: - ID Type Status Notes - ----------------------------------------------------------------- - 123 Public image active - 124 Snapshot queued - 125 Snapshot saving - 126 Snapshot active - 127 Snapshot killed - 128 Snapshot deleted - 129 Snapshot pending_delete - 130 Public image active Has no name + | ID Type Status Notes + | ---------------------------------------------------------- + | 123 Public image active + | 124 Snapshot queued + | 125 Snapshot saving + | 126 Snapshot active + | 127 Snapshot killed + | 128 Snapshot deleted + | 129 Snapshot pending_delete + | 130 Public image active Has no name + """ image_id = 123 From 
9d64a827b9c2c5f332b3e57f6cb818d3f4735d23 Mon Sep 17 00:00:00 2001 From: Thang Pham Date: Wed, 23 Jul 2014 17:15:52 -0400 Subject: [PATCH 317/486] Allow empty volumes to be created The following patch allows an empty volume to be created and attached to an instance at boot time. Today, you can define a bootable volume that is sourced from an image, volume, or snapshot. However, you cannot define a volume that is sourced from 'blank', even though it is one of the source options available. For example, the following command will not work: nova boot --flavor m1.tiny --block-device source=blank,dest=volume,size=1,bootindex=0 test. This is because the method used to identify ephemeral volumes (new_format_is_ephemeral) believes any block device with source_type=blank is an ephemeral, and there is no logic to handle volumes where source_type=blank. The following patch fixes these bugs by properly identifying ephemeral block devices and creating blank/empty volumes when source_type=blank and destination_type=volume. 
Change-Id: I5aa9684bfad1749fadff3018b13a225ed8f16fe8 Closes-Bug: #1347028 Closes-Bug: #1347499 --- nova/block_device.py | 8 +- nova/compute/manager.py | 10 +- nova/tests/compute/test_compute.py | 132 ++++++++++++++++++++++ nova/tests/compute/test_compute_utils.py | 36 ++++-- nova/tests/test_block_device.py | 10 ++ nova/tests/virt/libvirt/test_blockinfo.py | 23 +++- nova/tests/virt/test_block_device.py | 70 +++++++++++- nova/virt/block_device.py | 25 +++- nova/virt/libvirt/blockinfo.py | 2 + 9 files changed, 300 insertions(+), 16 deletions(-) diff --git a/nova/block_device.py b/nova/block_device.py index 0f15632e28..d6999b3b19 100644 --- a/nova/block_device.py +++ b/nova/block_device.py @@ -186,6 +186,9 @@ def from_api(cls, api_dict): if source_type not in ('volume', 'image', 'snapshot', 'blank'): raise exception.InvalidBDMFormat( details=_("Invalid source_type field.")) + elif source_type == 'blank' and device_uuid: + raise exception.InvalidBDMFormat( + details=_("Invalid device UUID.")) elif source_type != 'blank': if not device_uuid: raise exception.InvalidBDMFormat( @@ -411,8 +414,9 @@ def new_format_is_swap(bdm): def new_format_is_ephemeral(bdm): - if (bdm.get('source_type') == 'blank' and not - new_format_is_swap(bdm)): + if (bdm.get('source_type') == 'blank' and + bdm.get('destination_type') == 'local' and + bdm.get('guest_format') != 'swap'): return True return False diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 660785e1ad..55320294cf 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1694,8 +1694,9 @@ def _default_block_device_names(self, context, instance, root_bdm.save() def _is_mapping(bdm): - return (bdm.source_type in ('image', 'volume', 'snapshot') and - driver_block_device.is_implemented(bdm)) + return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank') + and bdm.destination_type == 'volume' + and driver_block_device.is_implemented(bdm)) ephemerals = filter(block_device.new_format_is_ephemeral, 
block_devices) @@ -1731,6 +1732,11 @@ def _prep_block_device(self, context, instance, bdms, driver_block_device.convert_images(bdms), context, instance, self.volume_api, self.driver, self._await_block_device_map_created, + do_check_attach=do_check_attach) + + driver_block_device.attach_block_devices( + driver_block_device.convert_blanks(bdms), + context, instance, self.volume_api, + self.driver, self._await_block_device_map_created, do_check_attach=do_check_attach)) } diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 8991a7ab60..d20002fb0e 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -1094,6 +1094,80 @@ def test_prep_block_device_over_quota_failure(self, mock_create): self.context, instance, bdms) self.assertTrue(mock_create.called) + @mock.patch.object(nova.virt.block_device, 'get_swap') + @mock.patch.object(nova.virt.block_device, 'convert_blanks') + @mock.patch.object(nova.virt.block_device, 'convert_images') + @mock.patch.object(nova.virt.block_device, 'convert_snapshots') + @mock.patch.object(nova.virt.block_device, 'convert_volumes') + @mock.patch.object(nova.virt.block_device, 'convert_ephemerals') + @mock.patch.object(nova.virt.block_device, 'convert_swap') + @mock.patch.object(nova.virt.block_device, 'attach_block_devices') + def test_prep_block_device_with_blanks(self, attach_block_devices, + convert_swap, convert_ephemerals, + convert_volumes, convert_snapshots, + convert_images, convert_blanks, + get_swap): + instance = self._create_fake_instance() + instance['root_device_name'] = '/dev/vda' + root_volume = objects.BlockDeviceMapping( + **fake_block_device.FakeDbBlockDeviceDict({ + 'instance_uuid': 'fake-instance', + 'source_type': 'image', + 'destination_type': 'volume', + 'image_id': 'fake-image-id-1', + 'volume_size': 1, + 'boot_index': 0})) + blank_volume1 = objects.BlockDeviceMapping( + **fake_block_device.FakeDbBlockDeviceDict({ + 'instance_uuid': 
'fake-instance', + 'source_type': 'blank', + 'destination_type': 'volume', + 'volume_size': 1, + 'boot_index': 1})) + blank_volume2 = objects.BlockDeviceMapping( + **fake_block_device.FakeDbBlockDeviceDict({ + 'instance_uuid': 'fake-instance', + 'source_type': 'blank', + 'destination_type': 'volume', + 'volume_size': 1, + 'boot_index': 2})) + bdms = [blank_volume1, blank_volume2, root_volume] + + def fake_attach_block_devices(bdm, *args, **kwargs): + return bdm + + convert_swap.return_value = [] + convert_ephemerals.return_value = [] + convert_volumes.return_value = [blank_volume1, blank_volume2] + convert_snapshots.return_value = [] + convert_images.return_value = [root_volume] + convert_blanks.return_value = [] + attach_block_devices.side_effect = fake_attach_block_devices + get_swap.return_value = [] + + expected_block_device_info = { + 'root_device_name': '/dev/vda', + 'swap': [], + 'ephemerals': [], + 'block_device_mapping': bdms + } + + manager = compute_manager.ComputeManager() + manager.use_legacy_block_device_info = False + block_device_info = manager._prep_block_device(self.context, instance, + bdms) + + convert_swap.assert_called_once_with(bdms) + convert_ephemerals.assert_called_once_with(bdms) + convert_volumes.assert_called_once_with(bdms) + convert_snapshots.assert_called_once_with(bdms) + convert_images.assert_called_once_with(bdms) + convert_blanks.assert_called_once_with(bdms) + + self.assertEqual(expected_block_device_info, block_device_info) + self.assertEqual(4, attach_block_devices.call_count) + get_swap.assert_called_once_with([]) + class ComputeTestCase(BaseTestCase): def test_wrap_instance_fault(self): @@ -6800,6 +6874,64 @@ def test_default_block_device_names_no_root_device(self): instance, {}, bdms) + def test_default_block_device_names_with_blank_volumes(self): + instance = self._create_fake_instance() + image_meta = {} + root_volume = objects.BlockDeviceMapping( + **fake_block_device.FakeDbBlockDeviceDict({ + 'id': 1, 'instance_uuid': 
'fake-instance', + 'source_type': 'volume', + 'destination_type': 'volume', + 'image_id': 'fake-image-id-1', + 'boot_index': 0})) + blank_volume1 = objects.BlockDeviceMapping( + **fake_block_device.FakeDbBlockDeviceDict({ + 'id': 2, 'instance_uuid': 'fake-instance', + 'source_type': 'blank', + 'destination_type': 'volume', + 'boot_index': -1})) + blank_volume2 = objects.BlockDeviceMapping( + **fake_block_device.FakeDbBlockDeviceDict({ + 'id': 3, 'instance_uuid': 'fake-instance', + 'source_type': 'blank', + 'destination_type': 'volume', + 'boot_index': -1})) + ephemeral = objects.BlockDeviceMapping( + **fake_block_device.FakeDbBlockDeviceDict({ + 'id': 4, 'instance_uuid': 'fake-instance', + 'source_type': 'blank', + 'destination_type': 'local'})) + swap = objects.BlockDeviceMapping( + **fake_block_device.FakeDbBlockDeviceDict({ + 'id': 5, 'instance_uuid': 'fake-instance', + 'source_type': 'blank', + 'destination_type': 'local', + 'guest_format': 'swap' + })) + bdms = block_device_obj.block_device_make_list( + self.context, [root_volume, blank_volume1, blank_volume2, + ephemeral, swap]) + + with contextlib.nested( + mock.patch.object(self.compute, '_default_root_device_name', + return_value='/dev/vda'), + mock.patch.object(self.compute, '_instance_update'), + mock.patch.object(objects.BlockDeviceMapping, 'save'), + mock.patch.object(self.compute, + '_default_device_names_for_instance') + ) as (default_root_device, instance_update, object_save, + default_device_names): + self.compute._default_block_device_names(self.context, instance, + image_meta, bdms) + default_root_device.assert_called_once_with(instance, image_meta, + bdms[0]) + instance_update.assert_called_once_with( + self.context, instance['uuid'], root_device_name='/dev/vda') + self.assertTrue(object_save.called) + default_device_names.assert_called_once_with(instance, + '/dev/vda', [bdms[-2]], [bdms[-1]], + [bdm for bdm in bdms[:-2]]) + def test_reserve_block_device_name(self): instance = 
self._create_fake_instance_obj( params={'root_device_name': '/dev/vda'}) diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py index a9d13b00bb..307eb11085 100644 --- a/nova/tests/compute/test_compute_utils.py +++ b/nova/tests/compute/test_compute_utils.py @@ -279,6 +279,12 @@ def setUp(self): 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': 'fake-snapshot-id-1', + 'boot_index': -1}), + fake_block_device.FakeDbBlockDeviceDict( + {'id': 5, 'instance_uuid': 'fake-instance', + 'device_name': '/dev/vde', + 'source_type': 'blank', + 'destination_type': 'volume', 'boot_index': -1})]) self.flavor = {'swap': 4} self.instance = {'uuid': 'fake_instance', 'ephemeral_gb': 2} @@ -326,11 +332,14 @@ def test_only_block_device_mapping(self): for original, new in zip(original_bdm, self.block_device_mapping): self.assertEqual(original.device_name, new.device_name) - # Asser it defaults the missing one as expected + # Assert it defaults the missing one as expected self.block_device_mapping[1]['device_name'] = None + self.block_device_mapping[2]['device_name'] = None self._test_default_device_names([], [], self.block_device_mapping) - self.assertEqual(self.block_device_mapping[1]['device_name'], - '/dev/vdb') + self.assertEqual('/dev/vdb', + self.block_device_mapping[1]['device_name']) + self.assertEqual('/dev/vdc', + self.block_device_mapping[2]['device_name']) def test_with_ephemerals(self): # Test ephemeral gets assigned @@ -340,10 +349,13 @@ def test_with_ephemerals(self): self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb') self.block_device_mapping[1]['device_name'] = None + self.block_device_mapping[2]['device_name'] = None self._test_default_device_names(self.ephemerals, [], self.block_device_mapping) - self.assertEqual(self.block_device_mapping[1]['device_name'], - '/dev/vdc') + self.assertEqual('/dev/vdc', + self.block_device_mapping[1]['device_name']) + self.assertEqual('/dev/vdd', + 
self.block_device_mapping[2]['device_name']) def test_with_swap(self): # Test swap only @@ -354,11 +366,14 @@ def test_with_swap(self): # Test swap and block_device_mapping self.swap[0]['device_name'] = None self.block_device_mapping[1]['device_name'] = None + self.block_device_mapping[2]['device_name'] = None self._test_default_device_names([], self.swap, self.block_device_mapping) self.assertEqual(self.swap[0]['device_name'], '/dev/vdb') - self.assertEqual(self.block_device_mapping[1]['device_name'], - '/dev/vdc') + self.assertEqual('/dev/vdc', + self.block_device_mapping[1]['device_name']) + self.assertEqual('/dev/vdd', + self.block_device_mapping[2]['device_name']) def test_all_together(self): # Test swap missing @@ -379,12 +394,15 @@ def test_all_together(self): self.swap[0]['device_name'] = None self.ephemerals[0]['device_name'] = None self.block_device_mapping[1]['device_name'] = None + self.block_device_mapping[2]['device_name'] = None self._test_default_device_names(self.ephemerals, self.swap, self.block_device_mapping) self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb') self.assertEqual(self.swap[0]['device_name'], '/dev/vdc') - self.assertEqual(self.block_device_mapping[1]['device_name'], - '/dev/vdd') + self.assertEqual('/dev/vdd', + self.block_device_mapping[1]['device_name']) + self.assertEqual('/dev/vde', + self.block_device_mapping[2]['device_name']) class UsageInfoTestCase(test.TestCase): diff --git a/nova/tests/test_block_device.py b/nova/tests/test_block_device.py index bd3928b138..3b817ac338 100644 --- a/nova/tests/test_block_device.py +++ b/nova/tests/test_block_device.py @@ -524,6 +524,16 @@ def test_from_api(self): block_device.BlockDeviceDict.from_api(api), matchers.IsSubDictOf(new)) + def test_from_api_invalid_blank_id(self): + api_dict = {'id': 1, + 'source_type': 'blank', + 'destination_type': 'volume', + 'uuid': 'fake-volume-id-1', + 'delete_on_termination': True, + 'boot_index': -1} + 
self.assertRaises(exception.InvalidBDMFormat, + block_device.BlockDeviceDict.from_api, api_dict) + def test_legacy(self): for legacy, new in zip(self.legacy_mapping, self.new_mapping): self.assertThat( diff --git a/nova/tests/virt/libvirt/test_blockinfo.py b/nova/tests/virt/libvirt/test_blockinfo.py index fe36bb5beb..b5a3cd3740 100644 --- a/nova/tests/virt/libvirt/test_blockinfo.py +++ b/nova/tests/virt/libvirt/test_blockinfo.py @@ -893,6 +893,15 @@ def setUp(self): 'disk_bus': 'virtio', 'destination_type': 'volume', 'snapshot_id': 'fake-snapshot-id-1', + 'boot_index': -1})), + objects.BlockDeviceMapping(self.context, + **fake_block_device.FakeDbBlockDeviceDict( + {'id': 5, 'instance_uuid': 'fake-instance', + 'device_name': '/dev/vde', + 'source_type': 'blank', + 'device_type': 'disk', + 'disk_bus': 'virtio', + 'destination_type': 'volume', 'boot_index': -1}))] def tearDown(self): @@ -915,11 +924,14 @@ def test_only_block_device_mapping(self): original_bdm, self.block_device_mapping): self.assertEqual(original.device_name, defaulted.device_name) - # Asser it defaults the missing one as expected + # Assert it defaults the missing one as expected self.block_device_mapping[1]['device_name'] = None + self.block_device_mapping[2]['device_name'] = None self._test_default_device_names([], [], self.block_device_mapping) self.assertEqual('/dev/vdd', self.block_device_mapping[1]['device_name']) + self.assertEqual('/dev/vde', + self.block_device_mapping[2]['device_name']) def test_with_ephemerals(self): # Test ephemeral gets assigned @@ -929,10 +941,13 @@ def test_with_ephemerals(self): self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name']) self.block_device_mapping[1]['device_name'] = None + self.block_device_mapping[2]['device_name'] = None self._test_default_device_names(self.ephemerals, [], self.block_device_mapping) self.assertEqual('/dev/vdd', self.block_device_mapping[1]['device_name']) + self.assertEqual('/dev/vde', + 
self.block_device_mapping[2]['device_name']) def test_with_swap(self): # Test swap only @@ -943,11 +958,14 @@ def test_with_swap(self): # Test swap and block_device_mapping self.swap[0]['device_name'] = None self.block_device_mapping[1]['device_name'] = None + self.block_device_mapping[2]['device_name'] = None self._test_default_device_names([], self.swap, self.block_device_mapping) self.assertEqual('/dev/vdc', self.swap[0]['device_name']) self.assertEqual('/dev/vdd', self.block_device_mapping[1]['device_name']) + self.assertEqual('/dev/vde', + self.block_device_mapping[2]['device_name']) def test_all_together(self): # Test swap missing @@ -968,9 +986,12 @@ def test_all_together(self): self.swap[0]['device_name'] = None self.ephemerals[0]['device_name'] = None self.block_device_mapping[1]['device_name'] = None + self.block_device_mapping[2]['device_name'] = None self._test_default_device_names(self.ephemerals, self.swap, self.block_device_mapping) self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name']) self.assertEqual('/dev/vdc', self.swap[0]['device_name']) self.assertEqual('/dev/vdd', self.block_device_mapping[1]['device_name']) + self.assertEqual('/dev/vde', + self.block_device_mapping[2]['device_name']) diff --git a/nova/tests/virt/test_block_device.py b/nova/tests/virt/test_block_device.py index af727b5d55..7008991f36 100644 --- a/nova/tests/virt/test_block_device.py +++ b/nova/tests/virt/test_block_device.py @@ -12,12 +12,15 @@ # License for the specific language governing permissions and limitations # under the License. 
+import contextlib + import mock from nova import block_device from nova import context from nova.openstack.common import jsonutils from nova import test +from nova.tests import fake_instance from nova.tests import matchers from nova.virt import block_device as driver_block_device from nova.virt import driver @@ -31,7 +34,8 @@ class TestDriverBlockDevice(test.NoDBTestCase): 'ephemeral': driver_block_device.DriverEphemeralBlockDevice, 'volume': driver_block_device.DriverVolumeBlockDevice, 'snapshot': driver_block_device.DriverSnapshotBlockDevice, - 'image': driver_block_device.DriverImageBlockDevice + 'image': driver_block_device.DriverImageBlockDevice, + 'blank': driver_block_device.DriverBlankBlockDevice } swap_bdm = block_device.BlockDeviceDict( @@ -163,6 +167,34 @@ class TestDriverBlockDevice(test.NoDBTestCase): 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True} + blank_bdm = block_device.BlockDeviceDict( + {'id': 6, 'instance_uuid': 'fake-instance', + 'device_name': '/dev/sda2', + 'delete_on_termination': True, + 'volume_size': 3, + 'disk_bus': 'scsi', + 'device_type': 'disk', + 'source_type': 'blank', + 'destination_type': 'volume', + 'connection_info': '{"fake": "connection_info"}', + 'snapshot_id': 'fake-snapshot-id-1', + 'volume_id': 'fake-volume-id-2', + 'boot_index': -1}) + + blank_driver_bdm = { + 'mount_device': '/dev/sda2', + 'connection_info': {"fake": "connection_info"}, + 'delete_on_termination': True, + 'disk_bus': 'scsi', + 'device_type': 'disk', + 'guest_format': None, + 'boot_index': -1} + + blank_legacy_driver_bdm = { + 'mount_device': '/dev/sda2', + 'connection_info': {"fake": "connection_info"}, + 'delete_on_termination': True} + def setUp(self): super(TestDriverBlockDevice, self).setUp() self.volume_api = self.mox.CreateMock(cinder.API) @@ -275,6 +307,15 @@ def test_driver_image_block_device_destination_local(self): self.assertRaises(driver_block_device._InvalidType, self.driver_classes['image'], bdm) + def 
test_driver_blank_block_device(self): + self._test_driver_device('blank') + + test_bdm = self.driver_classes['blank']( + self.blank_bdm) + self.assertEqual(6, test_bdm._bdm_obj.id) + self.assertEqual('fake-volume-id-2', test_bdm.volume_id) + self.assertEqual(3, test_bdm.volume_size) + def _test_volume_attach(self, driver_bdm, bdm_dict, fake_volume, check_attach=True, fail_check_attach=False, driver_attach=False, @@ -546,6 +587,33 @@ def test_image_attach_volume(self): self.virt_driver) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') + def test_blank_attach_volume(self): + no_blank_volume = self.blank_bdm.copy() + no_blank_volume['volume_id'] = None + test_bdm = self.driver_classes['blank'](no_blank_volume) + instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, + **{'uuid': 'fake-uuid'}) + volume_class = self.driver_classes['volume'] + volume = {'id': 'fake-volume-id-2', + 'display_name': 'fake-uuid-blank-vol'} + + with contextlib.nested( + mock.patch.object(self.volume_api, 'create', return_value=volume), + mock.patch.object(volume_class, 'attach') + ) as (vol_create, vol_attach): + test_bdm.attach(self.context, instance, self.volume_api, + self.virt_driver) + + vol_create.assert_called_once_with(self.context, + test_bdm.volume_size, + 'fake-uuid-blank-vol', + '') + vol_attach.assert_called_once_with(self.context, instance, + self.volume_api, + self.virt_driver, + do_check_attach=True) + self.assertEqual('fake-volume-id-2', test_bdm.volume_id) + def test_convert_block_devices(self): converted = driver_block_device._convert_block_devices( self.driver_classes['volume'], diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py index 0022d23311..3cdfc56b7c 100644 --- a/nova/virt/block_device.py +++ b/nova/virt/block_device.py @@ -324,6 +324,26 @@ def attach(self, context, instance, volume_api, do_check_attach=do_check_attach) +class DriverBlankBlockDevice(DriverVolumeBlockDevice): + + _valid_source = 'blank' + _proxy_as_attr = 
set(['volume_size', 'volume_id', 'image_id']) + + def attach(self, context, instance, volume_api, + virt_driver, wait_func=None, do_check_attach=True): + if not self.volume_id: + vol_name = instance.uuid + '-blank-vol' + vol = volume_api.create(context, self.volume_size, vol_name, '') + if wait_func: + wait_func(context, vol['id']) + + self.volume_id = vol['id'] + + super(DriverBlankBlockDevice, self).attach( + context, instance, volume_api, virt_driver, + do_check_attach=do_check_attach) + + def _convert_block_devices(device_type, block_device_mapping): def _is_transformable(bdm): try: @@ -355,6 +375,9 @@ def _is_transformable(bdm): convert_images = functools.partial(_convert_block_devices, DriverImageBlockDevice) +convert_blanks = functools.partial(_convert_block_devices, + DriverBlankBlockDevice) + def attach_block_devices(block_device_mapping, *attach_args, **attach_kwargs): def _log_and_attach(bdm): @@ -417,7 +440,7 @@ def get_swap(transformed_list): _IMPLEMENTED_CLASSES = (DriverSwapBlockDevice, DriverEphemeralBlockDevice, DriverVolumeBlockDevice, DriverSnapshotBlockDevice, - DriverImageBlockDevice) + DriverImageBlockDevice, DriverBlankBlockDevice) def is_implemented(bdm): diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py index d83bee8ecb..401e83eb7b 100644 --- a/nova/virt/libvirt/blockinfo.py +++ b/nova/virt/libvirt/blockinfo.py @@ -442,6 +442,8 @@ def default_device_names(virt_type, context, instance, root_device_name, driver_block_device.convert_volumes( block_device_mapping) + driver_block_device.convert_snapshots( + block_device_mapping) + + driver_block_device.convert_blanks( block_device_mapping)) } From 81add2843af8b8f010a41e0e371f6636ffe2298a Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Thu, 7 Aug 2014 07:22:32 +0000 Subject: [PATCH 318/486] Fix the error status code of duplicated agents If posting the duplicated agent info to "create a agent" API, Nova v2 API returns a HTTP500 response(Internal Server Error). 
The duplicated resources are not internal errors and this behavior has been fixed on Nova v3 API. In this case, Nova v3 API returns a HTTP409 response(Conflict). This patch fixes it for Nova v2 API. Change-Id: I0b9424f9792270f6807cabcbf27a0f6e5ec42669 --- nova/api/openstack/compute/contrib/agents.py | 2 +- .../openstack/compute/contrib/test_agents.py | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/compute/contrib/agents.py b/nova/api/openstack/compute/contrib/agents.py index 6ad4ca064d..c05eb4ef2f 100644 --- a/nova/api/openstack/compute/contrib/agents.py +++ b/nova/api/openstack/compute/contrib/agents.py @@ -178,7 +178,7 @@ def create(self, req, body): agent_obj.create() agent['agent_id'] = agent_obj.id except exception.AgentBuildExists as ex: - raise webob.exc.HTTPServerError(explanation=ex.format_message()) + raise webob.exc.HTTPConflict(explanation=ex.format_message()) return {'agent': agent} diff --git a/nova/tests/api/openstack/compute/contrib/test_agents.py b/nova/tests/api/openstack/compute/contrib/test_agents.py index 8cc25e6854..7373efeb27 100644 --- a/nova/tests/api/openstack/compute/contrib/test_agents.py +++ b/nova/tests/api/openstack/compute/contrib/test_agents.py @@ -18,6 +18,7 @@ from nova import context from nova import db from nova.db.sqlalchemy import models +from nova import exception from nova import test fake_agents_list = [{'hypervisor': 'kvm', 'os': 'win', @@ -128,6 +129,22 @@ def test_agents_create_key_error(self): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) + def test_agents_create_with_existed_agent(self): + def fake_agent_build_create_with_exited_agent(context, values): + raise exception.AgentBuildExists(**values) + + self.stubs.Set(db, 'agent_build_create', + fake_agent_build_create_with_exited_agent) + req = FakeRequest() + body = {'agent': {'hypervisor': 'kvm', + 'os': 'win', + 'architecture': 'x86', + 'version': '7.0', + 'url': 
'xxx://xxxx/xxx/xxx', + 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} + self.assertRaises(webob.exc.HTTPConflict, self.controller.create, req, + body=body) + def _test_agents_create_with_invalid_length(self, key): req = FakeRequest() body = {'agent': {'hypervisor': 'kvm', From 260f5c6f508e58874ca4d448ff7b79f8117be28d Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Mon, 11 Aug 2014 13:26:20 +0800 Subject: [PATCH 319/486] Backport v3 api unittest into v2 api for attach_interface extension On most APIs, the unit test coverage of v3 is better than the one of v2. This patch ports v3 attach_interface API tests to v2 tests for improving v2 attach_interface API tests Change-Id: Ie93175602dbced361d76abbe35a7767a96d9e68e --- .../compute/contrib/test_attach_interfaces.py | 42 +++++++++++++++---- 1 file changed, 33 insertions(+), 9 deletions(-) diff --git a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py index c1e9d84ce8..29b93eb260 100644 --- a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py +++ b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py @@ -136,6 +136,39 @@ def setUp(self): 'fixed_ips': port_data1['fixed_ips'], }} + @mock.patch.object(compute_api.API, 'get', + side_effect=exception.InstanceNotFound(instance_id='')) + def _test_instance_not_found(self, url, func, params, mock_get, + method='GET'): + req = webob.Request.blank(url) + req.method = method + req.headers['content-type'] = 'application/json' + req.environ['nova.context'] = self.context + self.assertRaises(exc.HTTPNotFound, func, req, *params) + + def test_show_instance_not_found(self): + attachments = attach_interfaces.InterfaceAttachmentController() + self._test_instance_not_found('/v2/fake/os-interfaces/fake', + attachments.show, ('fake', 'fake')) + + def test_index_instance_not_found(self): + attachments = attach_interfaces.InterfaceAttachmentController() + 
self._test_instance_not_found('/v2/fake/os-interfaces', + attachments.index, ('fake', )) + + def test_delete_instance_not_found(self): + attachments = attach_interfaces.InterfaceAttachmentController() + self._test_instance_not_found('/v2/fake/os-interfaces/fake', + attachments.delete, ('fake', 'fake'), + method='DELETE') + + def test_create_instance_not_found(self): + attachments = attach_interfaces.InterfaceAttachmentController() + self._test_instance_not_found('/v2/fake/os-interfaces', + attachments.create, + ('fake', {'interfaceAttachment': {}}), + 'POST') + def test_show(self): attachments = attach_interfaces.InterfaceAttachmentController() req = webob.Request.blank('/v2/fake/os-interfaces/show') @@ -321,15 +354,6 @@ def fake_detach_interface_invalid_state(*args, **kwargs): FAKE_UUID1, FAKE_NET_ID1) - -class InterfaceAttachTestsWithMock(test.NoDBTestCase): - def setUp(self): - super(InterfaceAttachTestsWithMock, self).setUp() - self.flags(auth_strategy=None, group='neutron') - self.flags(url='http://anyhost/', group='neutron') - self.flags(url_timeout=30, group='neutron') - self.context = context.get_admin_context() - @mock.patch.object(compute_api.API, 'get') @mock.patch.object(compute_api.API, 'attach_interface') def test_attach_interface_fixed_ip_already_in_use(self, From d20ce98a602c0215ca5acd14fd68ae3841f4e480 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Thu, 31 Jul 2014 10:48:57 +0900 Subject: [PATCH 320/486] Merge unit tests of "create a flavor" API In test_flavor_manage, there are success/bad_request tests of "create a flavor". Most parts of them are duplicated. This patch merges them for the readability and clarifying their purposes. 
Change-Id: Ic7fa319674594f4470b6eafd3e07e7c467b99477 --- .../compute/contrib/test_flavor_manage.py | 305 ++++-------------- 1 file changed, 57 insertions(+), 248 deletions(-) diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py index c9b378fe80..66c70de9d8 100644 --- a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py +++ b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py @@ -96,19 +96,9 @@ def setUp(self): self.controller = flavormanage.FlavorManageController() self.app = fakes.wsgi_app(init_only=('flavors',)) - def test_delete(self): - req = fakes.HTTPRequest.blank('/v2/123/flavors/1234') - res = self.controller._delete(req, 1234) - self.assertEqual(res.status_int, 202) - - # subsequent delete should fail - self.assertRaises(webob.exc.HTTPNotFound, - self.controller._delete, req, "failtest") - - def test_create(self): - expected = { + self.request_body = { "flavor": { - "name": "azAZ09. 
-_", + "name": "test", "ram": 512, "vcpus": 2, "disk": 1, @@ -119,284 +109,103 @@ def test_create(self): "os-flavor-access:is_public": True, } } + self.expected_flavor = self.request_body - url = '/v2/fake/flavors' - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = 'POST' - req.body = jsonutils.dumps(expected) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - for key in expected["flavor"]: - self.assertEqual(body["flavor"][key], expected["flavor"][key]) + def test_delete(self): + req = fakes.HTTPRequest.blank('/v2/123/flavors/1234') + res = self.controller._delete(req, 1234) + self.assertEqual(res.status_int, 202) - def test_create_invalid_name(self): - self.stubs.UnsetAll() - expected = { - "flavor": { - "name": "bad !@#!$% name", - 'id': "1", - "ram": 512, - "vcpus": 2, - "disk": 1, - "OS-FLV-EXT-DATA:ephemeral": 1, - "swap": 512, - "rxtx_factor": 1, - "os-flavor-access:is_public": True, - } - } + # subsequent delete should fail + self.assertRaises(webob.exc.HTTPNotFound, + self.controller._delete, req, "failtest") + def _create_flavor_success_case(self, body): url = '/v2/fake/flavors' req = webob.Request.blank(url) req.headers['Content-Type'] = 'application/json' req.method = 'POST' - req.body = jsonutils.dumps(expected) + req.body = jsonutils.dumps(body) res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) + self.assertEqual(200, res.status_code) + return jsonutils.loads(res.body) - def test_create_flavor_name_is_whitespace(self): - request_dict = { - "flavor": { - "name": " ", - 'id': "12345", - "ram": 512, - "vcpus": 2, - "disk": 1, - "OS-FLV-EXT-DATA:ephemeral": 1, - "swap": 512, - "rxtx_factor": 1, - "os-flavor-access:is_public": True, - } - } + def test_create(self): + body = self._create_flavor_success_case(self.request_body) + for key in self.expected_flavor["flavor"]: + self.assertEqual(body["flavor"][key], + self.expected_flavor["flavor"][key]) - url = 
'/v2/fake/flavors' - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = 'POST' - req.body = jsonutils.dumps(request_dict) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) + def test_create_public_default(self): + del self.request_body['flavor']['os-flavor-access:is_public'] + body = self._create_flavor_success_case(self.request_body) + for key in self.expected_flavor["flavor"]: + self.assertEqual(body["flavor"][key], + self.expected_flavor["flavor"][key]) def test_create_flavor_name_with_leading_trailing_whitespace(self): - request_dict = { - "flavor": { - "name": " test ", - 'id': "12345", - "ram": 512, - "vcpus": 2, - "disk": 1, - "OS-FLV-EXT-DATA:ephemeral": 1, - "swap": 512, - "rxtx_factor": 1, - "os-flavor-access:is_public": True, - } - } - - url = '/v2/fake/flavors' - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = 'POST' - req.body = jsonutils.dumps(request_dict) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 200) - body = jsonutils.loads(res.body) + self.request_body['flavor']['name'] = " test " + body = self._create_flavor_success_case(self.request_body) self.assertEqual("test", body["flavor"]["name"]) - def test_create_public_default(self): - flavor = { - "flavor": { - "name": "test", - "ram": 512, - "vcpus": 2, - "disk": 1, - "OS-FLV-EXT-DATA:ephemeral": 1, - "id": 1234, - "swap": 512, - "rxtx_factor": 1, - } - } + def test_create_without_flavorid(self): + del self.request_body['flavor']['id'] + body = self._create_flavor_success_case(self.request_body) + for key in self.expected_flavor["flavor"]: + self.assertEqual(body["flavor"][key], + self.expected_flavor["flavor"][key]) - expected = { - "flavor": { - "name": "test", - "ram": 512, - "vcpus": 2, - "disk": 1, - "OS-FLV-EXT-DATA:ephemeral": 1, - "id": unicode(1234), - "swap": 512, - "rxtx_factor": 1, - "os-flavor-access:is_public": True, - } - } + def 
_create_flavor_bad_request_case(self, body): + self.stubs.UnsetAll() url = '/v2/fake/flavors' req = webob.Request.blank(url) req.headers['Content-Type'] = 'application/json' req.method = 'POST' - req.body = jsonutils.dumps(flavor) + req.body = jsonutils.dumps(body) res = req.get_response(self.app) - body = jsonutils.loads(res.body) - for key in expected["flavor"]: - self.assertEqual(body["flavor"][key], expected["flavor"][key]) + self.assertEqual(res.status_code, 400) - def test_create_without_flavorid(self): - expected = { - "flavor": { - "name": "test", - "ram": 512, - "vcpus": 2, - "disk": 1, - "OS-FLV-EXT-DATA:ephemeral": 1, - "swap": 512, - "rxtx_factor": 1, - "os-flavor-access:is_public": True, - } - } + def test_create_invalid_name(self): + self.request_body['flavor']['name'] = 'bad !@#!$% name' + self._create_flavor_bad_request_case(self.request_body) - url = '/v2/fake/flavors' - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = 'POST' - req.body = jsonutils.dumps(expected) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - for key in expected["flavor"]: - self.assertEqual(body["flavor"][key], expected["flavor"][key]) + def test_create_flavor_name_is_whitespace(self): + self.request_body['flavor']['name'] = ' ' + self._create_flavor_bad_request_case(self.request_body) def test_create_without_flavorname(self): - expected = { - "flavor": { - "ram": 512, - "vcpus": 2, - "disk": 1, - "OS-FLV-EXT-DATA:ephemeral": 1, - "swap": 512, - "rxtx_factor": 1, - "os-flavor-access:is_public": True, - } - } - - url = '/v2/fake/flavors' - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = 'POST' - req.body = jsonutils.dumps(expected) - res = req.get_response(self.app) - self.assertEqual(res.status_int, 400) + del self.request_body['flavor']['name'] + self._create_flavor_bad_request_case(self.request_body) def test_create_empty_body(self): - self.stubs.UnsetAll() 
- expected = { + body = { "flavor": {} } - - url = '/v2/fake/flavors' - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = 'POST' - req.body = jsonutils.dumps(expected) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) + self._create_flavor_bad_request_case(body) def test_create_no_body(self): - self.stubs.UnsetAll() - expected = {} - - url = '/v2/fake/flavors' - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = 'POST' - req.body = jsonutils.dumps(expected) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) + body = {} + self._create_flavor_bad_request_case(body) def test_create_invalid_format_body(self): - self.stubs.UnsetAll() - expected = { + body = { "flavor": [] } - - url = '/v2/fake/flavors' - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = 'POST' - req.body = jsonutils.dumps(expected) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) + self._create_flavor_bad_request_case(body) def test_create_invalid_flavorid(self): - self.stubs.UnsetAll() - expected = { - "flavor": { - "name": "test", - 'id': "!@#!$#!$^#&^$&", - "ram": 512, - "vcpus": 2, - "disk": 1, - "OS-FLV-EXT-DATA:ephemeral": 1, - "swap": 512, - "rxtx_factor": 1, - "os-flavor-access:is_public": True, - } - } - - url = '/v2/fake/flavors' - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = 'POST' - req.body = jsonutils.dumps(expected) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) + self.request_body['flavor']['id'] = "!@#!$#!$^#&^$&" + self._create_flavor_bad_request_case(self.request_body) def test_create_check_flavor_id_length(self): - self.stubs.UnsetAll() MAX_LENGTH = 255 - expected = { - "flavor": { - "name": "test", - 'id': "a" * (MAX_LENGTH + 1), - "ram": 512, - "vcpus": 2, - "disk": 1, - 
"OS-FLV-EXT-DATA:ephemeral": 1, - "swap": 512, - "rxtx_factor": 1, - "os-flavor-access:is_public": True, - } - } - - url = '/v2/fake/flavors' - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = 'POST' - req.body = jsonutils.dumps(expected) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) + self.request_body['flavor']['id'] = "a" * (MAX_LENGTH + 1) + self._create_flavor_bad_request_case(self.request_body) def test_create_with_leading_trailing_whitespaces_in_flavor_id(self): - self.stubs.UnsetAll() - expected = { - "flavor": { - "name": "test", - 'id': " bad_id ", - "ram": 512, - "vcpus": 2, - "disk": 1, - "OS-FLV-EXT-DATA:ephemeral": 1, - "swap": 512, - "rxtx_factor": 1, - "os-flavor-access:is_public": True, - } - } - - url = '/v2/fake/flavors' - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = 'POST' - req.body = jsonutils.dumps(expected) - res = req.get_response(self.app) - self.assertEqual(res.status_code, 400) + self.request_body['flavor']['id'] = " bad_id " + self._create_flavor_bad_request_case(self.request_body) def test_flavor_exists_exception_returns_409(self): expected = { From 05a7639fd66e4c2d0dca6df44f333d6b962d81f7 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Mon, 11 Aug 2014 05:02:02 +0000 Subject: [PATCH 321/486] Add some v2 flavor_manage API tests On most APIs, the unit test coverage of v3 is better than the one of v2. This patch ports v3 flavor_manage API tests to v2 tests for improving v2 API tests. 
Change-Id: Ic52b68ea864ba9c4649fe22a431e5e55e72141c5 --- .../compute/contrib/test_flavor_manage.py | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py index 66c70de9d8..43a84e12f3 100644 --- a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py +++ b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py @@ -174,6 +174,10 @@ def test_create_flavor_name_is_whitespace(self): self.request_body['flavor']['name'] = ' ' self._create_flavor_bad_request_case(self.request_body) + def test_create_with_name_too_long(self): + self.request_body['flavor']['name'] = 'a' * 256 + self._create_flavor_bad_request_case(self.request_body) + def test_create_without_flavorname(self): del self.request_body['flavor']['name'] self._create_flavor_bad_request_case(self.request_body) @@ -207,6 +211,46 @@ def test_create_with_leading_trailing_whitespaces_in_flavor_id(self): self.request_body['flavor']['id'] = " bad_id " self._create_flavor_bad_request_case(self.request_body) + def test_create_without_ram(self): + del self.request_body['flavor']['ram'] + self._create_flavor_bad_request_case(self.request_body) + + def test_create_with_0_ram(self): + self.request_body['flavor']['ram'] = 0 + self._create_flavor_bad_request_case(self.request_body) + + def test_create_without_vcpus(self): + del self.request_body['flavor']['vcpus'] + self._create_flavor_bad_request_case(self.request_body) + + def test_create_with_0_vcpus(self): + self.request_body['flavor']['vcpus'] = 0 + self._create_flavor_bad_request_case(self.request_body) + + def test_create_without_disk(self): + del self.request_body['flavor']['disk'] + self._create_flavor_bad_request_case(self.request_body) + + def test_create_with_minus_disk(self): + self.request_body['flavor']['disk'] = -1 + self._create_flavor_bad_request_case(self.request_body) + + def 
test_create_with_minus_ephemeral(self): + self.request_body['flavor']['OS-FLV-EXT-DATA:ephemeral'] = -1 + self._create_flavor_bad_request_case(self.request_body) + + def test_create_with_minus_swap(self): + self.request_body['flavor']['swap'] = -1 + self._create_flavor_bad_request_case(self.request_body) + + def test_create_with_minus_rxtx_factor(self): + self.request_body['flavor']['rxtx_factor'] = -1 + self._create_flavor_bad_request_case(self.request_body) + + def test_create_with_non_boolean_is_public(self): + self.request_body['flavor']['os-flavor-access:is_public'] = 123 + self._create_flavor_bad_request_case(self.request_body) + def test_flavor_exists_exception_returns_409(self): expected = { "flavor": { From 5885517d7542c9695c43770be8b488117df57e2f Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Mon, 11 Aug 2014 14:26:19 +0800 Subject: [PATCH 322/486] Use common get_instance function in v2 attach_interface This patch use common get_instance function in v2 attach_interface. That can make the instance behavior consistent with v2.1 api. Then v2.1 and v2 can share same unittest. 
Change-Id: Id531377d5d34d76a4ffafaa02ecab585cbcb369b --- .../compute/contrib/attach_interfaces.py | 31 ++++++------------- .../compute/contrib/test_attach_interfaces.py | 3 +- 2 files changed, 11 insertions(+), 23 deletions(-) diff --git a/nova/api/openstack/compute/contrib/attach_interfaces.py b/nova/api/openstack/compute/contrib/attach_interfaces.py index 01f22c00b5..3d1b867231 100644 --- a/nova/api/openstack/compute/contrib/attach_interfaces.py +++ b/nova/api/openstack/compute/contrib/attach_interfaces.py @@ -61,10 +61,7 @@ def show(self, req, server_id, id): authorize(context) port_id = id - try: - self.compute_api.get(context, server_id) - except exception.NotFound: - raise exc.HTTPNotFound() + common.get_instance(self.compute_api, context, server_id) try: port_info = self.network_api.show_port(context, port_id) @@ -100,8 +97,9 @@ def create(self, req, server_id, body): raise exc.HTTPBadRequest() try: - instance = self.compute_api.get(context, server_id, - want_objects=True) + instance = common.get_instance(self.compute_api, + context, server_id, + want_objects=True) LOG.audit(_("Attach interface"), instance=instance) vif = self.compute_api.attach_interface(context, instance, network_id, port_id, req_ip) @@ -112,8 +110,6 @@ def create(self, req, server_id, body): exception.NetworkAmbiguous, exception.NetworkNotFound) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) - except exception.NotFound as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except NotImplementedError: @@ -139,14 +135,10 @@ def delete(self, req, server_id, id): context = req.environ['nova.context'] authorize(context) port_id = id - - try: - instance = self.compute_api.get(context, server_id, - want_objects=True) - LOG.audit(_("Detach interface %s"), port_id, instance=instance) - - except exception.NotFound: - raise exc.HTTPNotFound() + instance = 
common.get_instance(self.compute_api, + context, server_id, + want_objects=True) + LOG.audit(_("Detach interface %s"), port_id, instance=instance) try: self.compute_api.detach_interface(context, instance, port_id=port_id) @@ -167,12 +159,7 @@ def _items(self, req, server_id, entity_maker): """Returns a list of attachments, transformed through entity_maker.""" context = req.environ['nova.context'] authorize(context) - - try: - instance = self.compute_api.get(context, server_id) - except exception.NotFound: - raise exc.HTTPNotFound() - + instance = common.get_instance(self.compute_api, context, server_id) results = [] search_opts = {'device_id': instance['uuid']} diff --git a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py index 29b93eb260..c9a9ee5ff5 100644 --- a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py +++ b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py @@ -373,4 +373,5 @@ def test_attach_interface_fixed_ip_already_in_use(self, jsonutils.loads(req.body)) attach_mock.assert_called_once_with(self.context, {}, None, None, None) get_mock.assert_called_once_with(self.context, FAKE_UUID1, - want_objects=True) + want_objects=True, + expected_attrs=None) From f89d13b141eba66487b3d858cd075a47b2de6016 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 22 Apr 2014 03:36:51 -0700 Subject: [PATCH 323/486] VMware: handle case when VM snapshot delete fails Minesweeper fails on occasion with concurrent access to files. If a snapshot deletion fails with the backend exception TaskInProgress then we will retry. A generic decorator has been added to do the retry operations. 
Closes-bug: #1310817 Change-Id: I8ed7a24ccd34aeea49352ac98f34ec2960edbf97 --- nova/tests/virt/vmwareapi/test_driver_api.py | 31 ++++++++++++++++++++ nova/virt/vmwareapi/error_util.py | 8 ++++- nova/virt/vmwareapi/vim.py | 17 +++++++++++ nova/virt/vmwareapi/vmops.py | 2 +- 4 files changed, 56 insertions(+), 2 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index c307ac2766..f51468ec66 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -1294,6 +1294,37 @@ def test_snapshot_delete_vm_snapshot(self): self._test_snapshot() + def _snapshot_delete_vm_snapshot_exception(self, exception, call_count=1): + self._create_vm() + fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj + snapshot_ref = vmwareapi_fake.ManagedObjectReference( + value="Snapshot-123", + name="VirtualMachineSnapshot") + + with contextlib.nested( + mock.patch.object(self.conn._session, '_wait_for_task', + side_effect=exception), + mock.patch.object(time, 'sleep') + ) as (_fake_wait, _fake_sleep): + if exception != error_util.TaskInProgress: + self.assertRaises(exception, + self.conn._vmops._delete_vm_snapshot, + self.instance, fake_vm, snapshot_ref) + self.assertEqual(0, _fake_sleep.call_count) + else: + self.conn._vmops._delete_vm_snapshot(self.instance, fake_vm, + snapshot_ref) + self.assertEqual(call_count - 1, _fake_sleep.call_count) + self.assertEqual(call_count, _fake_wait.call_count) + + def test_snapshot_delete_vm_snapshot_exception(self): + self._snapshot_delete_vm_snapshot_exception(exception.NovaException) + + def test_snapshot_delete_vm_snapshot_exception_retry(self): + self.flags(api_retry_count=5, group='vmware') + self._snapshot_delete_vm_snapshot_exception(error_util.TaskInProgress, + 5) + def test_reboot(self): self._create_vm() info = self.conn.get_info({'name': 1, 'uuid': self.uuid, diff --git a/nova/virt/vmwareapi/error_util.py 
b/nova/virt/vmwareapi/error_util.py index 89ce19df16..a61a85a251 100644 --- a/nova/virt/vmwareapi/error_util.py +++ b/nova/virt/vmwareapi/error_util.py @@ -32,6 +32,7 @@ INVALID_PROPERTY = 'InvalidProperty' NO_PERMISSION = 'NoPermission' NOT_AUTHENTICATED = 'NotAuthenticated' +TASK_IN_PROGRESS = 'TaskInProgress' class VimException(Exception): @@ -203,6 +204,10 @@ class InvalidPowerStateException(VMwareDriverException): code = 409 +class TaskInProgress(VMwareDriverException): + msg_fmt = _("Virtual machine is busy.") + + # Populate the fault registry with the exceptions that have # special treatment. _fault_classes_registry = { @@ -215,7 +220,8 @@ class InvalidPowerStateException(VMwareDriverException): INVALID_POWER_STATE: InvalidPowerStateException, INVALID_PROPERTY: InvalidPropertyException, NO_PERMISSION: NoPermissionException, - NOT_AUTHENTICATED: NotAuthenticatedException + NOT_AUTHENTICATED: NotAuthenticatedException, + TASK_IN_PROGRESS: TaskInProgress, } diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py index d4aa456c2b..119c0fef1e 100644 --- a/nova/virt/vmwareapi/vim.py +++ b/nova/virt/vmwareapi/vim.py @@ -19,8 +19,10 @@ """ import httplib +import time import urllib2 +import decorator from oslo.config import cfg import suds @@ -41,6 +43,21 @@ CONF.register_opt(vmwareapi_wsdl_loc_opt, 'vmware') +@decorator.decorator +def retry_if_task_in_progress(f, *args, **kwargs): + retries = max(CONF.vmware.api_retry_count, 1) + delay = 1 + for attempt in range(1, retries + 1): + if attempt != 1: + time.sleep(delay) + delay = min(2 * delay, 60) + try: + f(*args, **kwargs) + return + except error_util.TaskInProgress: + pass + + def get_moref(value, type): """Get managed object reference.""" moref = suds.sudsobject.Property(value) diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 15af1feb35..2fac727be0 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -698,6 +698,7 @@ def 
_create_vm_snapshot(self, instance, vm_ref): snapshot = task_info.result return snapshot + @vim.retry_if_task_in_progress def _delete_vm_snapshot(self, instance, vm_ref, snapshot): LOG.debug("Deleting Snapshot of the VM instance", instance=instance) delete_snapshot_task = self._session._call_method( @@ -800,7 +801,6 @@ def _copy_vmdk_content(): instance=instance) _copy_vmdk_content() - # Note(vui): handle snapshot cleanup on exceptions. self._delete_vm_snapshot(instance, vm_ref, snapshot) cookies = self._session._get_vim().client.options.transport.cookiejar From f539caeadb330b034d313e81e2d943ee27d4534a Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Mon, 11 Aug 2014 08:40:45 +0000 Subject: [PATCH 324/486] Merge BadRequest tests of "create a keypair" API In test_attach_interfaces, there are BadRequest tests of "create a keypair". Most parts of them are duplicated. This patch merges them for the readability and clarifying their purposes. Change-Id: I784f5d05969aa9a0c85b63f064190ab4e92ed7f7 --- .../compute/contrib/test_keypairs.py | 84 +++++-------------- 1 file changed, 20 insertions(+), 64 deletions(-) diff --git a/nova/tests/api/openstack/compute/contrib/test_keypairs.py b/nova/tests/api/openstack/compute/contrib/test_keypairs.py index 53294ecbc7..6ca588588f 100644 --- a/nova/tests/api/openstack/compute/contrib/test_keypairs.py +++ b/nova/tests/api/openstack/compute/contrib/test_keypairs.py @@ -100,20 +100,17 @@ def test_keypair_create(self): self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0) self.assertTrue(len(res_dict['keypair']['private_key']) > 0) - def test_keypair_create_with_empty_name(self): - body = {'keypair': {'name': ''}} + def _test_keypair_create_bad_request_case(self, body): req = webob.Request.blank('/v2/fake/os-keypairs') req.method = 'POST' req.body = jsonutils.dumps(body) req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) self.assertEqual(res.status_int, 400) - res_dict = jsonutils.loads(res.body) - 
self.assertEqual( - 'Keypair data is invalid: ' - 'Keypair name must be string and between 1 ' - 'and 255 characters long', - res_dict['badRequest']['message']) + + def test_keypair_create_with_empty_name(self): + body = {'keypair': {'name': ''}} + self._test_keypair_create_bad_request_case(body) def test_keypair_create_with_name_too_long(self): body = { @@ -121,18 +118,7 @@ def test_keypair_create_with_name_too_long(self): 'name': 'a' * 256 } } - req = webob.Request.blank('/v2/fake/os-keypairs') - req.method = 'POST' - req.body = jsonutils.dumps(body) - req.headers['Content-Type'] = 'application/json' - res = req.get_response(self.app) - self.assertEqual(res.status_int, 400) - res_dict = jsonutils.loads(res.body) - self.assertEqual( - 'Keypair data is invalid: ' - 'Keypair name must be string and between 1 ' - 'and 255 characters long', - res_dict['badRequest']['message']) + self._test_keypair_create_bad_request_case(body) def test_keypair_create_with_non_alphanumeric_name(self): body = { @@ -140,18 +126,20 @@ def test_keypair_create_with_non_alphanumeric_name(self): 'name': 'test/keypair' } } - req = webob.Request.blank('/v2/fake/os-keypairs') - req.method = 'POST' - req.body = jsonutils.dumps(body) - req.headers['Content-Type'] = 'application/json' - res = req.get_response(self.app) - res_dict = jsonutils.loads(res.body) - self.assertEqual(res.status_int, 400) - res_dict = jsonutils.loads(res.body) - self.assertEqual( - "Keypair data is invalid: " - "Keypair name contains unsafe characters", - res_dict['badRequest']['message']) + self._test_keypair_create_bad_request_case(body) + + def test_keypair_import_bad_key(self): + body = { + 'keypair': { + 'name': 'create_test', + 'public_key': 'ssh-what negative', + }, + } + self._test_keypair_create_bad_request_case(body) + + def test_keypair_create_with_invalid_keypair_body(self): + body = {'alpha': {'name': 'create_test'}} + self._test_keypair_create_bad_request_case(body) def test_keypair_import(self): body = { @@ 
-251,26 +239,6 @@ def test_keypair_create_duplicate(self): "Key pair 'create_duplicate' already exists.", res_dict['conflictingRequest']['message']) - def test_keypair_import_bad_key(self): - body = { - 'keypair': { - 'name': 'create_test', - 'public_key': 'ssh-what negative', - }, - } - - req = webob.Request.blank('/v2/fake/os-keypairs') - req.method = 'POST' - req.body = jsonutils.dumps(body) - req.headers['Content-Type'] = 'application/json' - res = req.get_response(self.app) - self.assertEqual(res.status_int, 400) - - res_dict = jsonutils.loads(res.body) - self.assertEqual( - 'Keypair data is invalid: failed to generate fingerprint', - res_dict['badRequest']['message']) - def test_keypair_delete(self): req = webob.Request.blank('/v2/fake/os-keypairs/FAKE') req.method = 'DELETE' @@ -350,18 +318,6 @@ def test_detail_servers(self): self.assertIn('key_name', server_dict) self.assertEqual(server_dict['key_name'], '') - def test_keypair_create_with_invalid_keypair_body(self): - body = {'alpha': {'name': 'create_test'}} - req = webob.Request.blank('/v1.1/fake/os-keypairs') - req.method = 'POST' - req.body = jsonutils.dumps(body) - req.headers['Content-Type'] = 'application/json' - res = req.get_response(self.app) - res_dict = jsonutils.loads(res.body) - self.assertEqual(res.status_int, 400) - self.assertEqual(res_dict['badRequest']['message'], - "Invalid request body") - class KeypairPolicyTest(test.TestCase): From b011325cc69eb83c5b98e9554361e7a68b81bba8 Mon Sep 17 00:00:00 2001 From: Matthew Gilliard Date: Wed, 30 Jul 2014 19:27:13 +0100 Subject: [PATCH 325/486] Provide a quick way to run flake8 "run_tests.sh -p" always checks every file for errors even though you have probably only changed a handful. This patch adds "run_tests.sh -8" which only checks the files that were modified in the HEAD commit or the current working tree. An unscientific benchmark shows that this can be quite a bit faster: Before: $ time ./run_tests.sh -p Running flake8 ... 
./nova/compute/manager.py:77:1: F401 'periodic_task' imported but unused real 0m37.493s user 0m37.391s sys 0m0.099s After: $ time ./run_tests.sh -8 Running flake8 on nova/compute/manager.py ./nova/compute/manager.py:77:1: F401 'periodic_task' imported but unused real 0m0.667s user 0m0.624s sys 0m0.036s Change-Id: I8dafaab40fd0427719040a76c69a8ab05e9e4386 --- run_tests.sh | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 1fecc4c9b5..abc10fab15 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -12,6 +12,7 @@ function usage { echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." echo " -u, --update Update the virtual environment with any newer package versions" echo " -p, --pep8 Just run PEP8 and HACKING compliance check" + echo " -8, --pep8-only-changed Just run PEP8 and HACKING compliance check on files changed since HEAD~1" echo " -P, --no-pep8 Don't run static code checks" echo " -c, --coverage Generate coverage report" echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger." @@ -43,6 +44,7 @@ function process_options { -f|--force) force=1;; -u|--update) update=1;; -p|--pep8) just_pep8=1;; + -8|--pep8-only-changed) just_pep8_changed=1;; -P|--no-pep8) no_pep8=1;; -c|--coverage) coverage=1;; -d|--debug) debug=1;; @@ -82,6 +84,7 @@ testrargs= testropts= wrapper="" just_pep8=0 +just_pep8_changed=0 no_pep8=0 coverage=0 debug=0 @@ -167,12 +170,16 @@ function copy_subunit_log { cp $LOGNAME subunit.log } -function run_pep8 { - echo "Running flake8 ..." +function warn_on_flake8_without_venv { if [ $never_venv -eq 1 ]; then - echo "**WARNING**:" - echo "Running flake8 without virtual env may miss OpenStack HACKING detection" + echo "**WARNING**:" + echo "Running flake8 without virtual env may miss OpenStack HACKING detection" fi +} + +function run_pep8 { + echo "Running flake8 ..." 
+ warn_on_flake8_without_venv bash -c "${wrapper} flake8" } @@ -219,6 +226,19 @@ if [ $just_pep8 -eq 1 ]; then exit fi +if [ $just_pep8_changed -eq 1 ]; then + # NOTE(gilliard) We want use flake8 to check the entirety of every file that has + # a change in it. Unfortunately the --filenames argument to flake8 only accepts + # file *names* and there are no files named (eg) "nova/compute/manager.py". The + # --diff argument behaves surprisingly as well, because although you feed it a + # diff, it actually checks the file on disk anyway. + files=$(git diff --name-only HEAD~1 | tr '\n' ' ') + echo "Running flake8 on ${files}" + warn_on_flake8_without_venv + bash -c "diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff" + exit +fi + run_tests # NOTE(sirp): we only want to run pep8 when we're running the full-test suite, From a3d274303ef8d1a123fcfb4ae3ba85e9712fe470 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 7 Aug 2014 19:22:26 -0400 Subject: [PATCH 326/486] docs - Fix errors,warnings from document generation ERROR: Unexpected indentation. WARNING: Field list ends without a blank line; unexpected unindent. WARNING: Block quote ends without a blank line; unexpected unindent. 
Partial-Bug: #1351350 Change-Id: If13b5c533cf9a1e8f543b8f679c464c0dc007a30 --- nova/compute/resources/base.py | 2 +- nova/pci/pci_devspec.py | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/nova/compute/resources/base.py b/nova/compute/resources/base.py index aebd29fb40..a04fc9f97f 100644 --- a/nova/compute/resources/base.py +++ b/nova/compute/resources/base.py @@ -44,7 +44,7 @@ def test(self, usage, limits): :param limits: limits to apply :returns: None if the test passes or a string describing the reason - why the test failed + why the test failed """ pass diff --git a/nova/pci/pci_devspec.py b/nova/pci/pci_devspec.py index c228e4a15d..a03cd80b9a 100755 --- a/nova/pci/pci_devspec.py +++ b/nova/pci/pci_devspec.py @@ -49,9 +49,11 @@ class PciAddress(object): This class checks the address fields of the pci_passthrough_whitelist configuration option, validating the address fields. Example config are: - pci_passthrough_whitelist = {"address":"*:0a:00.*", - "physical_network":"physnet1"} - pci_passthrough_whitelist = {"address":":0a:00.", + + | pci_passthrough_whitelist = {"address":"*:0a:00.*", + | "physical_network":"physnet1"} + | pci_passthrough_whitelist = {"address":":0a:00.", + This function class will validate the address fields, check for wildcards, and insert wildcards where the field is left blank. """ From a507d42cf5d9912c2b3622e84afb8b7d3278595b Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Fri, 1 Aug 2014 10:36:28 -0400 Subject: [PATCH 327/486] docs - Set pbr 'warnerrors' option for doc build By setting this pbr option in setup.cfg, the doc build will fail in case of any warnings or errors occur during the build process. 
Closes-Bug: #1351350 Change-Id: Id4858062d2aaa4c2fe5b597e40e4e8947f544a4d --- doc/source/conf.py | 2 +- doc/source/index.rst | 1 + setup.cfg | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 83723a24ab..f83589a895 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -194,7 +194,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +#html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. diff --git a/doc/source/index.rst b/doc/source/index.rst index 37fcb48b88..123506fb09 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -48,6 +48,7 @@ Developer Docs devref/index man/index + api/autoindex API Extensions ============== diff --git a/setup.cfg b/setup.cfg index 20c9399e1b..46b34e46c1 100644 --- a/setup.cfg +++ b/setup.cfg @@ -166,3 +166,4 @@ universal = 1 [pbr] autodoc_index_modules = 1 +warnerrors = true From 6cef3c9b75cedb0fe6ff901128452415128d60b0 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 24 Jul 2014 12:05:49 -0700 Subject: [PATCH 328/486] Correct some IPAddress DB interaction in objects We know that DB API methods want string addresses as parameters, but often an object method will be called with data from that or another object, which will be a netaddr.IPAddress in the case of that sort of data. So, we should coerce those to strings to make sure SA is happy. This also fixes a small bit of DB-loading logic where a FloatingIP would try to load its associated FixedIP explicitly. Obviously, if it's not associated, it shouldn't try to do that. 
Related to blueprint compute-manager-objects-juno Closes-bug: #1351020 Change-Id: I97b4a86846020b58950f7c051cb003b7b09e938a --- nova/objects/fixed_ip.py | 2 +- nova/objects/floating_ip.py | 19 ++++++++++--------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/nova/objects/fixed_ip.py b/nova/objects/fixed_ip.py index 2c815bfdb4..889047b4ea 100644 --- a/nova/objects/fixed_ip.py +++ b/nova/objects/fixed_ip.py @@ -94,7 +94,7 @@ def get_by_address(cls, context, address, expected_attrs=None): @obj_base.remotable_classmethod def get_by_floating_address(cls, context, address): - db_fixedip = db.fixed_ip_get_by_floating_address(context, address) + db_fixedip = db.fixed_ip_get_by_floating_address(context, str(address)) if db_fixedip is not None: return cls._from_db_object(context, cls(context), db_fixedip) diff --git a/nova/objects/floating_ip.py b/nova/objects/floating_ip.py index b0895c3e94..d65c25ce35 100644 --- a/nova/objects/floating_ip.py +++ b/nova/objects/floating_ip.py @@ -45,7 +45,8 @@ def _from_db_object(context, floatingip, db_floatingip, for field in floatingip.fields: if field not in FLOATING_IP_OPTIONAL_ATTRS: floatingip[field] = db_floatingip[field] - if 'fixed_ip' in expected_attrs: + if ('fixed_ip' in expected_attrs and + db_floatingip['fixed_ip'] is not None): floatingip.fixed_ip = objects.FixedIP._from_db_object( context, objects.FixedIP(context), db_floatingip['fixed_ip']) floatingip._context = context @@ -75,7 +76,7 @@ def get_by_id(cls, context, id): @obj_base.remotable_classmethod def get_by_address(cls, context, address): - db_floatingip = db.floating_ip_get_by_address(context, address) + db_floatingip = db.floating_ip_get_by_address(context, str(address)) return cls._from_db_object(context, cls(context), db_floatingip) @obj_base.remotable_classmethod @@ -90,8 +91,8 @@ def allocate_address(cls, context, project_id, pool, auto_assigned=False): @obj_base.remotable_classmethod def associate(cls, context, floating_address, fixed_address, 
host): db_fixed = db.floating_ip_fixed_ip_associate(context, - floating_address, - fixed_address, + str(floating_address), + str(fixed_address), host) if db_fixed is None: return None @@ -106,15 +107,15 @@ def associate(cls, context, floating_address, fixed_address, host): @obj_base.remotable_classmethod def deallocate(cls, context, address): - return db.floating_ip_deallocate(context, address) + return db.floating_ip_deallocate(context, str(address)) @obj_base.remotable_classmethod def destroy(cls, context, address): - db.floating_ip_destroy(context, address) + db.floating_ip_destroy(context, str(address)) @obj_base.remotable_classmethod def disassociate(cls, context, address): - db_fixed = db.floating_ip_disassociate(context, address) + db_fixed = db.floating_ip_disassociate(context, str(address)) return cls(context=context, address=address, fixed_ip_id=db_fixed['id'], @@ -180,8 +181,8 @@ def get_by_project(cls, context, project_id): @obj_base.remotable_classmethod def get_by_fixed_address(cls, context, fixed_address): - db_floatingips = db.floating_ip_get_by_fixed_address(context, - fixed_address) + db_floatingips = db.floating_ip_get_by_fixed_address( + context, str(fixed_address)) return obj_base.obj_make_list(context, cls(context), objects.FloatingIP, db_floatingips) From 7254f9b9dfbadadeb3aeda5d02bf37bfeb65e72d Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 24 Jul 2014 12:33:30 -0700 Subject: [PATCH 329/486] Convert network/api.py to use FloatingIP object This converts floating IP manipulation code in the network/api.py module to use the FloatingIP object. Basically no testing of these interfaces was being done, so this adds that as well. 
Related to blueprint compute-manager-objects-juno Change-Id: I6120dd4a32473ca9029832503444fbeae1ab0576 --- .../compute/contrib/floating_ip_pools.py | 8 ++-- nova/network/api.py | 16 +++---- .../compute/contrib/test_floating_ip_pools.py | 9 ++-- nova/tests/integrated/test_api_samples.py | 3 +- nova/tests/network/test_api.py | 47 +++++++++++++++++++ 5 files changed, 64 insertions(+), 19 deletions(-) diff --git a/nova/api/openstack/compute/contrib/floating_ip_pools.py b/nova/api/openstack/compute/contrib/floating_ip_pools.py index b1165d1c33..7ca9831f1f 100644 --- a/nova/api/openstack/compute/contrib/floating_ip_pools.py +++ b/nova/api/openstack/compute/contrib/floating_ip_pools.py @@ -21,16 +21,16 @@ authorize = extensions.extension_authorizer('compute', 'floating_ip_pools') -def _translate_floating_ip_view(pool): +def _translate_floating_ip_view(pool_name): return { - 'name': pool['name'], + 'name': pool_name, } def _translate_floating_ip_pools_view(pools): return { - 'floating_ip_pools': [_translate_floating_ip_view(pool) - for pool in pools] + 'floating_ip_pools': [_translate_floating_ip_view(pool_name) + for pool_name in pools] } diff --git a/nova/network/api.py b/nova/network/api.py index 41f3916bb6..7240fa0430 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -112,26 +112,26 @@ def get_fixed_ip_by_address(self, context, address): def get_floating_ip(self, context, id): if not utils.is_int_like(id): raise exception.InvalidID(id=id) - return self.db.floating_ip_get(context, id) + return objects.FloatingIP.get_by_id(context, id) @wrap_check_policy def get_floating_ip_pools(self, context): - return self.db.floating_ip_get_pools(context) + return objects.FloatingIP.get_pool_names(context) @wrap_check_policy def get_floating_ip_by_address(self, context, address): - return self.db.floating_ip_get_by_address(context, address) + return objects.FloatingIP.get_by_address(context, address) @wrap_check_policy def get_floating_ips_by_project(self, context): - 
return self.db.floating_ip_get_all_by_project(context, - context.project_id) + return objects.FloatingIPList.get_by_project(context, + context.project_id) @wrap_check_policy def get_floating_ips_by_fixed_address(self, context, fixed_address): - floating_ips = self.db.floating_ip_get_by_fixed_address(context, - fixed_address) - return [floating_ip['address'] for floating_ip in floating_ips] + floating_ips = objects.FloatingIPList.get_by_fixed_address( + context, fixed_address) + return [str(floating_ip.address) for floating_ip in floating_ips] @wrap_check_policy def get_instance_id_by_floating_address(self, context, address): diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py b/nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py index 91a7d0a53d..da0e62bcf1 100644 --- a/nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py +++ b/nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py @@ -23,8 +23,7 @@ def fake_get_floating_ip_pools(self, context): - return [{'name': 'nova'}, - {'name': 'other'}] + return ['nova', 'other'] class FloatingIpPoolTest(test.NoDBTestCase): @@ -41,16 +40,16 @@ def test_translate_floating_ip_pools_view(self): view = floating_ip_pools._translate_floating_ip_pools_view(pools) self.assertIn('floating_ip_pools', view) self.assertEqual(view['floating_ip_pools'][0]['name'], - pools[0]['name']) + pools[0]) self.assertEqual(view['floating_ip_pools'][1]['name'], - pools[1]['name']) + pools[1]) def test_floating_ips_pools_list(self): req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ip-pools') res_dict = self.controller.index(req) pools = fake_get_floating_ip_pools(None, self.context) - response = {'floating_ip_pools': pools} + response = {'floating_ip_pools': [{'name': name} for name in pools]} self.assertEqual(res_dict, response) diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index 148250b105..0783abfa38 100644 --- 
a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -3022,8 +3022,7 @@ def test_list_floatingippools(self): pool_list = ["pool1", "pool2"] def fake_get_floating_ip_pools(self, context): - return [{'name': pool_list[0]}, - {'name': pool_list[1]}] + return pool_list self.stubs.Set(network_api.API, "get_floating_ip_pools", fake_get_floating_ip_pools) diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py index 89164db916..35b3cced3c 100644 --- a/nova/tests/network/test_api.py +++ b/nova/tests/network/test_api.py @@ -224,6 +224,53 @@ def test_get_floating_ip_invalid_id(self): self.network_api.get_floating_ip, self.context, '123zzz') + @mock.patch('nova.objects.FloatingIP.get_by_id') + def test_get_floating_ip(self, mock_get): + floating = mock.sentinel.floating + mock_get.return_value = floating + self.assertEqual(floating, + self.network_api.get_floating_ip(self.context, 123)) + mock_get.assert_called_once_with(self.context, 123) + + @mock.patch('nova.objects.FloatingIP.get_pool_names') + def test_get_floating_ip_pools(self, mock_get): + pools = ['foo', 'bar'] + mock_get.return_value = pools + self.assertEqual(pools, + self.network_api.get_floating_ip_pools( + self.context)) + + @mock.patch('nova.objects.FloatingIP.get_by_address') + def test_get_floating_ip_by_address(self, mock_get): + floating = mock.sentinel.floating + mock_get.return_value = floating + self.assertEqual(floating, + self.network_api.get_floating_ip_by_address( + self.context, mock.sentinel.address)) + mock_get.assert_called_once_with(self.context, + mock.sentinel.address) + + @mock.patch('nova.objects.FloatingIPList.get_by_project') + def test_get_floating_ips_by_project(self, mock_get): + floatings = mock.sentinel.floating_ips + mock_get.return_value = floatings + self.assertEqual(floatings, + self.network_api.get_floating_ips_by_project( + self.context)) + mock_get.assert_called_once_with(self.context, + self.context.project_id) + + 
@mock.patch('nova.objects.FloatingIPList.get_by_fixed_address') + def test_get_floating_ips_by_fixed_address(self, mock_get): + floatings = [objects.FloatingIP(id=1, address='1.2.3.4'), + objects.FloatingIP(id=2, address='5.6.7.8')] + mock_get.return_value = floatings + self.assertEqual(['1.2.3.4', '5.6.7.8'], + self.network_api.get_floating_ips_by_fixed_address( + self.context, mock.sentinel.fixed_address)) + mock_get.assert_called_once_with(self.context, + mock.sentinel.fixed_address) + def _stub_migrate_instance_calls(self, method, multi_host, info): fake_flavor = flavors.get_default_flavor() fake_flavor['rxtx_factor'] = 1.21 From 910f74eee369cf8874bc8aa7f6df7a7f908f7690 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 11 Aug 2014 12:03:09 -0700 Subject: [PATCH 330/486] Remove metadata/metadetails from instance/server groups This removes the half-implemented metadata implementation from server groups. This first patch removes all uses of metadata/metadetails, except what is required to handle existing API users that expect to see an empty metadata key, and existing RPC clients expecting the same in their InstanceGroup objects. 
Change-Id: Ied2e8b058f9848e76222ca86e8f79105f08ea938 --- .../compute/contrib/server_groups.py | 4 +- nova/db/api.py | 8 +- nova/db/sqlalchemy/api.py | 85 +---------------- nova/db/sqlalchemy/models.py | 21 ----- nova/objects/instance_group.py | 20 ++-- .../compute/contrib/test_server_groups.py | 32 ++----- nova/tests/db/test_db_api.py | 93 +------------------ nova/tests/objects/test_instance_group.py | 31 +------ nova/tests/objects/test_objects.py | 2 +- 9 files changed, 31 insertions(+), 265 deletions(-) diff --git a/nova/api/openstack/compute/contrib/server_groups.py b/nova/api/openstack/compute/contrib/server_groups.py index 3a970f83a3..fdb2d3d0ce 100644 --- a/nova/api/openstack/compute/contrib/server_groups.py +++ b/nova/api/openstack/compute/contrib/server_groups.py @@ -137,7 +137,9 @@ def _format_server_group(self, context, group): server_group['id'] = group.uuid server_group['name'] = group.name server_group['policies'] = group.policies or [] - server_group['metadata'] = group.metadetails or {} + # NOTE(danms): This has been exposed to the user, but never used. + # Since we can't remove it, just make sure it's always empty. + server_group['metadata'] = {} members = [] if group.members: # Display the instances that are not deleted. diff --git a/nova/db/api.py b/nova/db/api.py index 801a0d0fe6..81b38e0a06 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -801,15 +801,13 @@ def instance_remove_security_group(context, instance_id, security_group_id): #################### -def instance_group_create(context, values, policies=None, metadata=None, - members=None): - """Create a new group with metadata. +def instance_group_create(context, values, policies=None, members=None): + """Create a new group. Each group will receive a unique uuid. This will be used for access to the group. 
""" - return IMPL.instance_group_create(context, values, policies, metadata, - members) + return IMPL.instance_group_create(context, values, policies, members) def instance_group_get(context, group_uuid): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index f92e498a68..e52ca35d6d 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -5684,8 +5684,7 @@ def archive_deleted_rows(context, max_rows=None): def _instance_group_get_query(context, model_class, id_field=None, id=None, session=None, read_deleted=None): - columns_to_join = {models.InstanceGroup: ['_policies', '_metadata', - '_members']} + columns_to_join = {models.InstanceGroup: ['_policies', '_members']} query = model_query(context, model_class, session=session, read_deleted=read_deleted) @@ -5698,9 +5697,9 @@ def _instance_group_get_query(context, model_class, id_field=None, id=None, return query -def instance_group_create(context, values, policies=None, metadata=None, +def instance_group_create(context, values, policies=None, members=None): - """Create a new group with metadata.""" + """Create a new group.""" uuid = values.get('uuid', None) if uuid is None: uuid = uuidutils.generate_uuid() @@ -5717,14 +5716,10 @@ def instance_group_create(context, values, policies=None, metadata=None, # We don't want these to be lazy loaded later. We know there is # nothing here since we just created this instance group. 
group._policies = [] - group._metadata = [] group._members = [] if policies: _instance_group_policies_add(context, group.id, policies, session=session) - if metadata: - _instance_group_metadata_add(context, group.id, metadata, - session=session) if members: _instance_group_members_add(context, group.id, members, session=session) @@ -5766,13 +5761,6 @@ def instance_group_update(context, group_uuid, values): values.pop('policies'), set_delete=True, session=session) - metadata = values.get('metadata') - if metadata is not None: - _instance_group_metadata_add(context, - group.id, - values.pop('metadata'), - set_delete=True, - session=session) members = values.get('members') if members is not None: _instance_group_members_add(context, @@ -5785,8 +5773,6 @@ def instance_group_update(context, group_uuid, values): if policies: values['policies'] = policies - if metadata: - values['metadata'] = metadata if members: values['members'] = members @@ -5807,7 +5793,6 @@ def instance_group_delete(context, group_uuid): # Delete policies, metadata and members instance_models = [models.InstanceGroupPolicy, - models.InstanceGroupMetadata, models.InstanceGroupMember] for model in instance_models: model_query(context, model, session=session).\ @@ -5850,70 +5835,6 @@ def _instance_group_id(context, group_uuid, session=None): return result.id -def _instance_group_metadata_add(context, id, metadata, set_delete=False, - session=None): - if not session: - session = get_session() - - with session.begin(subtransactions=True): - all_keys = metadata.keys() - query = _instance_group_model_get_query(context, - models.InstanceGroupMetadata, - id, - session=session) - if set_delete: - query.filter(~models.InstanceGroupMetadata.key.in_(all_keys)).\ - soft_delete(synchronize_session=False) - - query = query.filter(models.InstanceGroupMetadata.key.in_(all_keys)) - already_existing_keys = set() - for meta_ref in query.all(): - key = meta_ref.key - meta_ref.update({'value': metadata[key]}) - 
already_existing_keys.add(key) - - for key, value in metadata.iteritems(): - if key in already_existing_keys: - continue - meta_ref = models.InstanceGroupMetadata() - meta_ref.update({'key': key, - 'value': value, - 'group_id': id}) - session.add(meta_ref) - - return metadata - - -def instance_group_metadata_add(context, group_uuid, metadata, - set_delete=False): - id = _instance_group_id(context, group_uuid) - return _instance_group_metadata_add(context, id, metadata, - set_delete=set_delete) - - -def instance_group_metadata_delete(context, group_uuid, key): - id = _instance_group_id(context, group_uuid) - count = _instance_group_get_query(context, - models.InstanceGroupMetadata, - models.InstanceGroupMetadata.group_id, - id).\ - filter_by(key=key).\ - soft_delete() - if count == 0: - raise exception.InstanceGroupMetadataNotFound(group_uuid=group_uuid, - metadata_key=key) - - -def instance_group_metadata_get(context, group_uuid): - id = _instance_group_id(context, group_uuid) - rows = model_query(context, - models.InstanceGroupMetadata.key, - models.InstanceGroupMetadata.value, - base_model=models.InstanceGroupMetadata).\ - filter_by(group_id=id).all() - return dict((r[0], r[1]) for r in rows) - - def _instance_group_members_add(context, id, members, set_delete=False, session=None): if not session: diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index e1008515cc..9f985963c9 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -1305,19 +1305,6 @@ class InstanceGroupPolicy(BASE, NovaBase): nullable=False) -class InstanceGroupMetadata(BASE, NovaBase): - """Represents a key/value pair for an instance group.""" - __tablename__ = 'instance_group_metadata' - __table_args__ = ( - Index('instance_group_metadata_key_idx', 'key'), - ) - id = Column(Integer, primary_key=True, nullable=False) - key = Column(String(255)) - value = Column(String(255)) - group_id = Column(Integer, ForeignKey('instance_groups.id'), - 
nullable=False) - - class InstanceGroup(BASE, NovaBase): """Represents an instance group. @@ -1340,10 +1327,6 @@ class InstanceGroup(BASE, NovaBase): 'InstanceGroup.id == InstanceGroupPolicy.group_id,' 'InstanceGroupPolicy.deleted == 0,' 'InstanceGroup.deleted == 0)') - _metadata = orm.relationship(InstanceGroupMetadata, primaryjoin='and_(' - 'InstanceGroup.id == InstanceGroupMetadata.group_id,' - 'InstanceGroupMetadata.deleted == 0,' - 'InstanceGroup.deleted == 0)') _members = orm.relationship(InstanceGroupMember, primaryjoin='and_(' 'InstanceGroup.id == InstanceGroupMember.group_id,' 'InstanceGroupMember.deleted == 0,' @@ -1353,10 +1336,6 @@ class InstanceGroup(BASE, NovaBase): def policies(self): return [p.policy for p in self._policies] - @property - def metadetails(self): - return dict((m.key, m.value) for m in self._metadata) - @property def members(self): return [m.instance_id for m in self._members] diff --git a/nova/objects/instance_group.py b/nova/objects/instance_group.py index 5e8ea18e17..1e1691701c 100644 --- a/nova/objects/instance_group.py +++ b/nova/objects/instance_group.py @@ -28,7 +28,8 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject): # Version 1.4: Add add_members() # Version 1.5: Add get_hosts() # Version 1.6: Add get_by_name() - VERSION = '1.6' + # Version 1.7: Deprecate metadetails + VERSION = '1.7' fields = { 'id': fields.IntegerField(), @@ -40,10 +41,15 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject): 'name': fields.StringField(nullable=True), 'policies': fields.ListOfStringsField(nullable=True), - 'metadetails': fields.DictOfStringsField(nullable=True), 'members': fields.ListOfStringsField(nullable=True), } + def obj_make_compatible(self, primitive, target_version): + if target_version < (1, 7): + # NOTE(danms): Before 1.7, we had an always-empty + # metadetails property + primitive['metadetails'] = {} + @staticmethod def _from_db_object(context, instance_group, db_inst): """Method to help with 
migration to objects. @@ -95,11 +101,6 @@ def save(self, context): if not updates: return - metadata = None - if 'metadetails' in updates: - metadata = updates.pop('metadetails') - updates.update({'metadata': metadata}) - db.instance_group_update(context, self.uuid, updates) db_inst = db.instance_group_get(context, self.uuid) self._from_db_object(context, self, db_inst) @@ -122,11 +123,9 @@ def create(self, context): updates.pop('id', None) policies = updates.pop('policies', None) members = updates.pop('members', None) - metadetails = updates.pop('metadetails', None) db_inst = db.instance_group_create(context, updates, policies=policies, - metadata=metadetails, members=members) self._from_db_object(context, self, db_inst) @@ -165,6 +164,8 @@ class InstanceGroupList(base.ObjectListBase, base.NovaObject): # InstanceGroup <= version 1.3 # Version 1.1: InstanceGroup <= version 1.4 # Version 1.2: InstanceGroup <= version 1.5 + # Version 1.3: InstanceGroup <= version 1.6 + # Version 1.4: InstanceGroup <= version 1.7 VERSION = '1.2' fields = { @@ -176,6 +177,7 @@ class InstanceGroupList(base.ObjectListBase, base.NovaObject): '1.1': '1.4', '1.2': '1.5', '1.3': '1.6', + '1.4': '1.7', } @base.remotable_classmethod diff --git a/nova/tests/api/openstack/compute/contrib/test_server_groups.py b/nova/tests/api/openstack/compute/contrib/test_server_groups.py index 62745133d7..54b1241d71 100644 --- a/nova/tests/api/openstack/compute/contrib/test_server_groups.py +++ b/nova/tests/api/openstack/compute/contrib/test_server_groups.py @@ -48,7 +48,6 @@ def server_group_resp_template(**kwargs): sgroup.setdefault('name', 'test') sgroup.setdefault('policies', []) sgroup.setdefault('members', []) - sgroup.setdefault('metadata', {}) return sgroup @@ -66,10 +65,6 @@ def server_group_db(sg): attrs['members'] = members else: attrs['members'] = [] - if 'metadata' in attrs: - attrs['metadetails'] = attrs.pop('metadata') - else: - attrs['metadetails'] = {} attrs['deleted'] = 0 attrs['deleted_at'] 
= None attrs['created_at'] = None @@ -257,7 +252,7 @@ def test_list_server_group_by_tenant(self): groups = [] policies = ['anti-affinity'] members = [] - metadata = {'key1': 'value1'} + metadata = {} # always empty names = ['default-x', 'test'] sg1 = server_group_resp_template(id=str(1345), name=names[0], @@ -287,7 +282,7 @@ def test_list_server_group_all(self): tenant_groups = [] policies = ['anti-affinity'] members = [] - metadata = {'key1': 'value1'} + metadata = {} # always empty names = ['default-x', 'test'] sg1 = server_group_resp_template(id=str(1345), name=names[0], @@ -428,7 +423,6 @@ def _tag(self, elem): def _verify_server_group(self, raw_group, tree): policies = raw_group['policies'] members = raw_group['members'] - metadata = raw_group['metadata'] self.assertEqual('server_group', self._tag(tree)) self.assertEqual(raw_group['id'], tree.get('id')) self.assertEqual(raw_group['name'], tree.get('name')) @@ -448,16 +442,7 @@ def _verify_server_group(self, raw_group, tree): self.assertEqual(members[idx], gr_child.text) elif child_tag == 'metadata': - self.assertEqual(len(metadata), len(child)) - metas = {} - for idx, gr_child in enumerate(child): - self.assertEqual(self._tag(gr_child), 'meta') - key = gr_child.get('key') - self.assertIn(key, ['key1', 'key2']) - metas[key] = gr_child.text - self.assertEqual(len(metas), len(metadata)) - for k in metadata: - self.assertEqual(metadata[k], metas[k]) + self.assertEqual(0, len(child)) def _verify_server_group_brief(self, raw_group, tree): self.assertEqual('server_group', self._tag(tree)) @@ -467,13 +452,11 @@ def _verify_server_group_brief(self, raw_group, tree): def test_group_serializer(self): policies = ["policy-1", "policy-2"] members = ["1", "2"] - metadata = dict(key1="value1", key2="value2") raw_group = dict( id='890', name='name', policies=policies, - members=members, - metadata=metadata) + members=members) sg_group = dict(server_group=raw_group) text = self.default_serializer.serialize(sg_group) @@ -485,19 
+468,16 @@ def test_groups_serializer(self): policies = ["policy-1", "policy-2", "policy-3"] members = ["1", "2", "3"] - metadata = dict(key1="value1", key2="value2") groups = [dict( id='890', name='test', policies=policies[0:2], - members=members[0:2], - metadata=metadata), + members=members[0:2]), dict( id='123', name='default', policies=policies[2:], - members=members[2:], - metadata=metadata)] + members=members[2:])] sg_groups = dict(server_groups=groups) text = self.index_serializer.serialize(sg_groups) diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py index 437830d527..67fd107a22 100644 --- a/nova/tests/db/test_db_api.py +++ b/nova/tests/db/test_db_api.py @@ -6667,9 +6667,9 @@ def _get_default_values(self): 'project_id': self.project_id} def _create_instance_group(self, context, values, policies=None, - metadata=None, members=None): + members=None): return db.instance_group_create(context, values, policies=policies, - metadata=metadata, members=members) + members=members) def test_instance_group_create_no_key(self): values = self._get_default_values() @@ -6781,15 +6781,6 @@ def test_instance_group_update(self): db.instance_group_update(self.context, id, values) result = db.instance_group_get(self.context, id) self.assertEqual(result['name'], 'new_fake_name') - # update metadata - values = self._get_default_values() - metadataInput = {'key11': 'value1', - 'key12': 'value2'} - values['metadata'] = metadataInput - db.instance_group_update(self.context, id, values) - result = db.instance_group_get(self.context, id) - metadata = result['metadetails'] - self._assertEqualObjects(metadata, metadataInput) # update update members values = self._get_default_values() members = ['instance_id1', 'instance_id2'] @@ -6810,86 +6801,6 @@ def test_instance_group_update(self): 'invalid_id', values) -class InstanceGroupMetadataDBApiTestCase(InstanceGroupDBApiTestCase): - def test_instance_group_metadata_on_create(self): - values = self._get_default_values() 
- values['uuid'] = 'fake_id' - metadata = {'key11': 'value1', - 'key12': 'value2'} - result = self._create_instance_group(self.context, values, - metadata=metadata) - ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at', - 'created_at'] - self._assertEqualObjects(result, values, ignored_keys) - self._assertEqualObjects(metadata, result['metadetails']) - - def test_instance_group_metadata_add(self): - values = self._get_default_values() - values['uuid'] = 'fake_id' - result = self._create_instance_group(self.context, values) - id = result['uuid'] - metadata = db.instance_group_metadata_get(self.context, id) - self._assertEqualObjects(metadata, {}) - metadata = {'key1': 'value1', - 'key2': 'value2'} - db.instance_group_metadata_add(self.context, id, metadata) - metadata2 = db.instance_group_metadata_get(self.context, id) - self._assertEqualObjects(metadata, metadata2) - - def test_instance_group_update(self): - values = self._get_default_values() - values['uuid'] = 'fake_id' - result = self._create_instance_group(self.context, values) - id = result['uuid'] - metadata = {'key1': 'value1', - 'key2': 'value2'} - db.instance_group_metadata_add(self.context, id, metadata) - metadata2 = db.instance_group_metadata_get(self.context, id) - self._assertEqualObjects(metadata, metadata2) - # check add with existing keys - metadata = {'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3'} - db.instance_group_metadata_add(self.context, id, metadata) - metadata3 = db.instance_group_metadata_get(self.context, id) - self._assertEqualObjects(metadata, metadata3) - - def test_instance_group_delete(self): - values = self._get_default_values() - values['uuid'] = 'fake_id' - result = self._create_instance_group(self.context, values) - id = result['uuid'] - metadata = {'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3'} - db.instance_group_metadata_add(self.context, id, metadata) - metadata3 = db.instance_group_metadata_get(self.context, id) - 
self._assertEqualObjects(metadata, metadata3) - db.instance_group_metadata_delete(self.context, id, 'key1') - metadata = db.instance_group_metadata_get(self.context, id) - self.assertNotIn('key1', metadata) - db.instance_group_metadata_delete(self.context, id, 'key2') - metadata = db.instance_group_metadata_get(self.context, id) - self.assertNotIn('key2', metadata) - - def test_instance_group_metadata_invalid_ids(self): - values = self._get_default_values() - result = self._create_instance_group(self.context, values) - id = result['uuid'] - self.assertRaises(exception.InstanceGroupNotFound, - db.instance_group_metadata_get, - self.context, 'invalid') - self.assertRaises(exception.InstanceGroupNotFound, - db.instance_group_metadata_delete, self.context, - 'invalidid', 'key1') - metadata = {'key1': 'value1', - 'key2': 'value2'} - db.instance_group_metadata_add(self.context, id, metadata) - self.assertRaises(exception.InstanceGroupMetadataNotFound, - db.instance_group_metadata_delete, - self.context, id, 'invalidkey') - - class InstanceGroupMembersDBApiTestCase(InstanceGroupDBApiTestCase): def test_instance_group_members_on_create(self): values = self._get_default_values() diff --git a/nova/tests/objects/test_instance_group.py b/nova/tests/objects/test_instance_group.py index c2093ef047..113e0dd688 100644 --- a/nova/tests/objects/test_instance_group.py +++ b/nova/tests/objects/test_instance_group.py @@ -38,23 +38,19 @@ def _get_default_values(self): 'project_id': self.project_id} def _create_instance_group(self, context, values, policies=None, - metadata=None, members=None): + members=None): return db.instance_group_create(context, values, policies=policies, - metadata=metadata, members=members) + members=members) def test_get_by_uuid(self): values = self._get_default_values() - metadata = {'key11': 'value1', - 'key12': 'value2'} policies = ['policy1', 'policy2'] members = ['instance_id1', 'instance_id2'] db_result = self._create_instance_group(self.context, values, - 
metadata=metadata, policies=policies, members=members) obj_result = instance_group.InstanceGroup.get_by_uuid(self.context, db_result.uuid) - self.assertEqual(obj_result.metadetails, metadata) self.assertEqual(obj_result.members, members) self.assertEqual(obj_result.policies, policies) @@ -105,18 +101,6 @@ def test_save_members(self): result = db.instance_group_get(self.context, db_result['uuid']) self.assertEqual(result['members'], members) - def test_save_metadata(self): - values = self._get_default_values() - db_result = self._create_instance_group(self.context, values) - obj_result = instance_group.InstanceGroup.get_by_uuid(self.context, - db_result.uuid) - metadata = {'foo': 'bar'} - obj_result.metadetails = metadata - obj_result.save() - db.instance_group_metadata_get(self.context, db_result['uuid']) - for key, value in metadata.iteritems(): - self.assertEqual(value, metadata[key]) - def test_create(self): group1 = instance_group.InstanceGroup() group1.uuid = 'fake-uuid' @@ -150,17 +134,6 @@ def test_create_with_members(self): self.assertEqual(group1.id, group2.id) self.assertEqual(group1.members, group2.members) - def test_create_with_metadata(self): - group1 = instance_group.InstanceGroup() - metadata = {'foo': 'bar'} - group1.metadetails = metadata - group1.create(self.context) - group2 = instance_group.InstanceGroup.get_by_uuid(self.context, - group1.uuid) - self.assertEqual(group1.id, group2.id) - for key, value in metadata.iteritems(): - self.assertEqual(value, group2.metadetails[key]) - def test_recreate_fails(self): group = instance_group.InstanceGroup() group.create(self.context) diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index 278e8fe854..59fe895d27 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -924,7 +924,7 @@ def test_object_serialization_iterables(self): 'InstanceExternalEvent': '1.0-f1134523654407a875fd59b80f759ee7', 'InstanceFault': 
'1.2-313438e37e9d358f3566c85f6ddb2d3e', 'InstanceFaultList': '1.1-bd578be60d045629ca7b3ce1a2493ae4', - 'InstanceGroup': '1.6-c032430832b3cbaf92c99088e4b2fdc8', + 'InstanceGroup': '1.7-b31ea31fdb452ab7810adbe789244f91', 'InstanceGroupList': '1.2-bebd07052779ae3b47311efe85428a8b', 'InstanceInfoCache': '1.5-ef64b604498bfa505a8c93747a9d8b2f', 'InstanceList': '1.6-78800140a5f9818ab00f8c052437655f', From 2aed78705763edb723a4fd4598eae855b5847650 Mon Sep 17 00:00:00 2001 From: Michael Turek Date: Mon, 4 Aug 2014 19:04:04 +0000 Subject: [PATCH 331/486] More informative nova-scheduler log after NoValidHost is caught. This patch adds a warning log entry for when a NoValidHost exception causes a scheduling error. The NoValidHost exception's message will be logged before reporting that the machine went to error. Previously, all other exceptions would show a stack trace. However, since the NoValidHost case is skipped, a user may not understand why his/her machine went to an ERROR state. This patch seeks to remove any confusion. Change-Id: I0f01b1095251f5c659fe010c7376f3f6bc56babe Closes-Bug: 1336977 --- nova/scheduler/driver.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 3766ec2e85..d1a2fd4245 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -27,7 +27,7 @@ from nova.compute import vm_states from nova import db from nova import exception -from nova.i18n import _ +from nova.i18n import _, _LW from nova import notifications from nova.openstack.common import importutils from nova.openstack.common import log as logging @@ -52,7 +52,11 @@ def handle_schedule_error(context, ex, instance_uuid, request_spec): send notifications. 
""" - if not isinstance(ex, exception.NoValidHost): + if isinstance(ex, exception.NoValidHost): + LOG.warning(_LW("NoValidHost exception with message: \'%s\'"), + ex.format_message().strip(), + instance_uuid=instance_uuid) + else: LOG.exception(_("Exception during scheduler.run_instance")) state = vm_states.ERROR.upper() LOG.warning(_('Setting instance to %s state.'), state, From 17f9f14d8b5e68c1a945bbeb5ff674a0e1341012 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 12 Aug 2014 06:05:59 +0000 Subject: [PATCH 332/486] Imported Translations from Transifex Change-Id: If8ec4cd5d057a56bb9aca62bac112f163b1e665c --- nova/locale/de/LC_MESSAGES/nova-log-info.po | 111 +- .../locale/en_AU/LC_MESSAGES/nova-log-info.po | 111 +- .../en_GB/LC_MESSAGES/nova-log-error.po | 367 +- .../locale/en_GB/LC_MESSAGES/nova-log-info.po | 140 +- nova/locale/en_US/LC_MESSAGES/nova.po | 3128 +++++++--------- .../es/LC_MESSAGES/nova-log-critical.po | 13 +- nova/locale/es/LC_MESSAGES/nova-log-error.po | 388 +- nova/locale/es/LC_MESSAGES/nova-log-info.po | 113 +- .../locale/es/LC_MESSAGES/nova-log-warning.po | 279 +- nova/locale/es/LC_MESSAGES/nova.po | 3166 +++++++---------- .../fr/LC_MESSAGES/nova-log-critical.po | 14 +- nova/locale/fr/LC_MESSAGES/nova-log-error.po | 367 +- nova/locale/fr/LC_MESSAGES/nova-log-info.po | 113 +- nova/locale/it/LC_MESSAGES/nova-log-info.po | 113 +- nova/locale/ja/LC_MESSAGES/nova-log-error.po | 367 +- nova/locale/ja/LC_MESSAGES/nova-log-info.po | 111 +- .../ko_KR/LC_MESSAGES/nova-log-error.po | 367 +- .../locale/ko_KR/LC_MESSAGES/nova-log-info.po | 111 +- nova/locale/nova-log-critical.pot | 13 +- nova/locale/nova-log-error.pot | 370 +- nova/locale/nova-log-info.pot | 113 +- nova/locale/nova-log-warning.pot | 275 +- nova/locale/nova.pot | 3085 +++++++--------- .../pt_BR/LC_MESSAGES/nova-log-error.po | 367 +- .../locale/pt_BR/LC_MESSAGES/nova-log-info.po | 111 +- .../zh_CN/LC_MESSAGES/nova-log-error.po | 367 +- 
.../locale/zh_CN/LC_MESSAGES/nova-log-info.po | 111 +- .../locale/zh_TW/LC_MESSAGES/nova-log-info.po | 111 +- 28 files changed, 8144 insertions(+), 6158 deletions(-) diff --git a/nova/locale/de/LC_MESSAGES/nova-log-info.po b/nova/locale/de/LC_MESSAGES/nova-log-info.po index 6101301ce3..f3dd924aa1 100644 --- a/nova/locale/de/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/de/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: German (http://www.transifex.com/projects/p/nova/language/" @@ -19,12 +19,51 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -#: nova/compute/manager.py:5422 +#: nova/api/openstack/__init__.py:101 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: nova/api/openstack/__init__.py:294 +msgid "V3 API has been disabled by configuration" +msgstr "" + +#: nova/api/openstack/wsgi.py:688 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: nova/api/openstack/wsgi.py:691 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_networks.py:101 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: nova/compute/manager.py:5452 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." msgstr "" +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36 +msgid "" +"Skipped adding reservations_deleted_expire_idx because an equivalent index " +"already exists." 
+msgstr "" + +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58 +msgid "" +"Skipped removing reservations_deleted_expire_idx because index does not " +"exist." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" @@ -109,139 +148,143 @@ msgstr "Lösche doppelte Zeile mit der ID %(id)s aus der Tabelle %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/firewall.py:446 +#: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/libvirt/driver.py:839 +#: nova/virt/disk/vfs/guestfs.py:137 +msgid "Unable to force TCG mode, libguestfs too old?" +msgstr "" + +#: nova/virt/libvirt/driver.py:837 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:958 +#: nova/virt/libvirt/driver.py:950 msgid "Instance destroyed successfully." msgstr "" -#: nova/virt/libvirt/driver.py:968 +#: nova/virt/libvirt/driver.py:960 msgid "Instance may be started again." msgstr "" -#: nova/virt/libvirt/driver.py:978 +#: nova/virt/libvirt/driver.py:970 msgid "Going to destroy instance again." 
msgstr "" -#: nova/virt/libvirt/driver.py:1574 +#: nova/virt/libvirt/driver.py:1570 msgid "Beginning live snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1577 +#: nova/virt/libvirt/driver.py:1573 msgid "Beginning cold snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1606 +#: nova/virt/libvirt/driver.py:1602 msgid "Snapshot extracted, beginning image upload" msgstr "" -#: nova/virt/libvirt/driver.py:1618 +#: nova/virt/libvirt/driver.py:1614 msgid "Snapshot image upload complete" msgstr "" -#: nova/virt/libvirt/driver.py:2049 +#: nova/virt/libvirt/driver.py:2047 msgid "Instance soft rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2092 +#: nova/virt/libvirt/driver.py:2090 msgid "Instance shutdown successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2100 +#: nova/virt/libvirt/driver.py:2098 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" -#: nova/virt/libvirt/driver.py:2168 +#: nova/virt/libvirt/driver.py:2167 msgid "Instance rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2336 +#: nova/virt/libvirt/driver.py:2335 msgid "Instance spawned successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:2352 +#: nova/virt/libvirt/driver.py:2351 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 +#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" -#: nova/virt/libvirt/driver.py:2645 +#: nova/virt/libvirt/driver.py:2646 msgid "Creating image" msgstr "" -#: nova/virt/libvirt/driver.py:2754 +#: nova/virt/libvirt/driver.py:2772 msgid "Using config drive" msgstr "" -#: nova/virt/libvirt/driver.py:2763 +#: nova/virt/libvirt/driver.py:2781 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/libvirt/driver.py:3315 +#: nova/virt/libvirt/driver.py:3334 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4193 +#: nova/virt/libvirt/driver.py:4217 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4199 +#: nova/virt/libvirt/driver.py:4223 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" msgstr "" -#: nova/virt/libvirt/driver.py:4441 +#: nova/virt/libvirt/driver.py:4465 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5174 +#: nova/virt/libvirt/driver.py:5207 msgid "Instance running successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:5414 +#: nova/virt/libvirt/driver.py:5481 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5426 +#: nova/virt/libvirt/driver.py:5494 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5429 +#: nova/virt/libvirt/driver.py:5498 #, python-format msgid "Deletion of %s complete" msgstr "" -#: nova/virt/libvirt/firewall.py:105 +#: nova/virt/libvirt/firewall.py:106 msgid "Called setup_basic_filtering in nwfilter" msgstr "" -#: nova/virt/libvirt/firewall.py:113 +#: nova/virt/libvirt/firewall.py:114 msgid "Ensuring static filters" msgstr "" -#: nova/virt/libvirt/firewall.py:304 +#: nova/virt/libvirt/firewall.py:305 msgid "Attempted to unfilter instance which is not filtered" msgstr "" @@ -298,11 +341,11 @@ msgstr "" msgid "Removable base files: %s" msgstr "" -#: nova/virt/libvirt/utils.py:536 +#: nova/virt/libvirt/utils.py:490 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1353 +#: nova/virt/xenapi/vm_utils.py:1355 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po b/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po index 9f8d2df148..06d38f2769 100644 --- a/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: English (Australia) (http://www.transifex.com/projects/p/nova/" @@ -19,12 +19,51 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -#: nova/compute/manager.py:5422 +#: nova/api/openstack/__init__.py:101 +#, python-format +msgid "%(url)s 
returned with HTTP %(status)d" +msgstr "" + +#: nova/api/openstack/__init__.py:294 +msgid "V3 API has been disabled by configuration" +msgstr "" + +#: nova/api/openstack/wsgi.py:688 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: nova/api/openstack/wsgi.py:691 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_networks.py:101 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: nova/compute/manager.py:5452 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." msgstr "" +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36 +msgid "" +"Skipped adding reservations_deleted_expire_idx because an equivalent index " +"already exists." +msgstr "" + +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58 +msgid "" +"Skipped removing reservations_deleted_expire_idx because index does not " +"exist." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" @@ -107,139 +146,143 @@ msgstr "Deleting duplicated row with id: %(id)s from table: %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/firewall.py:446 +#: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/libvirt/driver.py:839 +#: nova/virt/disk/vfs/guestfs.py:137 +msgid "Unable to force TCG mode, libguestfs too old?" 
+msgstr "" + +#: nova/virt/libvirt/driver.py:837 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:958 +#: nova/virt/libvirt/driver.py:950 msgid "Instance destroyed successfully." msgstr "" -#: nova/virt/libvirt/driver.py:968 +#: nova/virt/libvirt/driver.py:960 msgid "Instance may be started again." msgstr "" -#: nova/virt/libvirt/driver.py:978 +#: nova/virt/libvirt/driver.py:970 msgid "Going to destroy instance again." msgstr "" -#: nova/virt/libvirt/driver.py:1574 +#: nova/virt/libvirt/driver.py:1570 msgid "Beginning live snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1577 +#: nova/virt/libvirt/driver.py:1573 msgid "Beginning cold snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1606 +#: nova/virt/libvirt/driver.py:1602 msgid "Snapshot extracted, beginning image upload" msgstr "" -#: nova/virt/libvirt/driver.py:1618 +#: nova/virt/libvirt/driver.py:1614 msgid "Snapshot image upload complete" msgstr "" -#: nova/virt/libvirt/driver.py:2049 +#: nova/virt/libvirt/driver.py:2047 msgid "Instance soft rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2092 +#: nova/virt/libvirt/driver.py:2090 msgid "Instance shutdown successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2100 +#: nova/virt/libvirt/driver.py:2098 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" -#: nova/virt/libvirt/driver.py:2168 +#: nova/virt/libvirt/driver.py:2167 msgid "Instance rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2336 +#: nova/virt/libvirt/driver.py:2335 msgid "Instance spawned successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:2352 +#: nova/virt/libvirt/driver.py:2351 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 +#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" -#: nova/virt/libvirt/driver.py:2645 +#: nova/virt/libvirt/driver.py:2646 msgid "Creating image" msgstr "" -#: nova/virt/libvirt/driver.py:2754 +#: nova/virt/libvirt/driver.py:2772 msgid "Using config drive" msgstr "" -#: nova/virt/libvirt/driver.py:2763 +#: nova/virt/libvirt/driver.py:2781 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/libvirt/driver.py:3315 +#: nova/virt/libvirt/driver.py:3334 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4193 +#: nova/virt/libvirt/driver.py:4217 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4199 +#: nova/virt/libvirt/driver.py:4223 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" msgstr "" -#: nova/virt/libvirt/driver.py:4441 +#: nova/virt/libvirt/driver.py:4465 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5174 +#: nova/virt/libvirt/driver.py:5207 msgid "Instance running successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:5414 +#: nova/virt/libvirt/driver.py:5481 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5426 +#: nova/virt/libvirt/driver.py:5494 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5429 +#: nova/virt/libvirt/driver.py:5498 #, python-format msgid "Deletion of %s complete" msgstr "" -#: nova/virt/libvirt/firewall.py:105 +#: nova/virt/libvirt/firewall.py:106 msgid "Called setup_basic_filtering in nwfilter" msgstr "" -#: nova/virt/libvirt/firewall.py:113 +#: nova/virt/libvirt/firewall.py:114 msgid "Ensuring static filters" msgstr "" -#: nova/virt/libvirt/firewall.py:304 +#: nova/virt/libvirt/firewall.py:305 msgid "Attempted to unfilter instance which is not filtered" msgstr "" @@ -296,11 +339,11 @@ msgstr "" msgid "Removable base files: %s" msgstr "" -#: nova/virt/libvirt/utils.py:536 +#: nova/virt/libvirt/utils.py:490 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1353 +#: nova/virt/xenapi/vm_utils.py:1355 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po b/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po index 0219e8f891..482a94af32 100644 --- a/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:04+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" @@ -39,16 +39,265 @@ msgstr "" msgid "Exception running %(name)s post-hook: %(obj)s" msgstr "" -#: nova/api/ec2/__init__.py:243 +#: nova/api/ec2/__init__.py:244 #, python-format msgid "Keystone failure: %s" 
msgstr "" -#: nova/compute/manager.py:5416 +#: nova/api/ec2/__init__.py:493 +#, python-format +msgid "Unexpected %(ex_name)s raised: %(ex_str)s" +msgstr "" + +#: nova/api/ec2/__init__.py:520 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: nova/api/metadata/handler.py:155 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/metadata/handler.py:212 +#, python-format +msgid "Failed to get metadata for instance id: %s" +msgstr "" + +#: nova/api/openstack/common.py:134 +#, python-format +msgid "" +"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad " +"upgrade or db corrupted?" +msgstr "" + +#: nova/api/openstack/wsgi.py:684 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:68 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:90 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:112 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:134 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:160 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:179 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:198 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:215 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:234 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:392 +#, python-format +msgid 
"Compute.api::resetState %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/multinic.py:85 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85 +msgid "Failed to get default networks" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125 +msgid "Failed to update usages deallocating network." +msgstr "" + +#: nova/compute/api.py:561 +msgid "Failed to set instance name using multi_instance_display_name_template." +msgstr "" + +#: nova/compute/api.py:1429 +msgid "" +"Something wrong happened when trying to delete snapshot from shelved " +"instance." +msgstr "" + +#: nova/compute/api.py:3732 +msgid "Failed to update usages deallocating security group" +msgstr "" + +#: nova/compute/flavors.py:167 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: nova/compute/flavors.py:178 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: nova/compute/manager.py:366 +#, python-format +msgid "Error while trying to clean up image %s" +msgstr "" + +#: nova/compute/manager.py:755 +msgid "Failed to check if instance shared" +msgstr "" + +#: nova/compute/manager.py:821 nova/compute/manager.py:872 +msgid "Failed to complete a deletion" +msgstr "" + +#: nova/compute/manager.py:913 +msgid "Failed to stop instance" +msgstr "" + +#: nova/compute/manager.py:925 +msgid "Failed to start instance" +msgstr "" + +#: nova/compute/manager.py:950 +msgid "Failed to revert crashed migration" +msgstr "" + +#: nova/compute/manager.py:1364 +msgid "Failed to dealloc network for deleted instance" +msgstr "" + +#: nova/compute/manager.py:1385 +msgid "Failed to dealloc network for failed instance" +msgstr "" + +#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +msgid "Error trying to reschedule" +msgstr "" + +#: nova/compute/manager.py:1567 +#, python-format +msgid "Instance failed network setup after %(attempts)d attempt(s)" +msgstr "" + +#: 
nova/compute/manager.py:1755 +msgid "Instance failed block device setup" +msgstr "" + +#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 +#: nova/compute/manager.py:4058 +msgid "Instance failed to spawn" +msgstr "" + +#: nova/compute/manager.py:1957 +msgid "Unexpected build failure, not rescheduling build." +msgstr "" + +#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +msgid "Failed to allocate network(s)" +msgstr "" + +#: nova/compute/manager.py:2104 +msgid "Failure prepping block device" +msgstr "" + +#: nova/compute/manager.py:2137 +msgid "Failed to deallocate networks" +msgstr "" + +#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 +#: nova/compute/manager.py:5803 +msgid "Setting instance vm_state to ERROR" +msgstr "" + +#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#, python-format +msgid "Failed to get compute_info for %s" +msgstr "" + +#: nova/compute/manager.py:3005 +#, python-format +msgid "set_admin_password failed: %s" +msgstr "" + +#: nova/compute/manager.py:3090 +msgid "Error trying to Rescue Instance" +msgstr "" + +#: nova/compute/manager.py:3711 +#, python-format +msgid "Failed to rollback quota for failed finish_resize: %s" +msgstr "" + +#: nova/compute/manager.py:4310 +#, python-format +msgid "Failed to attach %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4349 +#, python-format +msgid "Failed to detach volume %(volume_id)s from %(mp)s" +msgstr "" + +#: nova/compute/manager.py:4422 +#, python-format +msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" +msgstr "" + +#: nova/compute/manager.py:4429 +#, python-format +msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4716 +#, python-format +msgid "Pre live migration failed at %s" +msgstr "" + +#: nova/compute/manager.py:5216 +msgid "Periodic task failed to offload instance." 
+msgstr "" + +#: nova/compute/manager.py:5256 +#, python-format +msgid "Failed to generate usage audit for instance on host %s" +msgstr "" + +#: nova/compute/manager.py:5446 msgid "" "Periodic sync_power_state task had an error while processing an instance." msgstr "" +#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 +#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +msgid "error during stop() in sync_power_state." +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "Failed to notify cells of instance fault" @@ -68,11 +317,11 @@ msgstr "Unexpected exception occurred %d time(s)... retrying." msgid "Could not release the acquired lock `%s`" msgstr "" -#: nova/openstack/common/loopingcall.py:89 +#: nova/openstack/common/loopingcall.py:95 msgid "in fixed duration looping call" msgstr "in fixed duration looping call" -#: nova/openstack/common/loopingcall.py:136 +#: nova/openstack/common/loopingcall.py:138 msgid "in dynamic looping call" msgstr "in dynamic looping call" @@ -121,137 +370,151 @@ msgstr "DB exception wrapped." msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:639 +#: nova/pci/pci_stats.py:119 +msgid "" +"Failed to allocate PCI devices for instance. Unassigning devices back to " +"pools. This should not happen, since the scheduler should have accurate " +"information, and allocation during claims is controlled via a hold on the " +"compute node semaphore" +msgstr "" + +#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109 +#, python-format +msgid "PCI device %s not found" +msgstr "" + +#: nova/virt/disk/api.py:388 +#, python-format +msgid "" +"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:641 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." 
msgstr "" -#: nova/virt/libvirt/driver.py:764 +#: nova/virt/libvirt/driver.py:766 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:937 +#: nova/virt/libvirt/driver.py:929 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1015 +#: nova/virt/libvirt/driver.py:1007 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1041 +#: nova/virt/libvirt/driver.py:1035 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1445 +#: nova/virt/libvirt/driver.py:1438 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1470 +#: nova/virt/libvirt/driver.py:1465 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1719 +#: nova/virt/libvirt/driver.py:1717 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1827 +#: nova/virt/libvirt/driver.py:1825 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1833 +#: nova/virt/libvirt/driver.py:1831 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1882 +#: nova/virt/libvirt/driver.py:1880 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2028 +#: nova/virt/libvirt/driver.py:2026 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 +#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2619 +#: nova/virt/libvirt/driver.py:2620 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2770 +#: nova/virt/libvirt/driver.py:2788 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2863 +#: nova/virt/libvirt/driver.py:2881 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3645 +#: nova/virt/libvirt/driver.py:3680 #, python-format -msgid "An error occurred while trying to define a domain with xml: %s" +msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3654 +#: nova/virt/libvirt/driver.py:3684 #, python-format -msgid "An error occurred while trying to launch a defined domain with xml: %s" +msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3663 +#: nova/virt/libvirt/driver.py:3689 #, python-format -msgid "An error occurred while enabling hairpin mode on domain with xml: %s" +msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3681 +#: nova/virt/libvirt/driver.py:3703 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3988 +#: nova/virt/libvirt/driver.py:4012 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4667 +#: nova/virt/libvirt/driver.py:4691 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5419 +#: nova/virt/libvirt/driver.py:5487 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:201 +#: nova/virt/libvirt/imagebackend.py:200 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:229 +#: nova/virt/libvirt/imagebackend.py:227 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:505 -#, python-format -msgid "error opening rbd image %s" -msgstr "" - #: nova/virt/libvirt/imagecache.py:130 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" @@ -272,15 +535,19 @@ msgstr "" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 -#: nova/virt/libvirt/vif.py:562 +#: nova/virt/libvirt/rbd.py:62 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 +#: nova/virt/libvirt/vif.py:533 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 -#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 -#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 -#: nova/virt/libvirt/vif.py:737 +#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 +#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 +#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 msgid "Failed while unplugging vif" msgstr "" @@ -299,8 +566,18 @@ msgstr "" msgid "Couldn't unmount the GlusterFS share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:500 +#: nova/virt/vmwareapi/vmops.py:509 #, python-format msgid "" "Failed to copy cached 
image %(source)s to %(dest)s for resize: %(error)s" msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1553 +#, python-format +msgid "Attaching network adapter failed. Exception: %s" +msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1593 +#, python-format +msgid "Detaching network adapter failed. Exception: %s" +msgstr "" diff --git a/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po b/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po index dfc10157d2..609e89ce4d 100644 --- a/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po @@ -3,12 +3,13 @@ # This file is distributed under the same license as the nova project. # # Translators: +# Andi Chandler , 2014 msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" -"PO-Revision-Date: 2014-07-16 14:42+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"PO-Revision-Date: 2014-08-07 07:51+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" "nova/language/en_GB/)\n" @@ -19,11 +20,51 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -#: nova/compute/manager.py:5422 +#: nova/api/openstack/__init__.py:101 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s returned with HTTP %(status)d" + +#: nova/api/openstack/__init__.py:294 +msgid "V3 API has been disabled by configuration" +msgstr "V3 API has been disabled by configuration" + +#: nova/api/openstack/wsgi.py:688 +#, python-format +msgid "Fault thrown: %s" +msgstr "Fault thrown: %s" + +#: nova/api/openstack/wsgi.py:691 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "HTTP exception thrown: %s" + +#: nova/api/openstack/compute/contrib/os_networks.py:101 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128 +#, python-format +msgid "Deleting network with id %s" +msgstr "Deleting network with id %s" + 
+#: nova/compute/manager.py:5452 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." msgstr "" +"During sync_power_state the instance has a pending task (%(task)s). Skip." + +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36 +msgid "" +"Skipped adding reservations_deleted_expire_idx because an equivalent index " +"already exists." +msgstr "" + +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58 +msgid "" +"Skipped removing reservations_deleted_expire_idx because index does not " +"exist." +msgstr "" #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format @@ -38,7 +79,7 @@ msgstr "Created lock path: %s" #: nova/openstack/common/lockutils.py:251 #, python-format msgid "Failed to remove file %(file)s" -msgstr "" +msgstr "Failed to remove file %(file)s" #: nova/openstack/common/periodic_task.py:126 #, python-format @@ -90,7 +131,7 @@ msgstr "Caught %s, stopping children" #: nova/openstack/common/service.py:403 msgid "Wait called after thread killed. Cleaning up." -msgstr "" +msgstr "Wait called after thread killed. Cleaning up." #: nova/openstack/common/service.py:414 #, python-format @@ -105,102 +146,109 @@ msgstr "Deleting duplicated row with id: %(id)s from table: %(table)s" #: nova/scheduler/filters/utils.py:50 #, python-format msgid "%(num_values)d values found, of which the minimum value will be used." -msgstr "" +msgstr "%(num_values)d values found, of which the minimum value will be used." 
-#: nova/virt/firewall.py:446 +#: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" -msgstr "" +msgstr "instance chain %s disappeared during refresh, skipping" -#: nova/virt/libvirt/driver.py:839 +#: nova/virt/disk/vfs/guestfs.py:137 +msgid "Unable to force TCG mode, libguestfs too old?" +msgstr "Unable to force TCG mode, libguestfs too old?" + +#: nova/virt/libvirt/driver.py:837 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" +"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" -#: nova/virt/libvirt/driver.py:958 +#: nova/virt/libvirt/driver.py:950 msgid "Instance destroyed successfully." msgstr "Instance destroyed successfully." -#: nova/virt/libvirt/driver.py:968 +#: nova/virt/libvirt/driver.py:960 msgid "Instance may be started again." msgstr "Instance may be started again." -#: nova/virt/libvirt/driver.py:978 +#: nova/virt/libvirt/driver.py:970 msgid "Going to destroy instance again." msgstr "Going to destroy instance again." -#: nova/virt/libvirt/driver.py:1574 +#: nova/virt/libvirt/driver.py:1570 msgid "Beginning live snapshot process" msgstr "Beginning live snapshot process" -#: nova/virt/libvirt/driver.py:1577 +#: nova/virt/libvirt/driver.py:1573 msgid "Beginning cold snapshot process" msgstr "Beginning cold snapshot process" -#: nova/virt/libvirt/driver.py:1606 +#: nova/virt/libvirt/driver.py:1602 msgid "Snapshot extracted, beginning image upload" msgstr "Snapshot extracted, beginning image upload" -#: nova/virt/libvirt/driver.py:1618 +#: nova/virt/libvirt/driver.py:1614 msgid "Snapshot image upload complete" msgstr "Snapshot image upload complete" -#: nova/virt/libvirt/driver.py:2049 +#: nova/virt/libvirt/driver.py:2047 msgid "Instance soft rebooted successfully." msgstr "Instance soft rebooted successfully." 
-#: nova/virt/libvirt/driver.py:2092 +#: nova/virt/libvirt/driver.py:2090 msgid "Instance shutdown successfully." msgstr "Instance shutdown successfully." -#: nova/virt/libvirt/driver.py:2100 +#: nova/virt/libvirt/driver.py:2098 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "Instance may have been rebooted during soft reboot, so return now." -#: nova/virt/libvirt/driver.py:2168 +#: nova/virt/libvirt/driver.py:2167 msgid "Instance rebooted successfully." msgstr "Instance rebooted successfully." -#: nova/virt/libvirt/driver.py:2336 +#: nova/virt/libvirt/driver.py:2335 msgid "Instance spawned successfully." msgstr "Instance spawned successfully." -#: nova/virt/libvirt/driver.py:2352 +#: nova/virt/libvirt/driver.py:2351 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "data: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 +#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Truncated console log returned, %d bytes ignored" -#: nova/virt/libvirt/driver.py:2645 +#: nova/virt/libvirt/driver.py:2646 msgid "Creating image" msgstr "Creating image" -#: nova/virt/libvirt/driver.py:2754 +#: nova/virt/libvirt/driver.py:2772 msgid "Using config drive" msgstr "Using config drive" -#: nova/virt/libvirt/driver.py:2763 +#: nova/virt/libvirt/driver.py:2781 #, python-format msgid "Creating config drive at %(path)s" msgstr "Creating config drive at %(path)s" -#: nova/virt/libvirt/driver.py:3315 +#: nova/virt/libvirt/driver.py:3334 msgid "Configuring timezone for windows instance to localtime" -msgstr "" +msgstr "Configuring timezone for windows instance to localtime" -#: nova/virt/libvirt/driver.py:4193 +#: nova/virt/libvirt/driver.py:4217 #, python-format msgid "" "Getting block stats failed, device might have been detached. 
Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" +"Getting block stats failed, device might have been detached. Instance=" +"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" -#: nova/virt/libvirt/driver.py:4199 +#: nova/virt/libvirt/driver.py:4223 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -209,39 +257,39 @@ msgstr "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" -#: nova/virt/libvirt/driver.py:4441 +#: nova/virt/libvirt/driver.py:4465 #, python-format msgid "Instance launched has CPU info: %s" -msgstr "" +msgstr "Instance launched has CPU info: %s" -#: nova/virt/libvirt/driver.py:5174 +#: nova/virt/libvirt/driver.py:5207 msgid "Instance running successfully." msgstr "Instance running successfully." -#: nova/virt/libvirt/driver.py:5414 +#: nova/virt/libvirt/driver.py:5481 #, python-format msgid "Deleting instance files %s" -msgstr "" +msgstr "Deleting instance files %s" -#: nova/virt/libvirt/driver.py:5426 +#: nova/virt/libvirt/driver.py:5494 #, python-format msgid "Deletion of %s failed" -msgstr "" +msgstr "Deletion of %s failed" -#: nova/virt/libvirt/driver.py:5429 +#: nova/virt/libvirt/driver.py:5498 #, python-format msgid "Deletion of %s complete" -msgstr "" +msgstr "Deletion of %s complete" -#: nova/virt/libvirt/firewall.py:105 +#: nova/virt/libvirt/firewall.py:106 msgid "Called setup_basic_filtering in nwfilter" msgstr "Called setup_basic_filtering in nwfilter" -#: nova/virt/libvirt/firewall.py:113 +#: nova/virt/libvirt/firewall.py:114 msgid "Ensuring static filters" msgstr "Ensuring static filters" -#: nova/virt/libvirt/firewall.py:304 +#: nova/virt/libvirt/firewall.py:305 msgid "Attempted to unfilter instance which is not filtered" msgstr "Attempted to unfilter instance which is not filtered" @@ -301,13 +349,15 @@ msgstr "Corrupt base files: %s" msgid "Removable base files: %s" msgstr "Removable base 
files: %s" -#: nova/virt/libvirt/utils.py:536 +#: nova/virt/libvirt/utils.py:490 msgid "findmnt tool is not installed" -msgstr "" +msgstr "findmnt tool is not installed" -#: nova/virt/xenapi/vm_utils.py:1353 +#: nova/virt/xenapi/vm_utils.py:1355 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " "duration: %(duration).2f secs for image %(image_id)s" msgstr "" +"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " +"duration: %(duration).2f secs for image %(image_id)s" diff --git a/nova/locale/en_US/LC_MESSAGES/nova.po b/nova/locale/en_US/LC_MESSAGES/nova.po index 394e7283b2..e3f9f93888 100644 --- a/nova/locale/en_US/LC_MESSAGES/nova.po +++ b/nova/locale/en_US/LC_MESSAGES/nova.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Nova\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/nova\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2013-01-21 18:28+0000\n" "Last-Translator: Jeremy Stanley \n" "Language-Team: en_US \n" @@ -17,39 +17,39 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: nova/block_device.py:100 +#: nova/block_device.py:102 msgid "Some fields are invalid." msgstr "" -#: nova/block_device.py:110 +#: nova/block_device.py:112 msgid "Some required fields are missing" msgstr "" -#: nova/block_device.py:126 +#: nova/block_device.py:128 msgid "Boot index is invalid." msgstr "" -#: nova/block_device.py:169 +#: nova/block_device.py:171 msgid "Unrecognized legacy format." msgstr "" -#: nova/block_device.py:186 +#: nova/block_device.py:188 msgid "Invalid source_type field." msgstr "" -#: nova/block_device.py:190 +#: nova/block_device.py:192 msgid "Missing device UUID." msgstr "" -#: nova/block_device.py:369 +#: nova/block_device.py:371 msgid "Device name empty or too long." msgstr "" -#: nova/block_device.py:373 +#: nova/block_device.py:375 msgid "Device name contains spaces." 
msgstr "" -#: nova/block_device.py:383 +#: nova/block_device.py:385 msgid "Invalid volume_size." msgstr "" @@ -333,7 +333,7 @@ msgstr "" msgid "Group not valid. Reason: %(reason)s" msgstr "Group not valid. Reason: %(reason)s" -#: nova/exception.py:345 nova/openstack/common/db/sqlalchemy/utils.py:58 +#: nova/exception.py:345 nova/openstack/common/db/sqlalchemy/utils.py:57 msgid "Sort key supplied was not valid." msgstr "Sort key supplied was not valid." @@ -705,60 +705,65 @@ msgid "" msgstr "" #: nova/exception.py:654 +#, python-format +msgid "Physical network is missing for network %(network_uuid)s" +msgstr "" + +#: nova/exception.py:658 msgid "Could not find the datastore reference(s) which the VM uses." msgstr "Could not find the datastore reference(s) which the VM uses." -#: nova/exception.py:658 +#: nova/exception.py:662 #, python-format msgid "Port %(port_id)s is still in use." msgstr "Port %(port_id)s is still in use." -#: nova/exception.py:662 +#: nova/exception.py:666 #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "" -#: nova/exception.py:666 +#: nova/exception.py:670 #, fuzzy, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "Network could not be found for instance %(instance_id)s." -#: nova/exception.py:670 +#: nova/exception.py:674 #, fuzzy, python-format msgid "No free port available for instance %(instance)s." msgstr "Network could not be found for instance %(instance_id)s." -#: nova/exception.py:674 +#: nova/exception.py:678 #, python-format msgid "Fixed ip %(address)s already exists." msgstr "" -#: nova/exception.py:678 +#: nova/exception.py:682 #, python-format msgid "No fixed IP associated with id %(id)s." msgstr "No fixed IP associated with id %(id)s." -#: nova/exception.py:682 +#: nova/exception.py:686 #, python-format msgid "Fixed ip not found for address %(address)s." msgstr "Fixed ip not found for address %(address)s." 
-#: nova/exception.py:686 +#: nova/exception.py:690 #, python-format msgid "Instance %(instance_uuid)s has zero fixed ips." msgstr "Instance %(instance_uuid)s has zero fixed ips." -#: nova/exception.py:690 +#: nova/exception.py:694 #, python-format msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." msgstr "Network host %(host)s has zero fixed ips in network %(network_id)s." -#: nova/exception.py:695 +#: nova/exception.py:699 #, python-format msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'." msgstr "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'." -#: nova/exception.py:699 +#: nova/exception.py:703 #, python-format msgid "" "Fixed IP address (%(address)s) does not exist in network " @@ -767,7 +772,7 @@ msgstr "" "Fixed IP address (%(address)s) does not exist in network " "(%(network_uuid)s)." -#: nova/exception.py:704 +#: nova/exception.py:708 #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance " @@ -776,126 +781,126 @@ msgstr "" "Fixed IP address %(address)s is already in use on instance " "%(instance_uuid)s." -#: nova/exception.py:709 +#: nova/exception.py:713 #, python-format msgid "More than one instance is associated with fixed ip address '%(address)s'." msgstr "More than one instance is associated with fixed ip address '%(address)s'." -#: nova/exception.py:714 +#: nova/exception.py:718 #, python-format msgid "Fixed IP address %(address)s is invalid." msgstr "Fixed IP address %(address)s is invalid." -#: nova/exception.py:719 +#: nova/exception.py:723 msgid "Zero fixed ips available." msgstr "Zero fixed ips available." -#: nova/exception.py:723 +#: nova/exception.py:727 msgid "Zero fixed ips could be found." msgstr "Zero fixed ips could be found." -#: nova/exception.py:727 +#: nova/exception.py:731 #, python-format msgid "Floating ip %(address)s already exists." msgstr "Floating ip %(address)s already exists." 
-#: nova/exception.py:732 +#: nova/exception.py:736 #, python-format msgid "Floating ip not found for id %(id)s." msgstr "Floating ip not found for id %(id)s." -#: nova/exception.py:736 +#: nova/exception.py:740 #, python-format msgid "The DNS entry %(name)s already exists in domain %(domain)s." msgstr "The DNS entry %(name)s already exists in domain %(domain)s." -#: nova/exception.py:740 +#: nova/exception.py:744 #, python-format msgid "Floating ip not found for address %(address)s." msgstr "Floating ip not found for address %(address)s." -#: nova/exception.py:744 +#: nova/exception.py:748 #, python-format msgid "Floating ip not found for host %(host)s." msgstr "Floating ip not found for host %(host)s." -#: nova/exception.py:748 +#: nova/exception.py:752 #, python-format msgid "Multiple floating ips are found for address %(address)s." msgstr "Multiple floating ips are found for address %(address)s." -#: nova/exception.py:752 +#: nova/exception.py:756 msgid "Floating ip pool not found." msgstr "Floating ip pool not found." -#: nova/exception.py:757 +#: nova/exception.py:761 msgid "Zero floating ips available." msgstr "Zero floating ips available." -#: nova/exception.py:763 +#: nova/exception.py:767 #, python-format msgid "Floating ip %(address)s is associated." msgstr "Floating ip %(address)s is associated." -#: nova/exception.py:767 +#: nova/exception.py:771 #, python-format msgid "Floating ip %(address)s is not associated." msgstr "Floating ip %(address)s is not associated." -#: nova/exception.py:771 +#: nova/exception.py:775 msgid "Zero floating ips exist." msgstr "Zero floating ips exist." -#: nova/exception.py:776 +#: nova/exception.py:780 #, python-format msgid "Interface %(interface)s not found." msgstr "Interface %(interface)s not found." 
-#: nova/exception.py:781 nova/api/openstack/compute/contrib/floating_ips.py:97 +#: nova/exception.py:785 nova/api/openstack/compute/contrib/floating_ips.py:98 msgid "Cannot disassociate auto assigned floating ip" msgstr "Cannot disassociate auto assigned floating ip" -#: nova/exception.py:786 +#: nova/exception.py:790 #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "Keypair %(name)s not found for user %(user_id)s" -#: nova/exception.py:790 +#: nova/exception.py:794 #, python-format msgid "Service %(service_id)s could not be found." msgstr "Service %(service_id)s could not be found." -#: nova/exception.py:794 +#: nova/exception.py:798 #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "" -#: nova/exception.py:798 +#: nova/exception.py:802 #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "" -#: nova/exception.py:802 +#: nova/exception.py:806 #, python-format msgid "Host %(host)s could not be found." msgstr "Host %(host)s could not be found." -#: nova/exception.py:806 +#: nova/exception.py:810 #, python-format msgid "Compute host %(host)s could not be found." msgstr "Compute host %(host)s could not be found." -#: nova/exception.py:810 +#: nova/exception.py:814 #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "Could not find binary %(binary)s on host %(host)s." -#: nova/exception.py:814 +#: nova/exception.py:818 #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "Invalid reservation expiration %(expire)s." 
-#: nova/exception.py:818 +#: nova/exception.py:822 #, python-format msgid "" "Change would make usage less than 0 for the following resources: " @@ -904,73 +909,78 @@ msgstr "" "Change would make usage less than 0 for the following resources: " "%(unders)s" -#: nova/exception.py:823 +#: nova/exception.py:827 +#, python-format +msgid "Wrong quota method %(method)s used on resource %(res)s" +msgstr "" + +#: nova/exception.py:831 msgid "Quota could not be found" msgstr "Quota could not be found" -#: nova/exception.py:827 +#: nova/exception.py:835 #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "" -#: nova/exception.py:832 +#: nova/exception.py:840 #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Unknown quota resources %(unknown)s." -#: nova/exception.py:836 +#: nova/exception.py:844 #, python-format msgid "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" -#: nova/exception.py:841 +#: nova/exception.py:849 #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "Quota for project %(project_id)s could not be found." -#: nova/exception.py:845 +#: nova/exception.py:853 #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "Quota class %(class_name)s could not be found." -#: nova/exception.py:849 +#: nova/exception.py:857 #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "Quota usage for project %(project_id)s could not be found." -#: nova/exception.py:853 +#: nova/exception.py:861 #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "Quota reservation %(uuid)s could not be found." 
-#: nova/exception.py:857 +#: nova/exception.py:865 #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Quota exceeded for resources: %(overs)s" -#: nova/exception.py:861 +#: nova/exception.py:869 #, python-format msgid "Security group %(security_group_id)s not found." msgstr "Security group %(security_group_id)s not found." -#: nova/exception.py:865 +#: nova/exception.py:873 #, python-format msgid "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "Security group %(security_group_id)s not found for project %(project_id)s." -#: nova/exception.py:870 +#: nova/exception.py:878 #, python-format msgid "Security group with rule %(rule_id)s not found." msgstr "Security group with rule %(rule_id)s not found." -#: nova/exception.py:875 +#: nova/exception.py:883 #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "" -#: nova/exception.py:880 +#: nova/exception.py:888 #, python-format msgid "" "Security group %(security_group_id)s is already associated with the " @@ -979,7 +989,7 @@ msgstr "" "Security group %(security_group_id)s is already associated with the " "instance %(instance_id)s" -#: nova/exception.py:885 +#: nova/exception.py:893 #, python-format msgid "" "Security group %(security_group_id)s is not associated with the instance " @@ -988,49 +998,49 @@ msgstr "" "Security group %(security_group_id)s is not associated with the instance " "%(instance_id)s" -#: nova/exception.py:890 +#: nova/exception.py:898 #, fuzzy, python-format msgid "Security group default rule (%rule_id)s not found." msgstr "Security group with rule %(rule_id)s not found." -#: nova/exception.py:894 +#: nova/exception.py:902 msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." 
msgstr "" -#: nova/exception.py:900 +#: nova/exception.py:908 #, python-format msgid "Rule already exists in group: %(rule)s" msgstr "" -#: nova/exception.py:904 +#: nova/exception.py:912 msgid "No Unique Match Found." msgstr "" -#: nova/exception.py:909 +#: nova/exception.py:917 #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "Migration %(migration_id)s could not be found." -#: nova/exception.py:913 +#: nova/exception.py:921 #, python-format msgid "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "Migration not found for instance %(instance_id)s with status %(status)s." -#: nova/exception.py:918 +#: nova/exception.py:926 #, python-format msgid "Console pool %(pool_id)s could not be found." msgstr "Console pool %(pool_id)s could not be found." -#: nova/exception.py:922 +#: nova/exception.py:930 #, python-format msgid "" "Console pool with host %(host)s, console_type %(console_type)s and " "compute_host %(compute_host)s already exists." msgstr "" -#: nova/exception.py:928 +#: nova/exception.py:936 #, python-format msgid "" "Console pool of type %(console_type)s for compute host %(compute_host)s " @@ -1039,17 +1049,17 @@ msgstr "" "Console pool of type %(console_type)s for compute host %(compute_host)s " "on proxy host %(host)s not found." -#: nova/exception.py:934 +#: nova/exception.py:942 #, python-format msgid "Console %(console_id)s could not be found." msgstr "Console %(console_id)s could not be found." -#: nova/exception.py:938 +#: nova/exception.py:946 #, python-format msgid "Console for instance %(instance_uuid)s could not be found." msgstr "Console for instance %(instance_uuid)s could not be found." -#: nova/exception.py:942 +#: nova/exception.py:950 #, python-format msgid "" "Console for instance %(instance_uuid)s in pool %(pool_id)s could not be " @@ -1058,244 +1068,244 @@ msgstr "" "Console for instance %(instance_uuid)s in pool %(pool_id)s could not be " "found." 
-#: nova/exception.py:947 +#: nova/exception.py:955 #, fuzzy, python-format msgid "Invalid console type %(console_type)s" msgstr "Invalid console type %(console_type)s " -#: nova/exception.py:951 +#: nova/exception.py:959 #, python-format msgid "Unavailable console type %(console_type)s." msgstr "" -#: nova/exception.py:955 +#: nova/exception.py:963 #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "" -#: nova/exception.py:960 +#: nova/exception.py:968 #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "Flavor %(flavor_id)s could not be found." -#: nova/exception.py:964 +#: nova/exception.py:972 #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "" -#: nova/exception.py:968 +#: nova/exception.py:976 #, fuzzy, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "Flavor access not found for %(flavor_id) / %(project_id) combination." -#: nova/exception.py:973 +#: nova/exception.py:981 #, python-format msgid "" "Flavor %(id)d extra spec cannot be updated or created after %(retries)d " "retries." msgstr "" -#: nova/exception.py:978 +#: nova/exception.py:986 #, fuzzy, python-format msgid "Cell %(cell_name)s doesn't exist." msgstr "pool %s doesn't exist" -#: nova/exception.py:982 +#: nova/exception.py:990 #, python-format msgid "Cell with name %(name)s already exists." 
msgstr "" -#: nova/exception.py:986 +#: nova/exception.py:994 #, python-format msgid "Inconsistency in cell routing: %(reason)s" msgstr "" -#: nova/exception.py:990 +#: nova/exception.py:998 #, python-format msgid "Service API method not found: %(detail)s" msgstr "" -#: nova/exception.py:994 +#: nova/exception.py:1002 #, fuzzy msgid "Timeout waiting for response from cell" msgstr "Timed out waiting for RPC response: %s" -#: nova/exception.py:998 +#: nova/exception.py:1006 #, python-format msgid "Cell message has reached maximum hop count: %(hop_count)s" msgstr "" -#: nova/exception.py:1002 +#: nova/exception.py:1010 msgid "No cells available matching scheduling criteria." msgstr "" -#: nova/exception.py:1006 +#: nova/exception.py:1014 msgid "Cannot update cells configuration file." msgstr "" -#: nova/exception.py:1010 +#: nova/exception.py:1018 #, fuzzy, python-format msgid "Cell is not known for instance %(instance_uuid)s" msgstr "Destroying VDIs for Instance %(instance_uuid)s" -#: nova/exception.py:1014 +#: nova/exception.py:1022 #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "Scheduler Host Filter %(filter_name)s could not be found." -#: nova/exception.py:1018 +#: nova/exception.py:1026 #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" -#: nova/exception.py:1023 +#: nova/exception.py:1031 #, python-format msgid "" "Metric %(name)s could not be found on the compute host node " "%(host)s.%(node)s." msgstr "" -#: nova/exception.py:1028 +#: nova/exception.py:1036 #, python-format msgid "File %(file_path)s could not be found." msgstr "File %(file_path)s could not be found." -#: nova/exception.py:1032 +#: nova/exception.py:1040 msgid "Zero files could be found." msgstr "Zero files could be found." -#: nova/exception.py:1036 +#: nova/exception.py:1044 #, python-format msgid "Virtual switch associated with the network adapter %(adapter)s not found." 
msgstr "Virtual switch associated with the network adapter %(adapter)s not found." -#: nova/exception.py:1041 +#: nova/exception.py:1049 #, python-format msgid "Network adapter %(adapter)s could not be found." msgstr "Network adapter %(adapter)s could not be found." -#: nova/exception.py:1045 +#: nova/exception.py:1053 #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "Class %(class_name)s could not be found: %(exception)s" -#: nova/exception.py:1049 +#: nova/exception.py:1057 msgid "Action not allowed." msgstr "Action not allowed." -#: nova/exception.py:1053 +#: nova/exception.py:1061 msgid "Rotation is not allowed for snapshots" msgstr "Rotation is not allowed for snapshots" -#: nova/exception.py:1057 +#: nova/exception.py:1065 msgid "Rotation param is required for backup image_type" msgstr "Rotation param is required for backup image_type" -#: nova/exception.py:1062 nova/tests/compute/test_keypairs.py:144 +#: nova/exception.py:1070 nova/tests/compute/test_keypairs.py:146 #, fuzzy, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "Key pair %(key_name)s already exists." -#: nova/exception.py:1066 +#: nova/exception.py:1074 #, python-format msgid "Instance %(name)s already exists." msgstr "Instance %(name)s already exists." -#: nova/exception.py:1070 +#: nova/exception.py:1078 #, python-format msgid "Flavor with name %(name)s already exists." msgstr "" -#: nova/exception.py:1074 +#: nova/exception.py:1082 #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "" -#: nova/exception.py:1078 +#: nova/exception.py:1086 #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." 
msgstr "" -#: nova/exception.py:1083 +#: nova/exception.py:1091 #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s is not on shared storage: %(reason)s" -#: nova/exception.py:1087 +#: nova/exception.py:1095 #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s is not on local storage: %(reason)s" -#: nova/exception.py:1091 +#: nova/exception.py:1099 #, python-format msgid "Storage error: %(reason)s" msgstr "" -#: nova/exception.py:1095 +#: nova/exception.py:1103 #, python-format msgid "Migration error: %(reason)s" msgstr "" -#: nova/exception.py:1099 +#: nova/exception.py:1107 #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "" -#: nova/exception.py:1103 +#: nova/exception.py:1111 #, python-format msgid "Malformed message body: %(reason)s" msgstr "Malformed message body: %(reason)s" -#: nova/exception.py:1109 +#: nova/exception.py:1117 #, python-format msgid "Could not find config at %(path)s" msgstr "Could not find config at %(path)s" -#: nova/exception.py:1113 +#: nova/exception.py:1121 #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "Could not load paste app '%(name)s' from %(path)s" -#: nova/exception.py:1117 +#: nova/exception.py:1125 msgid "When resizing, instances must change flavor!" msgstr "When resizing, instances must change flavor!" -#: nova/exception.py:1121 +#: nova/exception.py:1129 #, python-format msgid "Resize error: %(reason)s" msgstr "" -#: nova/exception.py:1125 +#: nova/exception.py:1133 #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "" -#: nova/exception.py:1129 +#: nova/exception.py:1137 msgid "Flavor's memory is too small for requested image." msgstr "" -#: nova/exception.py:1133 +#: nova/exception.py:1141 msgid "Flavor's disk is too small for requested image." 
msgstr "" -#: nova/exception.py:1137 +#: nova/exception.py:1145 #, python-format msgid "Insufficient free memory on compute node to start %(uuid)s." msgstr "Insufficient free memory on compute node to start %(uuid)s." -#: nova/exception.py:1141 +#: nova/exception.py:1149 #, python-format msgid "No valid host was found. %(reason)s" msgstr "No valid host was found. %(reason)s" -#: nova/exception.py:1146 +#: nova/exception.py:1154 #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "" -#: nova/exception.py:1153 +#: nova/exception.py:1161 #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used " @@ -1304,45 +1314,45 @@ msgstr "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used " "%(used)d of %(allowed)d %(resource)s" -#: nova/exception.py:1158 +#: nova/exception.py:1166 msgid "Maximum number of floating ips exceeded" msgstr "Maximum number of floating ips exceeded" -#: nova/exception.py:1162 +#: nova/exception.py:1170 #, fuzzy msgid "Maximum number of fixed ips exceeded" msgstr "Maximum number of floating ips exceeded" -#: nova/exception.py:1166 +#: nova/exception.py:1174 #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "Maximum number of metadata items exceeds %(allowed)d" -#: nova/exception.py:1170 +#: nova/exception.py:1178 msgid "Personality file limit exceeded" msgstr "Personality file limit exceeded" -#: nova/exception.py:1174 +#: nova/exception.py:1182 msgid "Personality file path too long" msgstr "Personality file path too long" -#: nova/exception.py:1178 +#: nova/exception.py:1186 msgid "Personality file content too long" msgstr "Personality file content too long" -#: nova/exception.py:1182 nova/tests/compute/test_keypairs.py:155 +#: nova/exception.py:1190 nova/tests/compute/test_keypairs.py:157 msgid "Maximum number of key pairs exceeded" msgstr "Maximum number of key pairs exceeded" -#: nova/exception.py:1187 +#: nova/exception.py:1195 msgid "Maximum number 
of security groups or rules exceeded" msgstr "Maximum number of security groups or rules exceeded" -#: nova/exception.py:1191 +#: nova/exception.py:1199 msgid "Maximum number of ports exceeded" msgstr "" -#: nova/exception.py:1195 +#: nova/exception.py:1203 #, python-format msgid "" "Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " @@ -1351,130 +1361,130 @@ msgstr "" "Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " "%(reason)s." -#: nova/exception.py:1200 +#: nova/exception.py:1208 #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "Aggregate %(aggregate_id)s could not be found." -#: nova/exception.py:1204 +#: nova/exception.py:1212 #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "Aggregate %(aggregate_name)s already exists." -#: nova/exception.py:1208 +#: nova/exception.py:1216 #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "Aggregate %(aggregate_id)s has no host %(host)s." -#: nova/exception.py:1212 +#: nova/exception.py:1220 #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." -#: nova/exception.py:1217 +#: nova/exception.py:1225 #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "Aggregate %(aggregate_id)s already has host %(host)s." 
-#: nova/exception.py:1221 +#: nova/exception.py:1229 msgid "Unable to create flavor" msgstr "" -#: nova/exception.py:1225 +#: nova/exception.py:1233 #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "Failed to set admin password on %(instance)s because %(reason)s" -#: nova/exception.py:1231 +#: nova/exception.py:1239 #, python-format msgid "Detected existing vlan with id %(vlan)d" msgstr "Detected existing vlan with id %(vlan)d" -#: nova/exception.py:1235 +#: nova/exception.py:1243 msgid "There was a conflict when trying to complete your request." msgstr "" -#: nova/exception.py:1241 +#: nova/exception.py:1249 #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "Instance %(instance_id)s could not be found." -#: nova/exception.py:1245 +#: nova/exception.py:1253 #, fuzzy, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "Console for instance %(instance_uuid)s could not be found." -#: nova/exception.py:1250 +#: nova/exception.py:1258 #, fuzzy, python-format msgid "Node %(node_id)s could not be found." msgstr "Volume %(volume_id)s could not be found." -#: nova/exception.py:1254 +#: nova/exception.py:1262 #, fuzzy, python-format msgid "Node with UUID %(node_uuid)s could not be found." msgstr "Port %(port_id)s could not be found." -#: nova/exception.py:1258 +#: nova/exception.py:1266 #, python-format msgid "Marker %(marker)s could not be found." msgstr "Marker %(marker)s could not be found." -#: nova/exception.py:1263 +#: nova/exception.py:1271 #, python-format msgid "Invalid id: %(val)s (expecting \"i-...\")." msgstr "Invalid id: %(val)s (expecting \"i-...\")." 
-#: nova/exception.py:1267 +#: nova/exception.py:1275 #, python-format msgid "Could not fetch image %(image_id)s" msgstr "Could not fetch image %(image_id)s" -#: nova/exception.py:1271 +#: nova/exception.py:1279 #, fuzzy, python-format msgid "Could not upload image %(image_id)s" msgstr "Could not fetch image %(image_id)s" -#: nova/exception.py:1275 +#: nova/exception.py:1283 #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "Task %(task_name)s is already running on host %(host)s" -#: nova/exception.py:1279 +#: nova/exception.py:1287 #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "Task %(task_name)s is not running on host %(host)s" -#: nova/exception.py:1283 +#: nova/exception.py:1291 #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "Instance %(instance_uuid)s is locked" -#: nova/exception.py:1287 +#: nova/exception.py:1295 #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "" -#: nova/exception.py:1291 +#: nova/exception.py:1299 #, python-format msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" -#: nova/exception.py:1296 +#: nova/exception.py:1304 #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "Unknown config drive format %(format)s. Select one of iso9660 or vfat." 
-#: nova/exception.py:1301 -#, fuzzy, python-format -msgid "Failed to attach network adapter device to %(instance)s" -msgstr "Failed to dealloc network for deleted instance" +#: nova/exception.py:1309 +#, python-format +msgid "Failed to attach network adapter device to %(instance_uuid)s" +msgstr "" -#: nova/exception.py:1305 -#, fuzzy, python-format -msgid "Failed to detach network adapter device from %(instance)s" -msgstr "Failed to dealloc network for deleted instance" +#: nova/exception.py:1314 +#, python-format +msgid "Failed to detach network adapter device from %(instance_uuid)s" +msgstr "" -#: nova/exception.py:1309 +#: nova/exception.py:1319 #, python-format msgid "" "User data too large. User data must be no larger than %(maxsize)s bytes " @@ -1483,329 +1493,342 @@ msgstr "" "User data too large. User data must be no larger than %(maxsize)s bytes " "once base64 encoded. Your data is %(length)d bytes" -#: nova/exception.py:1315 +#: nova/exception.py:1325 msgid "User data needs to be valid base 64." msgstr "User data needs to be valid base 64." 
-#: nova/exception.py:1319 +#: nova/exception.py:1329 #, python-format msgid "" "Unexpected task state: expecting %(expected)s but the actual state is " "%(actual)s" msgstr "" -#: nova/exception.py:1328 +#: nova/exception.py:1338 #, fuzzy, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not " "found" msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" -#: nova/exception.py:1333 +#: nova/exception.py:1343 #, fuzzy, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "Keypair %(name)s not found for user %(user_id)s" -#: nova/exception.py:1337 +#: nova/exception.py:1347 #, python-format msgid "" "Unexpected VM state: expecting %(expected)s but the actual state is " "%(actual)s" msgstr "" -#: nova/exception.py:1342 +#: nova/exception.py:1352 #, python-format msgid "The CA file for %(project)s could not be found" msgstr "The CA file for %(project)s could not be found" -#: nova/exception.py:1346 +#: nova/exception.py:1356 #, python-format msgid "The CRL file for %(project)s could not be found" msgstr "The CRL file for %(project)s could not be found" -#: nova/exception.py:1350 +#: nova/exception.py:1360 msgid "Instance recreate is not supported." msgstr "" -#: nova/exception.py:1354 +#: nova/exception.py:1364 #, python-format msgid "" "The service from servicegroup driver %(driver)s is temporarily " "unavailable." 
msgstr "" -#: nova/exception.py:1359 +#: nova/exception.py:1369 #, python-format msgid "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" -#: nova/exception.py:1364 +#: nova/exception.py:1374 #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" -#: nova/exception.py:1369 +#: nova/exception.py:1379 #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt " "driver" msgstr "" -#: nova/exception.py:1374 +#: nova/exception.py:1384 #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "" -#: nova/exception.py:1378 +#: nova/exception.py:1388 #, fuzzy, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "Instance %(instance_id)s is not in rescue mode" -#: nova/exception.py:1382 +#: nova/exception.py:1392 #, fuzzy, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "Instance %(instance_id)s is not in rescue mode" -#: nova/exception.py:1387 +#: nova/exception.py:1397 #, fuzzy, python-format msgid "Shadow table with name %(name)s already exists." msgstr "Instance Type with name %(name)s already exists." 
-#: nova/exception.py:1392 +#: nova/exception.py:1402 #, python-format msgid "Instance rollback performed due to: %s" msgstr "" -#: nova/exception.py:1398 +#: nova/exception.py:1408 #, fuzzy, python-format msgid "Unsupported object type %(objtype)s" msgstr "Expected object of type: %s" -#: nova/exception.py:1402 +#: nova/exception.py:1412 #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "" -#: nova/exception.py:1406 +#: nova/exception.py:1416 #, python-format msgid "Version %(objver)s of %(objname)s is not supported" msgstr "" -#: nova/exception.py:1410 +#: nova/exception.py:1420 #, python-format msgid "Cannot modify readonly field %(field)s" msgstr "" -#: nova/exception.py:1414 +#: nova/exception.py:1424 #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "" -#: nova/exception.py:1418 +#: nova/exception.py:1428 #, python-format msgid "Field %(field)s of %(objname)s is not an instance of Field" msgstr "" -#: nova/exception.py:1422 +#: nova/exception.py:1432 #, python-format msgid "Core API extensions are missing: %(missing_apis)s" msgstr "" -#: nova/exception.py:1426 +#: nova/exception.py:1436 #, python-format msgid "Error during following call to agent: %(method)s" msgstr "" -#: nova/exception.py:1430 +#: nova/exception.py:1440 #, python-format msgid "Unable to contact guest agent. The following call timed out: %(method)s" msgstr "" -#: nova/exception.py:1435 +#: nova/exception.py:1445 #, python-format msgid "Agent does not support the call: %(method)s" msgstr "" -#: nova/exception.py:1439 +#: nova/exception.py:1449 #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "" -#: nova/exception.py:1443 +#: nova/exception.py:1453 #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "" -#: nova/exception.py:1447 +#: nova/exception.py:1457 #, python-format msgid "Instance group %(group_uuid)s has no metadata with key %(metadata_key)s." 
msgstr "" -#: nova/exception.py:1452 +#: nova/exception.py:1462 #, python-format msgid "Instance group %(group_uuid)s has no member with id %(instance_id)s." msgstr "" -#: nova/exception.py:1457 +#: nova/exception.py:1467 #, python-format msgid "Instance group %(group_uuid)s has no policy %(policy)s." msgstr "" -#: nova/exception.py:1461 +#: nova/exception.py:1471 #, python-format msgid "Number of retries to plugin (%(num_retries)d) exceeded." msgstr "" -#: nova/exception.py:1465 +#: nova/exception.py:1475 #, python-format msgid "There was an error with the download module %(module)s. %(reason)s" msgstr "" -#: nova/exception.py:1470 +#: nova/exception.py:1480 #, python-format msgid "" "The metadata for this location will not work with this module %(module)s." " %(reason)s." msgstr "" -#: nova/exception.py:1475 +#: nova/exception.py:1485 #, python-format msgid "The method %(method_name)s is not implemented." msgstr "" -#: nova/exception.py:1479 +#: nova/exception.py:1489 #, python-format msgid "The module %(module)s is misconfigured: %(reason)s." msgstr "" -#: nova/exception.py:1483 +#: nova/exception.py:1493 #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "" -#: nova/exception.py:1487 +#: nova/exception.py:1497 #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "" -#: nova/exception.py:1491 +#: nova/exception.py:1501 +#, python-format +msgid "" +"Invalid PCI Whitelist: The PCI address %(address)s has an invalid " +"%(field)s." +msgstr "" + +#: nova/exception.py:1506 +msgid "" +"Invalid PCI Whitelist: The PCI whitelist can specify devname or address, " +"but not both" +msgstr "" + +#: nova/exception.py:1512 #, python-format msgid "PCI device %(id)s not found" msgstr "" -#: nova/exception.py:1495 +#: nova/exception.py:1516 #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." 
msgstr "" -#: nova/exception.py:1499 +#: nova/exception.py:1520 #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" -#: nova/exception.py:1505 +#: nova/exception.py:1526 #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead " "of %(hopeowner)s" msgstr "" -#: nova/exception.py:1511 +#: nova/exception.py:1532 #, python-format msgid "PCI device request (%requests)s failed" msgstr "" -#: nova/exception.py:1516 +#: nova/exception.py:1537 #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty " "pool" msgstr "" -#: nova/exception.py:1522 +#: nova/exception.py:1543 #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "" -#: nova/exception.py:1526 +#: nova/exception.py:1547 #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "" -#: nova/exception.py:1531 +#: nova/exception.py:1552 #, python-format msgid "Not enough parameters: %(reason)s" msgstr "" -#: nova/exception.py:1536 +#: nova/exception.py:1557 #, python-format msgid "Invalid PCI devices Whitelist config %(reason)s" msgstr "" -#: nova/exception.py:1540 +#: nova/exception.py:1561 #, python-format msgid "Cannot change %(node_id)s to %(new_node_id)s" msgstr "" -#: nova/exception.py:1550 +#: nova/exception.py:1571 #, python-format msgid "" "Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: " "%(reason)s" msgstr "" -#: nova/exception.py:1555 +#: nova/exception.py:1576 #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "" -#: nova/exception.py:1559 +#: nova/exception.py:1580 #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "" -#: nova/exception.py:1563 +#: nova/exception.py:1584 #, python-format msgid "Key manager error: %(reason)s" msgstr "" -#: nova/exception.py:1567 +#: nova/exception.py:1588 #, python-format msgid "Failed to remove volume(s): 
(%(reason)s)" msgstr "" -#: nova/exception.py:1571 +#: nova/exception.py:1592 #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "" -#: nova/exception.py:1575 +#: nova/exception.py:1596 #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "" -#: nova/exception.py:1580 +#: nova/exception.py:1601 #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the " "maximum allowed by flavor %(max_vram)d." msgstr "" -#: nova/exception.py:1585 +#: nova/exception.py:1606 #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "" -#: nova/exception.py:1589 +#: nova/exception.py:1610 msgid "" "Live migration of instances with config drives is not supported in " "libvirt unless libvirt instance path and drive data is shared across " "compute nodes." msgstr "" -#: nova/exception.py:1595 +#: nova/exception.py:1616 #, python-format msgid "" "Host %(server)s is running an old version of Nova, live migrations " @@ -1813,32 +1836,37 @@ msgid "" "and try again." 
msgstr "" -#: nova/exception.py:1601 +#: nova/exception.py:1622 #, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "" -#: nova/exception.py:1605 +#: nova/exception.py:1626 #, python-format msgid "" "Image vCPU limits %(sockets)d:%(cores)d:%(threads)d exceeds permitted " "%(maxsockets)d:%(maxcores)d:%(maxthreads)d" msgstr "" -#: nova/exception.py:1610 +#: nova/exception.py:1631 #, python-format msgid "" "Image vCPU topology %(sockets)d:%(cores)d:%(threads)d exceeds permitted " "%(maxsockets)d:%(maxcores)d:%(maxthreads)d" msgstr "" -#: nova/exception.py:1615 +#: nova/exception.py:1636 #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to" " satisfy for vcpus count %(vcpus)d" msgstr "" +#: nova/exception.py:1641 +#, python-format +msgid "Architecture name '%(arch)s' is not recognised" +msgstr "" + #: nova/filters.py:84 #, python-format msgid "Filter %s returned 0 hosts" @@ -1853,12 +1881,12 @@ msgstr "Failed to send state update notification" msgid "Failed to get nw_info" msgstr "Failed to get info for disk %s" -#: nova/quota.py:1326 +#: nova/quota.py:1332 #, python-format msgid "Failed to commit reservations %s" msgstr "" -#: nova/quota.py:1349 +#: nova/quota.py:1355 #, python-format msgid "Failed to roll back reservations %s" msgstr "" @@ -1942,37 +1970,41 @@ msgstr "Invalid server_string: %s" msgid "Could not remove tmpdir: %s" msgstr "Could not remove tmpdir: %s" -#: nova/utils.py:963 +#: nova/utils.py:964 +msgid "The input is not a string or unicode" +msgstr "" + +#: nova/utils.py:966 #, fuzzy, python-format msgid "%s is not a string or unicode" msgstr "Server name is not a string or unicode" -#: nova/utils.py:967 +#: nova/utils.py:973 #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "" -#: nova/utils.py:972 +#: nova/utils.py:978 #, python-format msgid "%(name)s has more than %(max_length)s characters." 
msgstr "" -#: nova/utils.py:982 +#: nova/utils.py:988 #, python-format msgid "%(value_name)s must be an integer" msgstr "" -#: nova/utils.py:988 +#: nova/utils.py:994 #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "" -#: nova/utils.py:994 +#: nova/utils.py:1000 #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "" -#: nova/utils.py:1028 +#: nova/utils.py:1034 #, python-format msgid "Hypervisor version %s is invalid." msgstr "" @@ -1992,22 +2024,22 @@ msgstr "" msgid "%(name)s listening on %(host)s:%(port)s" msgstr "%(name)s listening on %(host)s:%(port)s" -#: nova/wsgi.py:159 nova/openstack/common/sslutils.py:50 +#: nova/wsgi.py:159 nova/openstack/common/sslutils.py:47 #, fuzzy, python-format msgid "Unable to find cert_file : %s" msgstr "Unable to find address %r" -#: nova/wsgi.py:163 nova/openstack/common/sslutils.py:53 +#: nova/wsgi.py:163 nova/openstack/common/sslutils.py:50 #, fuzzy, python-format msgid "Unable to find ca_file : %s" msgstr "Unable to find address %r" -#: nova/wsgi.py:167 nova/openstack/common/sslutils.py:56 +#: nova/wsgi.py:167 nova/openstack/common/sslutils.py:53 #, fuzzy, python-format msgid "Unable to find key_file : %s" msgstr "Unable to find address %r" -#: nova/wsgi.py:171 nova/openstack/common/sslutils.py:59 +#: nova/wsgi.py:171 nova/openstack/common/sslutils.py:56 msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" @@ -2030,229 +2062,191 @@ msgstr "WSGI server has stopped." msgid "You must implement __call__" msgstr "You must implement __call__" -#: nova/api/auth.py:72 -msgid "ratelimit_v3 is removed from v3 api." -msgstr "" - -#: nova/api/auth.py:135 +#: nova/api/auth.py:136 msgid "Invalid service catalog json." msgstr "Invalid service catalog json." 
-#: nova/api/auth.py:159 -msgid "Sourcing roles from deprecated X-Role HTTP header" -msgstr "Sourcing roles from deprecated X-Role HTTP header" - #: nova/api/sizelimit.py:53 nova/api/sizelimit.py:62 nova/api/sizelimit.py:76 #: nova/api/metadata/password.py:62 msgid "Request is too large." msgstr "Request is too large." -#: nova/api/ec2/__init__.py:88 +#: nova/api/ec2/__init__.py:89 #, python-format msgid "FaultWrapper: %s" msgstr "FaultWrapper: %s" -#: nova/api/ec2/__init__.py:159 +#: nova/api/ec2/__init__.py:160 msgid "Too many failed authentications." msgstr "Too many failed authentications." -#: nova/api/ec2/__init__.py:168 -#, python-format -msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." -msgstr "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." - -#: nova/api/ec2/__init__.py:187 +#: nova/api/ec2/__init__.py:188 msgid "Signature not provided" msgstr "Signature not provided" -#: nova/api/ec2/__init__.py:192 +#: nova/api/ec2/__init__.py:193 msgid "Access key not provided" msgstr "Access key not provided" -#: nova/api/ec2/__init__.py:228 nova/api/ec2/__init__.py:244 +#: nova/api/ec2/__init__.py:229 nova/api/ec2/__init__.py:245 msgid "Failure communicating with keystone" msgstr "Failure communicating with keystone" -#: nova/api/ec2/__init__.py:304 +#: nova/api/ec2/__init__.py:305 #, fuzzy msgid "Timestamp failed validation." msgstr "Too many failed authentications." 
-#: nova/api/ec2/__init__.py:402 +#: nova/api/ec2/__init__.py:403 #, python-format msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" msgstr "Unauthorized request for controller=%(controller)s and action=%(action)s" -#: nova/api/ec2/__init__.py:492 -#, python-format -msgid "Unexpected %(ex_name)s raised: %(ex_str)s" -msgstr "" - -#: nova/api/ec2/__init__.py:495 -#, python-format -msgid "%(ex_name)s raised: %(ex_str)s" -msgstr "" - -#: nova/api/ec2/__init__.py:519 -#, python-format -msgid "Environment: %s" -msgstr "Environment: %s" - -#: nova/api/ec2/__init__.py:521 +#: nova/api/ec2/__init__.py:522 msgid "Unknown error occurred." msgstr "" -#: nova/api/ec2/cloud.py:391 +#: nova/api/ec2/cloud.py:392 #, python-format msgid "Create snapshot of volume %s" msgstr "Create snapshot of volume %s" -#: nova/api/ec2/cloud.py:416 +#: nova/api/ec2/cloud.py:417 #, python-format msgid "Could not find key pair(s): %s" msgstr "Could not find key pair(s): %s" -#: nova/api/ec2/cloud.py:432 +#: nova/api/ec2/cloud.py:433 #, python-format msgid "Create key pair %s" msgstr "Create key pair %s" -#: nova/api/ec2/cloud.py:444 +#: nova/api/ec2/cloud.py:445 #, python-format msgid "Import key %s" msgstr "Import key %s" -#: nova/api/ec2/cloud.py:457 +#: nova/api/ec2/cloud.py:458 #, python-format msgid "Delete key pair %s" msgstr "Delete key pair %s" -#: nova/api/ec2/cloud.py:599 nova/api/ec2/cloud.py:729 +#: nova/api/ec2/cloud.py:600 nova/api/ec2/cloud.py:730 msgid "need group_name or group_id" msgstr "" -#: nova/api/ec2/cloud.py:604 +#: nova/api/ec2/cloud.py:605 msgid "can't build a valid rule" msgstr "" -#: nova/api/ec2/cloud.py:612 +#: nova/api/ec2/cloud.py:613 #, python-format msgid "Invalid IP protocol %(protocol)s" msgstr "" -#: nova/api/ec2/cloud.py:646 nova/api/ec2/cloud.py:682 +#: nova/api/ec2/cloud.py:647 nova/api/ec2/cloud.py:683 msgid "No rule for the specified parameters." msgstr "No rule for the specified parameters." 
-#: nova/api/ec2/cloud.py:760 +#: nova/api/ec2/cloud.py:761 #, python-format msgid "Get console output for instance %s" msgstr "Get console output for instance %s" -#: nova/api/ec2/cloud.py:832 +#: nova/api/ec2/cloud.py:833 #, python-format msgid "Create volume from snapshot %s" msgstr "Create volume from snapshot %s" -#: nova/api/ec2/cloud.py:836 nova/api/openstack/compute/contrib/volumes.py:243 +#: nova/api/ec2/cloud.py:837 nova/api/openstack/compute/contrib/volumes.py:243 #, python-format msgid "Create volume of %s GB" msgstr "Create volume of %s GB" -#: nova/api/ec2/cloud.py:876 +#: nova/api/ec2/cloud.py:877 #, python-format msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" -#: nova/api/ec2/cloud.py:906 nova/api/openstack/compute/contrib/volumes.py:506 +#: nova/api/ec2/cloud.py:907 nova/api/openstack/compute/contrib/volumes.py:506 #, python-format msgid "Detach volume %s" msgstr "Detach volume %s" -#: nova/api/ec2/cloud.py:1238 +#: nova/api/ec2/cloud.py:1261 msgid "Allocate address" msgstr "Allocate address" -#: nova/api/ec2/cloud.py:1243 +#: nova/api/ec2/cloud.py:1266 #, python-format msgid "Release address %s" msgstr "Release address %s" -#: nova/api/ec2/cloud.py:1248 +#: nova/api/ec2/cloud.py:1271 #, python-format msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "Associate address %(public_ip)s to instance %(instance_id)s" -#: nova/api/ec2/cloud.py:1258 +#: nova/api/ec2/cloud.py:1281 msgid "Unable to associate IP Address, no fixed_ips." msgstr "Unable to associate IP Address, no fixed_ips." 
-#: nova/api/ec2/cloud.py:1266 -#: nova/api/openstack/compute/contrib/floating_ips.py:251 -#, python-format -msgid "multiple fixed_ips exist, using the first: %s" -msgstr "multiple fixed_ips exist, using the first: %s" - -#: nova/api/ec2/cloud.py:1279 +#: nova/api/ec2/cloud.py:1302 #, python-format msgid "Disassociate address %s" msgstr "Disassociate address %s" -#: nova/api/ec2/cloud.py:1296 nova/api/openstack/compute/servers.py:918 +#: nova/api/ec2/cloud.py:1319 nova/api/openstack/compute/servers.py:920 #: nova/api/openstack/compute/plugins/v3/multiple_create.py:64 msgid "min_count must be <= max_count" msgstr "min_count must be <= max_count" -#: nova/api/ec2/cloud.py:1328 +#: nova/api/ec2/cloud.py:1351 msgid "Image must be available" msgstr "Image must be available" -#: nova/api/ec2/cloud.py:1424 +#: nova/api/ec2/cloud.py:1451 #, python-format msgid "Reboot instance %r" msgstr "Reboot instance %r" -#: nova/api/ec2/cloud.py:1537 +#: nova/api/ec2/cloud.py:1566 #, python-format msgid "De-registering image %s" msgstr "De-registering image %s" -#: nova/api/ec2/cloud.py:1553 +#: nova/api/ec2/cloud.py:1582 msgid "imageLocation is required" msgstr "imageLocation is required" -#: nova/api/ec2/cloud.py:1573 +#: nova/api/ec2/cloud.py:1602 #, python-format msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "Registered image %(image_location)s with id %(image_id)s" -#: nova/api/ec2/cloud.py:1634 +#: nova/api/ec2/cloud.py:1663 msgid "user or group not specified" msgstr "user or group not specified" -#: nova/api/ec2/cloud.py:1637 +#: nova/api/ec2/cloud.py:1666 msgid "only group \"all\" is supported" msgstr "only group \"all\" is supported" -#: nova/api/ec2/cloud.py:1640 +#: nova/api/ec2/cloud.py:1669 msgid "operation_type must be add or remove" msgstr "operation_type must be add or remove" -#: nova/api/ec2/cloud.py:1642 +#: nova/api/ec2/cloud.py:1671 #, python-format msgid "Updating image %s publicity" msgstr "Updating image %s publicity" -#: 
nova/api/ec2/cloud.py:1655 +#: nova/api/ec2/cloud.py:1684 #, python-format msgid "Not allowed to modify attributes for image %s" msgstr "Not allowed to modify attributes for image %s" -#: nova/api/ec2/cloud.py:1685 +#: nova/api/ec2/cloud.py:1714 #, python-format msgid "" "Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not " @@ -2261,51 +2255,51 @@ msgstr "" "Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not " "have a volume attached at root (%(root)s)" -#: nova/api/ec2/cloud.py:1718 +#: nova/api/ec2/cloud.py:1747 #, python-format msgid "" "Couldn't stop instance %(instance)s within 1 hour. Current vm_state: " "%(vm_state)s, current task_state: %(task_state)s" msgstr "" -#: nova/api/ec2/cloud.py:1742 +#: nova/api/ec2/cloud.py:1771 #, python-format msgid "image of %(instance)s at %(now)s" msgstr "image of %(instance)s at %(now)s" -#: nova/api/ec2/cloud.py:1767 nova/api/ec2/cloud.py:1817 +#: nova/api/ec2/cloud.py:1796 nova/api/ec2/cloud.py:1846 msgid "resource_id and tag are required" msgstr "" -#: nova/api/ec2/cloud.py:1771 nova/api/ec2/cloud.py:1821 +#: nova/api/ec2/cloud.py:1800 nova/api/ec2/cloud.py:1850 #, fuzzy msgid "Expecting a list of resources" msgstr "Getting list of instances" -#: nova/api/ec2/cloud.py:1776 nova/api/ec2/cloud.py:1826 -#: nova/api/ec2/cloud.py:1884 +#: nova/api/ec2/cloud.py:1805 nova/api/ec2/cloud.py:1855 +#: nova/api/ec2/cloud.py:1913 #, fuzzy msgid "Only instances implemented" msgstr "instance not present" -#: nova/api/ec2/cloud.py:1780 nova/api/ec2/cloud.py:1830 +#: nova/api/ec2/cloud.py:1809 nova/api/ec2/cloud.py:1859 #, fuzzy msgid "Expecting a list of tagSets" msgstr "Getting list of instances" -#: nova/api/ec2/cloud.py:1786 nova/api/ec2/cloud.py:1839 +#: nova/api/ec2/cloud.py:1815 nova/api/ec2/cloud.py:1868 msgid "Expecting tagSet to be key/value pairs" msgstr "" -#: nova/api/ec2/cloud.py:1793 +#: nova/api/ec2/cloud.py:1822 msgid "Expecting both key and value to be set" msgstr "" -#: 
nova/api/ec2/cloud.py:1844 +#: nova/api/ec2/cloud.py:1873 msgid "Expecting key to be set" msgstr "" -#: nova/api/ec2/cloud.py:1918 +#: nova/api/ec2/cloud.py:1947 msgid "Invalid CIDR" msgstr "Invalid CIDR" @@ -2323,238 +2317,146 @@ msgstr "" msgid "Timestamp is invalid." msgstr "The request is invalid." -#: nova/api/metadata/handler.py:112 -msgid "" -"X-Instance-ID present in request headers. The " -"'service_neutron_metadata_proxy' option must be enabled to process this " -"header." -msgstr "" - -#: nova/api/metadata/handler.py:141 nova/api/metadata/handler.py:148 +#: nova/api/metadata/handler.py:148 #, python-format msgid "Failed to get metadata for ip: %s" msgstr "Failed to get metadata for ip: %s" -#: nova/api/metadata/handler.py:143 nova/api/metadata/handler.py:199 +#: nova/api/metadata/handler.py:150 nova/api/metadata/handler.py:207 msgid "An unknown error has occurred. Please try your request again." msgstr "An unknown error has occurred. Please try your request again." -#: nova/api/metadata/handler.py:161 +#: nova/api/metadata/handler.py:169 msgid "X-Instance-ID header is missing from request." msgstr "" -#: nova/api/metadata/handler.py:163 +#: nova/api/metadata/handler.py:171 msgid "X-Tenant-ID header is missing from request." msgstr "" -#: nova/api/metadata/handler.py:165 +#: nova/api/metadata/handler.py:173 msgid "Multiple X-Instance-ID headers found within request." msgstr "" -#: nova/api/metadata/handler.py:167 +#: nova/api/metadata/handler.py:175 msgid "Multiple X-Tenant-ID headers found within request." msgstr "" -#: nova/api/metadata/handler.py:181 -#, python-format -msgid "" -"X-Instance-ID-Signature: %(signature)s does not match the expected value:" -" %(expected_signature)s for id: %(instance_id)s. Request From: " -"%(remote_address)s" -msgstr "" - -#: nova/api/metadata/handler.py:190 +#: nova/api/metadata/handler.py:198 #, fuzzy msgid "Invalid proxy request signature." 
msgstr "Invalid request: %s" -#: nova/api/metadata/handler.py:197 nova/api/metadata/handler.py:204 +#: nova/api/metadata/handler.py:205 #, fuzzy, python-format msgid "Failed to get metadata for instance id: %s" msgstr "Failed to get metadata for ip: %s" -#: nova/api/metadata/handler.py:208 -#, python-format -msgid "" -"Tenant_id %(tenant_id)s does not match tenant_id of instance " -"%(instance_id)s." -msgstr "" - -#: nova/api/metadata/vendordata_json.py:47 -msgid "file does not exist" -msgstr "" - -#: nova/api/metadata/vendordata_json.py:49 -msgid "Unexpected IOError when reading" -msgstr "" - -#: nova/api/metadata/vendordata_json.py:52 -msgid "failed to load json" -msgstr "" - -#: nova/api/openstack/__init__.py:89 +#: nova/api/openstack/__init__.py:92 #, python-format msgid "Caught error: %s" msgstr "Caught error: %s" -#: nova/api/openstack/__init__.py:98 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s returned with HTTP %(status)d" - -#: nova/api/openstack/__init__.py:186 +#: nova/api/openstack/__init__.py:189 msgid "Must specify an ExtensionManager class" msgstr "Must specify an ExtensionManager class" -#: nova/api/openstack/__init__.py:232 nova/api/openstack/__init__.py:406 -#, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" -msgstr "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" - -#: nova/api/openstack/__init__.py:279 -#: nova/api/openstack/compute/plugins/v3/servers.py:99 -#, python-format -msgid "Not loading %s because it is in the blacklist" -msgstr "" - -#: nova/api/openstack/__init__.py:284 -#: nova/api/openstack/compute/plugins/v3/servers.py:104 -#, python-format -msgid "Not loading %s because it is not in the whitelist" -msgstr "" - -#: nova/api/openstack/__init__.py:291 -msgid "V3 API has been disabled by configuration" -msgstr "" - -#: nova/api/openstack/__init__.py:304 -#, python-format -msgid "Extensions in both 
blacklist and whitelist: %s" -msgstr "" - -#: nova/api/openstack/__init__.py:328 -#, fuzzy, python-format -msgid "Missing core API extensions: %s" -msgstr "Loading extension %s" - -#: nova/api/openstack/common.py:132 -#, python-format -msgid "" -"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. " -"Bad upgrade or db corrupted?" -msgstr "" -"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. " -"Bad upgrade or db corrupted?" - -#: nova/api/openstack/common.py:182 +#: nova/api/openstack/common.py:185 #, python-format msgid "%s param must be an integer" msgstr "" -#: nova/api/openstack/common.py:185 +#: nova/api/openstack/common.py:188 #, python-format msgid "%s param must be positive" msgstr "" -#: nova/api/openstack/common.py:210 +#: nova/api/openstack/common.py:213 msgid "offset param must be an integer" msgstr "offset param must be an integer" -#: nova/api/openstack/common.py:216 +#: nova/api/openstack/common.py:219 msgid "limit param must be an integer" msgstr "limit param must be an integer" -#: nova/api/openstack/common.py:220 +#: nova/api/openstack/common.py:223 msgid "limit param must be positive" msgstr "limit param must be positive" -#: nova/api/openstack/common.py:224 +#: nova/api/openstack/common.py:227 msgid "offset param must be positive" msgstr "offset param must be positive" -#: nova/api/openstack/common.py:276 +#: nova/api/openstack/common.py:280 #, python-format msgid "href %s does not contain version" msgstr "href %s does not contain version" -#: nova/api/openstack/common.py:291 +#: nova/api/openstack/common.py:293 msgid "Image metadata limit exceeded" msgstr "Image metadata limit exceeded" -#: nova/api/openstack/common.py:299 +#: nova/api/openstack/common.py:301 msgid "Image metadata key cannot be blank" msgstr "Image metadata key cannot be blank" -#: nova/api/openstack/common.py:302 +#: nova/api/openstack/common.py:304 msgid "Image metadata key too long" msgstr "Image metadata key too long" -#: 
nova/api/openstack/common.py:305 +#: nova/api/openstack/common.py:307 msgid "Invalid image metadata" msgstr "Invalid image metadata" -#: nova/api/openstack/common.py:368 +#: nova/api/openstack/common.py:370 #, python-format msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" msgstr "Cannot '%(action)s' while instance is in %(attr)s %(state)s" -#: nova/api/openstack/common.py:371 +#: nova/api/openstack/common.py:373 #, python-format msgid "Cannot '%s' an instance which has never been active" msgstr "" -#: nova/api/openstack/common.py:374 +#: nova/api/openstack/common.py:376 #, fuzzy, python-format msgid "Instance is in an invalid state for '%s'" msgstr "Instance is in an invalid state for '%(action)s'" -#: nova/api/openstack/common.py:454 -msgid "Rejecting snapshot request, snapshots currently disabled" -msgstr "Rejecting snapshot request, snapshots currently disabled" - -#: nova/api/openstack/common.py:456 +#: nova/api/openstack/common.py:458 msgid "Instance snapshots are not permitted at this time." msgstr "Instance snapshots are not permitted at this time." -#: nova/api/openstack/common.py:577 +#: nova/api/openstack/common.py:579 msgid "Cells is not enabled." 
msgstr "" -#: nova/api/openstack/extensions.py:197 +#: nova/api/openstack/extensions.py:198 #, python-format msgid "Loaded extension: %s" msgstr "Loaded extension: %s" -#: nova/api/openstack/extensions.py:243 +#: nova/api/openstack/extensions.py:244 #: nova/api/openstack/compute/plugins/__init__.py:51 #, python-format msgid "Exception loading extension: %s" msgstr "Exception loading extension: %s" -#: nova/api/openstack/extensions.py:278 -#, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" -msgstr "Failed to load extension %(ext_factory)s: %(exc)s" - -#: nova/api/openstack/extensions.py:349 +#: nova/api/openstack/extensions.py:350 #, python-format msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "Failed to load extension %(classpath)s: %(exc)s" -#: nova/api/openstack/extensions.py:372 +#: nova/api/openstack/extensions.py:373 #, fuzzy, python-format msgid "Failed to load extension %(ext_name)s:%(exc)s" msgstr "Failed to load extension %(ext_name)s: %(exc)s" -#: nova/api/openstack/extensions.py:494 +#: nova/api/openstack/extensions.py:495 msgid "Unexpected exception in API method" msgstr "" -#: nova/api/openstack/extensions.py:495 +#: nova/api/openstack/extensions.py:496 #, python-format msgid "" "Unexpected API Error. 
Please report this at " @@ -2563,56 +2465,41 @@ msgid "" "%s" msgstr "" -#: nova/api/openstack/wsgi.py:228 nova/api/openstack/wsgi.py:633 +#: nova/api/openstack/wsgi.py:230 nova/api/openstack/wsgi.py:635 msgid "cannot understand JSON" msgstr "cannot understand JSON" -#: nova/api/openstack/wsgi.py:638 +#: nova/api/openstack/wsgi.py:640 msgid "too many body keys" msgstr "too many body keys" -#: nova/api/openstack/wsgi.py:682 -#, python-format -msgid "Exception handling resource: %s" -msgstr "Exception handling resource: %s" - -#: nova/api/openstack/wsgi.py:686 -#, python-format -msgid "Fault thrown: %s" -msgstr "Fault thrown: %s" - -#: nova/api/openstack/wsgi.py:689 -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "HTTP exception thrown: %s" - -#: nova/api/openstack/wsgi.py:919 +#: nova/api/openstack/wsgi.py:921 #, python-format msgid "There is no such action: %s" msgstr "There is no such action: %s" -#: nova/api/openstack/wsgi.py:922 nova/api/openstack/wsgi.py:949 +#: nova/api/openstack/wsgi.py:924 nova/api/openstack/wsgi.py:951 #: nova/api/openstack/compute/server_metadata.py:57 #: nova/api/openstack/compute/server_metadata.py:75 #: nova/api/openstack/compute/server_metadata.py:100 #: nova/api/openstack/compute/server_metadata.py:126 -#: nova/api/openstack/compute/contrib/evacuate.py:45 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:58 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:73 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:95 +#: nova/api/openstack/compute/contrib/evacuate.py:47 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:60 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:75 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:97 msgid "Malformed request body" msgstr "Malformed request body" -#: nova/api/openstack/wsgi.py:926 +#: nova/api/openstack/wsgi.py:928 #, python-format msgid "Action: '%(action)s', body: %(body)s" msgstr "" -#: nova/api/openstack/wsgi.py:946 
+#: nova/api/openstack/wsgi.py:948 msgid "Unsupported Content-Type" msgstr "Unsupported Content-Type" -#: nova/api/openstack/wsgi.py:958 +#: nova/api/openstack/wsgi.py:960 #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " @@ -2641,7 +2528,7 @@ msgid "Initializing extension manager." msgstr "Initializing extension manager." #: nova/api/openstack/compute/flavors.py:107 -#: nova/api/openstack/compute/plugins/v3/flavors.py:70 +#: nova/api/openstack/compute/plugins/v3/flavors.py:72 #, python-format msgid "Invalid is_public filter [%s]" msgstr "Invalid is_public filter [%s]" @@ -2657,57 +2544,57 @@ msgid "Invalid minDisk filter [%s]" msgstr "Invalid minDisk filter [%s]" #: nova/api/openstack/compute/flavors.py:146 -#: nova/api/openstack/compute/servers.py:603 -#: nova/api/openstack/compute/plugins/v3/flavors.py:110 -#: nova/api/openstack/compute/plugins/v3/servers.py:280 +#: nova/api/openstack/compute/servers.py:606 +#: nova/api/openstack/compute/plugins/v3/flavors.py:112 +#: nova/api/openstack/compute/plugins/v3/servers.py:303 #, python-format msgid "marker [%s] not found" msgstr "marker [%s] not found" -#: nova/api/openstack/compute/image_metadata.py:35 -#: nova/api/openstack/compute/images.py:141 -#: nova/api/openstack/compute/images.py:157 +#: nova/api/openstack/compute/image_metadata.py:37 +#: nova/api/openstack/compute/images.py:135 +#: nova/api/openstack/compute/images.py:151 msgid "Image not found." msgstr "Image not found." 
-#: nova/api/openstack/compute/image_metadata.py:78 +#: nova/api/openstack/compute/image_metadata.py:81 msgid "Incorrect request body format" msgstr "Incorrect request body format" -#: nova/api/openstack/compute/image_metadata.py:82 +#: nova/api/openstack/compute/image_metadata.py:85 #: nova/api/openstack/compute/server_metadata.py:79 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:108 #: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:72 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:77 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:79 msgid "Request body and URI mismatch" msgstr "Request body and URI mismatch" -#: nova/api/openstack/compute/image_metadata.py:85 +#: nova/api/openstack/compute/image_metadata.py:88 #: nova/api/openstack/compute/server_metadata.py:83 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:111 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:81 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:83 msgid "Request body contains too many items" msgstr "Request body contains too many items" -#: nova/api/openstack/compute/image_metadata.py:117 +#: nova/api/openstack/compute/image_metadata.py:122 msgid "Invalid metadata key" msgstr "Invalid metadata key" -#: nova/api/openstack/compute/images.py:162 +#: nova/api/openstack/compute/images.py:156 msgid "You are not allowed to delete the image." 
msgstr "" #: nova/api/openstack/compute/ips.py:67 -#: nova/api/openstack/compute/plugins/v3/ips.py:39 +#: nova/api/openstack/compute/plugins/v3/ips.py:41 msgid "Instance does not exist" msgstr "Instance does not exist" #: nova/api/openstack/compute/ips.py:90 -#: nova/api/openstack/compute/plugins/v3/ips.py:60 +#: nova/api/openstack/compute/plugins/v3/ips.py:62 msgid "Instance is not a member of specified network" msgstr "Instance is not a member of specified network" -#: nova/api/openstack/compute/limits.py:161 +#: nova/api/openstack/compute/limits.py:162 #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " @@ -2716,215 +2603,202 @@ msgstr "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." -#: nova/api/openstack/compute/limits.py:287 +#: nova/api/openstack/compute/limits.py:288 msgid "This request was rate-limited." msgstr "This request was rate-limited." #: nova/api/openstack/compute/server_metadata.py:37 #: nova/api/openstack/compute/server_metadata.py:122 #: nova/api/openstack/compute/server_metadata.py:177 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:41 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:43 msgid "Server does not exist" msgstr "Server does not exist" #: nova/api/openstack/compute/server_metadata.py:157 #: nova/api/openstack/compute/server_metadata.py:168 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:144 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:156 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:146 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:158 msgid "Metadata item was not found" msgstr "Metadata item was not found" -#: nova/api/openstack/compute/servers.py:81 -msgid "" -"XML support has been deprecated and may be removed as early as the Juno " -"release." 
-msgstr "" - -#: nova/api/openstack/compute/servers.py:551 -#: nova/api/openstack/compute/contrib/cells.py:423 -#: nova/api/openstack/compute/plugins/v3/cells.py:331 +#: nova/api/openstack/compute/servers.py:554 +#: nova/api/openstack/compute/contrib/cells.py:427 msgid "Invalid changes-since value" msgstr "Invalid changes-since value" -#: nova/api/openstack/compute/servers.py:570 -#: nova/api/openstack/compute/plugins/v3/servers.py:234 +#: nova/api/openstack/compute/servers.py:573 +#: nova/api/openstack/compute/plugins/v3/servers.py:257 msgid "Only administrators may list deleted instances" msgstr "Only administrators may list deleted instances" -#: nova/api/openstack/compute/servers.py:606 -#: nova/api/openstack/compute/plugins/v3/servers.py:283 -#, fuzzy, python-format -msgid "Flavor '%s' could not be found " -msgstr "Host '%s' could not be found." - -#: nova/api/openstack/compute/servers.py:625 -#: nova/api/openstack/compute/servers.py:772 -#: nova/api/openstack/compute/servers.py:1081 +#: nova/api/openstack/compute/servers.py:627 +#: nova/api/openstack/compute/servers.py:774 +#: nova/api/openstack/compute/servers.py:1078 #: nova/api/openstack/compute/servers.py:1203 #: nova/api/openstack/compute/servers.py:1388 -#: nova/api/openstack/compute/plugins/v3/servers.py:617 -#: nova/api/openstack/compute/plugins/v3/servers.py:729 -#: nova/api/openstack/compute/plugins/v3/servers.py:848 +#: nova/api/openstack/compute/plugins/v3/servers.py:650 +#: nova/api/openstack/compute/plugins/v3/servers.py:768 +#: nova/api/openstack/compute/plugins/v3/servers.py:889 msgid "Instance could not be found" msgstr "Instance could not be found" -#: nova/api/openstack/compute/servers.py:656 +#: nova/api/openstack/compute/servers.py:658 #, python-format msgid "Bad personality format: missing %s" msgstr "Bad personality format: missing %s" -#: nova/api/openstack/compute/servers.py:659 +#: nova/api/openstack/compute/servers.py:661 msgid "Bad personality format" msgstr "Bad personality 
format" -#: nova/api/openstack/compute/servers.py:662 +#: nova/api/openstack/compute/servers.py:664 #, python-format msgid "Personality content for %s cannot be decoded" msgstr "Personality content for %s cannot be decoded" -#: nova/api/openstack/compute/servers.py:677 +#: nova/api/openstack/compute/servers.py:679 msgid "Unknown argument : port" msgstr "" -#: nova/api/openstack/compute/servers.py:680 -#: nova/api/openstack/compute/plugins/v3/servers.py:338 +#: nova/api/openstack/compute/servers.py:682 +#: nova/api/openstack/compute/plugins/v3/servers.py:361 #, python-format msgid "Bad port format: port uuid is not in proper format (%s)" msgstr "Bad port format: port uuid is not in proper format (%s)" -#: nova/api/openstack/compute/servers.py:690 -#: nova/api/openstack/compute/plugins/v3/servers.py:354 +#: nova/api/openstack/compute/servers.py:692 +#: nova/api/openstack/compute/plugins/v3/servers.py:377 #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "Bad networks format: network uuid is not in proper format (%s)" -#: nova/api/openstack/compute/servers.py:701 -#: nova/api/openstack/compute/plugins/v3/servers.py:327 +#: nova/api/openstack/compute/servers.py:703 +#: nova/api/openstack/compute/plugins/v3/servers.py:350 #, python-format msgid "Invalid fixed IP address (%s)" msgstr "Invalid fixed IP address (%s)" -#: nova/api/openstack/compute/servers.py:714 -#: nova/api/openstack/compute/plugins/v3/servers.py:369 +#: nova/api/openstack/compute/servers.py:716 +#: nova/api/openstack/compute/plugins/v3/servers.py:392 #, python-format msgid "Duplicate networks (%s) are not allowed" msgstr "Duplicate networks (%s) are not allowed" -#: nova/api/openstack/compute/servers.py:720 -#: nova/api/openstack/compute/plugins/v3/servers.py:375 +#: nova/api/openstack/compute/servers.py:722 +#: nova/api/openstack/compute/plugins/v3/servers.py:398 #, python-format msgid "Bad network format: missing %s" msgstr "Bad network format: missing %s" 
-#: nova/api/openstack/compute/servers.py:723 -#: nova/api/openstack/compute/servers.py:824 -#: nova/api/openstack/compute/plugins/v3/servers.py:378 +#: nova/api/openstack/compute/servers.py:725 +#: nova/api/openstack/compute/servers.py:826 +#: nova/api/openstack/compute/plugins/v3/servers.py:401 msgid "Bad networks format" msgstr "Bad networks format" -#: nova/api/openstack/compute/servers.py:749 +#: nova/api/openstack/compute/servers.py:751 msgid "Userdata content cannot be decoded" msgstr "Userdata content cannot be decoded" -#: nova/api/openstack/compute/servers.py:754 +#: nova/api/openstack/compute/servers.py:756 msgid "accessIPv4 is not proper IPv4 format" msgstr "accessIPv4 is not proper IPv4 format" -#: nova/api/openstack/compute/servers.py:759 +#: nova/api/openstack/compute/servers.py:761 msgid "accessIPv6 is not proper IPv6 format" msgstr "accessIPv6 is not proper IPv6 format" -#: nova/api/openstack/compute/servers.py:788 -#: nova/api/openstack/compute/plugins/v3/servers.py:419 +#: nova/api/openstack/compute/servers.py:790 +#: nova/api/openstack/compute/plugins/v3/servers.py:443 msgid "Server name is not defined" msgstr "Server name is not defined" -#: nova/api/openstack/compute/servers.py:840 -#: nova/api/openstack/compute/servers.py:968 +#: nova/api/openstack/compute/servers.py:842 +#: nova/api/openstack/compute/servers.py:970 msgid "Invalid flavorRef provided." msgstr "Invalid flavorRef provided." -#: nova/api/openstack/compute/servers.py:880 +#: nova/api/openstack/compute/servers.py:882 msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." 
msgstr "" -#: nova/api/openstack/compute/servers.py:965 -#: nova/api/openstack/compute/plugins/v3/servers.py:495 +#: nova/api/openstack/compute/servers.py:967 +#: nova/api/openstack/compute/plugins/v3/servers.py:519 msgid "Can not find requested image" msgstr "Can not find requested image" -#: nova/api/openstack/compute/servers.py:971 -#: nova/api/openstack/compute/plugins/v3/servers.py:501 +#: nova/api/openstack/compute/servers.py:973 +#: nova/api/openstack/compute/plugins/v3/servers.py:525 msgid "Invalid key_name provided." msgstr "Invalid key_name provided." -#: nova/api/openstack/compute/servers.py:974 -#: nova/api/openstack/compute/plugins/v3/servers.py:504 +#: nova/api/openstack/compute/servers.py:976 +#: nova/api/openstack/compute/plugins/v3/servers.py:528 msgid "Invalid config_drive provided." msgstr "" -#: nova/api/openstack/compute/servers.py:1066 +#: nova/api/openstack/compute/servers.py:1063 msgid "HostId cannot be updated." msgstr "HostId cannot be updated." -#: nova/api/openstack/compute/servers.py:1070 +#: nova/api/openstack/compute/servers.py:1067 #, fuzzy msgid "Personality cannot be updated." msgstr "HostId cannot be updated." -#: nova/api/openstack/compute/servers.py:1096 -#: nova/api/openstack/compute/servers.py:1115 -#: nova/api/openstack/compute/plugins/v3/servers.py:628 -#: nova/api/openstack/compute/plugins/v3/servers.py:644 +#: nova/api/openstack/compute/servers.py:1093 +#: nova/api/openstack/compute/servers.py:1112 +#: nova/api/openstack/compute/plugins/v3/servers.py:662 +#: nova/api/openstack/compute/plugins/v3/servers.py:679 msgid "Instance has not been resized." msgstr "Instance has not been resized." -#: nova/api/openstack/compute/servers.py:1118 -#: nova/api/openstack/compute/plugins/v3/servers.py:647 +#: nova/api/openstack/compute/servers.py:1115 +#: nova/api/openstack/compute/plugins/v3/servers.py:682 #, fuzzy msgid "Flavor used by the instance could not be found." msgstr "Instance %(instance_id)s could not be found." 
-#: nova/api/openstack/compute/servers.py:1134 -#: nova/api/openstack/compute/plugins/v3/servers.py:661 +#: nova/api/openstack/compute/servers.py:1131 +#: nova/api/openstack/compute/plugins/v3/servers.py:697 msgid "Argument 'type' for reboot must be a string" msgstr "" -#: nova/api/openstack/compute/servers.py:1140 -#: nova/api/openstack/compute/plugins/v3/servers.py:667 +#: nova/api/openstack/compute/servers.py:1137 +#: nova/api/openstack/compute/plugins/v3/servers.py:703 msgid "Argument 'type' for reboot is not HARD or SOFT" msgstr "Argument 'type' for reboot is not HARD or SOFT" -#: nova/api/openstack/compute/servers.py:1144 -#: nova/api/openstack/compute/plugins/v3/servers.py:671 +#: nova/api/openstack/compute/servers.py:1141 +#: nova/api/openstack/compute/plugins/v3/servers.py:707 msgid "Missing argument 'type' for reboot" msgstr "Missing argument 'type' for reboot" -#: nova/api/openstack/compute/servers.py:1171 -#: nova/api/openstack/compute/plugins/v3/servers.py:699 +#: nova/api/openstack/compute/servers.py:1168 +#: nova/api/openstack/compute/plugins/v3/servers.py:735 msgid "Unable to locate requested flavor." msgstr "Unable to locate requested flavor." -#: nova/api/openstack/compute/servers.py:1174 -#: nova/api/openstack/compute/plugins/v3/servers.py:702 +#: nova/api/openstack/compute/servers.py:1171 +#: nova/api/openstack/compute/plugins/v3/servers.py:738 msgid "Resize requires a flavor change." msgstr "Resize requires a flavor change." -#: nova/api/openstack/compute/servers.py:1182 -#: nova/api/openstack/compute/plugins/v3/servers.py:710 +#: nova/api/openstack/compute/servers.py:1181 +#: nova/api/openstack/compute/plugins/v3/servers.py:748 msgid "You are not authorized to access the image the instance was started with." 
msgstr "" -#: nova/api/openstack/compute/servers.py:1186 -#: nova/api/openstack/compute/plugins/v3/servers.py:714 +#: nova/api/openstack/compute/servers.py:1185 +#: nova/api/openstack/compute/plugins/v3/servers.py:752 #, fuzzy msgid "Image that the instance was started with could not be found." msgstr "Instance %(instance_id)s could not be found." -#: nova/api/openstack/compute/servers.py:1190 -#: nova/api/openstack/compute/plugins/v3/servers.py:718 +#: nova/api/openstack/compute/servers.py:1189 +#: nova/api/openstack/compute/plugins/v3/servers.py:756 #, fuzzy msgid "Invalid instance image." msgstr "%s is a valid instance name" @@ -2969,177 +2843,132 @@ msgid "Could not parse imageRef from request." msgstr "Could not parse imageRef from request." #: nova/api/openstack/compute/servers.py:1394 -#: nova/api/openstack/compute/plugins/v3/servers.py:854 +#: nova/api/openstack/compute/plugins/v3/servers.py:895 msgid "Cannot find image for rebuild" msgstr "Cannot find image for rebuild" -#: nova/api/openstack/compute/servers.py:1427 +#: nova/api/openstack/compute/servers.py:1428 msgid "createImage entity requires name attribute" msgstr "createImage entity requires name attribute" -#: nova/api/openstack/compute/servers.py:1436 -#: nova/api/openstack/compute/contrib/admin_actions.py:288 -#: nova/api/openstack/compute/plugins/v3/servers.py:894 +#: nova/api/openstack/compute/servers.py:1437 +#: nova/api/openstack/compute/contrib/admin_actions.py:283 +#: nova/api/openstack/compute/plugins/v3/servers.py:936 msgid "Invalid metadata" msgstr "Invalid metadata" -#: nova/api/openstack/compute/servers.py:1494 +#: nova/api/openstack/compute/servers.py:1495 msgid "Invalid adminPass" msgstr "Invalid adminPass" -#: nova/api/openstack/compute/contrib/admin_actions.py:63 -#: nova/api/openstack/compute/contrib/admin_actions.py:88 -#: nova/api/openstack/compute/contrib/admin_actions.py:113 -#: nova/api/openstack/compute/contrib/admin_actions.py:135 -#: 
nova/api/openstack/compute/contrib/admin_actions.py:178 -#: nova/api/openstack/compute/contrib/admin_actions.py:197 -#: nova/api/openstack/compute/contrib/admin_actions.py:216 -#: nova/api/openstack/compute/contrib/admin_actions.py:235 -#: nova/api/openstack/compute/contrib/admin_actions.py:393 -#: nova/api/openstack/compute/contrib/multinic.py:43 +#: nova/api/openstack/compute/contrib/admin_actions.py:64 +#: nova/api/openstack/compute/contrib/admin_actions.py:86 +#: nova/api/openstack/compute/contrib/admin_actions.py:108 +#: nova/api/openstack/compute/contrib/admin_actions.py:130 +#: nova/api/openstack/compute/contrib/admin_actions.py:173 +#: nova/api/openstack/compute/contrib/admin_actions.py:192 +#: nova/api/openstack/compute/contrib/admin_actions.py:211 +#: nova/api/openstack/compute/contrib/admin_actions.py:230 +#: nova/api/openstack/compute/contrib/admin_actions.py:388 +#: nova/api/openstack/compute/contrib/multinic.py:44 #: nova/api/openstack/compute/contrib/rescue.py:45 #: nova/api/openstack/compute/contrib/shelve.py:43 msgid "Server not found" msgstr "Server not found" -#: nova/api/openstack/compute/contrib/admin_actions.py:66 -#: nova/api/openstack/compute/plugins/v3/pause_server.py:59 -#: nova/api/openstack/compute/plugins/v3/pause_server.py:81 -msgid "Virt driver does not implement pause function." -msgstr "" - -#: nova/api/openstack/compute/contrib/admin_actions.py:70 -#, python-format -msgid "Compute.api::pause %s" -msgstr "Compute.api::pause %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:91 -msgid "Virt driver does not implement unpause function." 
-msgstr "" - -#: nova/api/openstack/compute/contrib/admin_actions.py:95 -#, python-format -msgid "Compute.api::unpause %s" -msgstr "Compute.api::unpause %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:117 -#, python-format -msgid "compute.api::suspend %s" -msgstr "compute.api::suspend %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:139 -#, python-format -msgid "compute.api::resume %s" -msgstr "compute.api::resume %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:165 -#, python-format -msgid "Error in migrate %s" -msgstr "Error in migrate %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:184 -#, python-format -msgid "Compute.api::reset_network %s" -msgstr "Compute.api::reset_network %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:203 -#, python-format -msgid "Compute.api::inject_network_info %s" -msgstr "Compute.api::inject_network_info %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:220 -#, python-format -msgid "Compute.api::lock %s" -msgstr "Compute.api::lock %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:239 -#, python-format -msgid "Compute.api::unlock %s" -msgstr "Compute.api::unlock %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:265 +#: nova/api/openstack/compute/contrib/admin_actions.py:260 #, python-format msgid "createBackup entity requires %s attribute" msgstr "createBackup entity requires %s attribute" -#: nova/api/openstack/compute/contrib/admin_actions.py:269 +#: nova/api/openstack/compute/contrib/admin_actions.py:264 msgid "Malformed createBackup entity" msgstr "Malformed createBackup entity" -#: nova/api/openstack/compute/contrib/admin_actions.py:275 +#: nova/api/openstack/compute/contrib/admin_actions.py:270 msgid "createBackup attribute 'rotation' must be an integer" msgstr "createBackup attribute 'rotation' must be an integer" -#: nova/api/openstack/compute/contrib/admin_actions.py:278 +#: 
nova/api/openstack/compute/contrib/admin_actions.py:273 #, fuzzy msgid "createBackup attribute 'rotation' must be greater than or equal to zero" msgstr "createBackup attribute 'rotation' must be an integer" -#: nova/api/openstack/compute/contrib/admin_actions.py:294 +#: nova/api/openstack/compute/contrib/admin_actions.py:289 #: nova/api/openstack/compute/contrib/console_output.py:46 #: nova/api/openstack/compute/contrib/server_start_stop.py:40 msgid "Instance not found" msgstr "Instance not found" -#: nova/api/openstack/compute/contrib/admin_actions.py:325 +#: nova/api/openstack/compute/contrib/admin_actions.py:320 msgid "" "host, block_migration and disk_over_commit must be specified for live " "migration." msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:362 +#: nova/api/openstack/compute/contrib/admin_actions.py:357 #, fuzzy, python-format msgid "Live migration of instance %s to another host failed" msgstr "Live migration of instance %(id)s to host %(host)s failed" -#: nova/api/openstack/compute/contrib/admin_actions.py:365 +#: nova/api/openstack/compute/contrib/admin_actions.py:360 #, python-format msgid "Live migration of instance %(id)s to host %(host)s failed" msgstr "Live migration of instance %(id)s to host %(host)s failed" -#: nova/api/openstack/compute/contrib/admin_actions.py:383 -#: nova/api/openstack/compute/plugins/v3/admin_actions.py:83 +#: nova/api/openstack/compute/contrib/admin_actions.py:378 #, python-format msgid "Desired state must be specified. Valid states are: %s" msgstr "Desired state must be specified. 
Valid states are: %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:397 +#: nova/api/openstack/compute/contrib/agents.py:100 +#: nova/api/openstack/compute/contrib/agents.py:118 +#: nova/api/openstack/compute/contrib/agents.py:156 +#: nova/api/openstack/compute/contrib/cloudpipe_update.py:55 #, python-format -msgid "Compute.api::resetState %s" -msgstr "Compute.api::resetState %s" +msgid "Invalid request body: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/aggregates.py:39 +msgid "Only host parameter can be specified" +msgstr "" + +#: nova/api/openstack/compute/contrib/aggregates.py:42 +msgid "Host parameter must be specified" +msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:161 +#: nova/api/openstack/compute/contrib/aggregates.py:168 #, python-format msgid "Aggregates does not have %s action" msgstr "Aggregates does not have %s action" -#: nova/api/openstack/compute/contrib/aggregates.py:165 +#: nova/api/openstack/compute/contrib/aggregates.py:172 #: nova/api/openstack/compute/contrib/flavormanage.py:55 #: nova/api/openstack/compute/contrib/keypairs.py:86 #: nova/api/openstack/compute/plugins/v3/aggregates.py:169 msgid "Invalid request body" msgstr "Invalid request body" -#: nova/api/openstack/compute/contrib/aggregates.py:175 -#: nova/api/openstack/compute/contrib/aggregates.py:180 +#: nova/api/openstack/compute/contrib/aggregates.py:182 +#: nova/api/openstack/compute/contrib/aggregates.py:187 #, python-format msgid "Cannot add host %(host)s in aggregate %(id)s" msgstr "Cannot add host %(host)s in aggregate %(id)s" -#: nova/api/openstack/compute/contrib/aggregates.py:194 -#: nova/api/openstack/compute/contrib/aggregates.py:198 +#: nova/api/openstack/compute/contrib/aggregates.py:201 +#: nova/api/openstack/compute/contrib/aggregates.py:205 #: nova/api/openstack/compute/plugins/v3/aggregates.py:153 #: nova/api/openstack/compute/plugins/v3/aggregates.py:157 #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" 
msgstr "Cannot remove host %(host)s in aggregate %(id)s" -#: nova/api/openstack/compute/contrib/aggregates.py:217 +#: nova/api/openstack/compute/contrib/aggregates.py:224 #: nova/api/openstack/compute/plugins/v3/aggregates.py:177 msgid "The value of metadata must be a dict" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:229 +#: nova/api/openstack/compute/contrib/aggregates.py:237 #, python-format msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" msgstr "Cannot set metadata %(metadata)s in aggregate %(id)s" @@ -3155,32 +2984,36 @@ msgstr "" msgid "Delete snapshot with id: %s" msgstr "Delete snapshot with id: %s" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:104 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:105 #, fuzzy msgid "Attach interface" msgstr "Failed to add interface: %s" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:119 -#: nova/api/openstack/compute/contrib/attach_interfaces.py:154 -#: nova/api/openstack/compute/contrib/attach_interfaces.py:177 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:169 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:120 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:158 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:184 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:174 +#: nova/network/security_group/neutron_driver.py:510 +#: nova/network/security_group/neutron_driver.py:514 +#: nova/network/security_group/neutron_driver.py:518 +#: nova/network/security_group/neutron_driver.py:522 +#: nova/network/security_group/neutron_driver.py:526 #, fuzzy msgid "Network driver does not support this function." msgstr "Virt driver does not implement uptime function." 
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:123 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:124 #, fuzzy msgid "Failed to attach interface" msgstr "Failed to add interface: %s" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:130 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:131 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:134 #, fuzzy msgid "Attachments update is not supported" msgstr "attribute not supported: %s" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:142 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:142 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:146 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:144 #, fuzzy, python-format msgid "Detach interface %s" msgstr "Starting VLAN interface %s" @@ -3194,42 +3027,35 @@ msgstr "" msgid "Must specify id or address" msgstr "" -#: nova/api/openstack/compute/contrib/cells.py:252 +#: nova/api/openstack/compute/contrib/cells.py:250 #, fuzzy, python-format msgid "Cell %(id)s not found." msgstr "Rule (%s) not found" -#: nova/api/openstack/compute/contrib/cells.py:285 -#: nova/api/openstack/compute/plugins/v3/cells.py:192 +#: nova/api/openstack/compute/contrib/cells.py:286 #, fuzzy msgid "Cell name cannot be empty" msgstr "Security group name cannot be empty" #: nova/api/openstack/compute/contrib/cells.py:289 -#: nova/api/openstack/compute/plugins/v3/cells.py:196 msgid "Cell name cannot contain '!' 
or '.'" msgstr "" -#: nova/api/openstack/compute/contrib/cells.py:296 -#: nova/api/openstack/compute/plugins/v3/cells.py:203 +#: nova/api/openstack/compute/contrib/cells.py:295 msgid "Cell type must be 'parent' or 'child'" msgstr "" -#: nova/api/openstack/compute/contrib/cells.py:352 -#: nova/api/openstack/compute/contrib/cells.py:376 -#: nova/api/openstack/compute/plugins/v3/cells.py:259 -#: nova/api/openstack/compute/plugins/v3/cells.py:282 +#: nova/api/openstack/compute/contrib/cells.py:353 +#: nova/api/openstack/compute/contrib/cells.py:378 #, fuzzy msgid "No cell information in request" msgstr "Block device information present: %s" #: nova/api/openstack/compute/contrib/cells.py:357 -#: nova/api/openstack/compute/plugins/v3/cells.py:264 msgid "No cell name in request" msgstr "" -#: nova/api/openstack/compute/contrib/cells.py:411 -#: nova/api/openstack/compute/plugins/v3/cells.py:319 +#: nova/api/openstack/compute/contrib/cells.py:415 msgid "Only 'updated_since', 'project_id' and 'deleted' are understood." msgstr "" @@ -3303,24 +3129,27 @@ msgstr "" msgid "%s must be either 'MANUAL' or 'AUTO'." msgstr "%s must be either 'MANUAL' or 'AUTO'." -#: nova/api/openstack/compute/contrib/evacuate.py:53 -#, fuzzy -msgid "host and onSharedStorage must be specified." -msgstr "host and block_migration must be specified." +#: nova/api/openstack/compute/contrib/evacuate.py:54 +msgid "host must be specified." +msgstr "" #: nova/api/openstack/compute/contrib/evacuate.py:61 +msgid "onSharedStorage must be specified." +msgstr "" + +#: nova/api/openstack/compute/contrib/evacuate.py:69 #: nova/api/openstack/compute/plugins/v3/evacuate.py:67 msgid "admin password can't be changed on existing disk" msgstr "" -#: nova/api/openstack/compute/contrib/evacuate.py:71 -#: nova/api/openstack/compute/plugins/v3/evacuate.py:77 +#: nova/api/openstack/compute/contrib/evacuate.py:80 +#: nova/api/openstack/compute/plugins/v3/evacuate.py:78 #, python-format msgid "Compute host %s not found." 
msgstr "" -#: nova/api/openstack/compute/contrib/evacuate.py:77 -#: nova/api/openstack/compute/plugins/v3/evacuate.py:83 +#: nova/api/openstack/compute/contrib/evacuate.py:86 +#: nova/api/openstack/compute/plugins/v3/evacuate.py:84 msgid "The target host can't be the same one." msgstr "" @@ -3373,87 +3202,87 @@ msgstr "" msgid "DNS entries not found." msgstr "Instance not found" -#: nova/api/openstack/compute/contrib/floating_ips.py:129 -#: nova/api/openstack/compute/contrib/floating_ips.py:183 +#: nova/api/openstack/compute/contrib/floating_ips.py:130 +#: nova/api/openstack/compute/contrib/floating_ips.py:186 #, python-format msgid "Floating ip not found for id %s" msgstr "Floating ip not found for id %s" -#: nova/api/openstack/compute/contrib/floating_ips.py:162 +#: nova/api/openstack/compute/contrib/floating_ips.py:163 #, python-format msgid "No more floating ips in pool %s." msgstr "No more floating ips in pool %s." -#: nova/api/openstack/compute/contrib/floating_ips.py:164 +#: nova/api/openstack/compute/contrib/floating_ips.py:165 msgid "No more floating ips available." msgstr "No more floating ips available." -#: nova/api/openstack/compute/contrib/floating_ips.py:168 +#: nova/api/openstack/compute/contrib/floating_ips.py:169 #, python-format msgid "IP allocation over quota in pool %s." msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:170 +#: nova/api/openstack/compute/contrib/floating_ips.py:171 msgid "IP allocation over quota." 
msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:220 -#: nova/api/openstack/compute/contrib/floating_ips.py:285 -#: nova/api/openstack/compute/contrib/security_groups.py:482 +#: nova/api/openstack/compute/contrib/floating_ips.py:223 +#: nova/api/openstack/compute/contrib/floating_ips.py:288 +#: nova/api/openstack/compute/contrib/security_groups.py:488 msgid "Missing parameter dict" msgstr "Missing parameter dict" -#: nova/api/openstack/compute/contrib/floating_ips.py:223 -#: nova/api/openstack/compute/contrib/floating_ips.py:288 +#: nova/api/openstack/compute/contrib/floating_ips.py:226 +#: nova/api/openstack/compute/contrib/floating_ips.py:291 msgid "Address not specified" msgstr "Address not specified" -#: nova/api/openstack/compute/contrib/floating_ips.py:229 +#: nova/api/openstack/compute/contrib/floating_ips.py:232 msgid "No nw_info cache associated with instance" msgstr "No nw_info cache associated with instance" -#: nova/api/openstack/compute/contrib/floating_ips.py:234 +#: nova/api/openstack/compute/contrib/floating_ips.py:237 msgid "No fixed ips associated to instance" msgstr "No fixed ips associated to instance" -#: nova/api/openstack/compute/contrib/floating_ips.py:245 +#: nova/api/openstack/compute/contrib/floating_ips.py:248 #, fuzzy msgid "Specified fixed address not assigned to instance" msgstr "No fixed ips associated to instance" -#: nova/api/openstack/compute/contrib/floating_ips.py:259 +#: nova/api/openstack/compute/contrib/floating_ips.py:262 msgid "floating ip is already associated" msgstr "floating ip is already associated" -#: nova/api/openstack/compute/contrib/floating_ips.py:262 +#: nova/api/openstack/compute/contrib/floating_ips.py:265 msgid "l3driver call to add floating ip failed" msgstr "l3driver call to add floating ip failed" -#: nova/api/openstack/compute/contrib/floating_ips.py:265 -#: nova/api/openstack/compute/contrib/floating_ips.py:296 +#: nova/api/openstack/compute/contrib/floating_ips.py:268 +#: 
nova/api/openstack/compute/contrib/floating_ips.py:299 msgid "floating ip not found" msgstr "floating ip not found" -#: nova/api/openstack/compute/contrib/floating_ips.py:270 +#: nova/api/openstack/compute/contrib/floating_ips.py:273 msgid "Error. Unable to associate floating ip" msgstr "Error. Unable to associate floating ip" -#: nova/api/openstack/compute/contrib/floating_ips.py:311 +#: nova/api/openstack/compute/contrib/floating_ips.py:314 msgid "Floating ip is not associated" msgstr "Floating ip is not associated" -#: nova/api/openstack/compute/contrib/floating_ips.py:315 +#: nova/api/openstack/compute/contrib/floating_ips.py:318 #, fuzzy, python-format msgid "Floating ip %(address)s is not associated with instance %(id)s." msgstr "Floating ip %(address)s is not associated." -#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:118 +#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:116 #: nova/api/openstack/compute/contrib/services.py:173 #: nova/api/openstack/compute/plugins/v3/services.py:124 msgid "Unknown action" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:146 +#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:144 #: nova/cmd/manage.py:417 #, python-format msgid "/%s should be specified as single address(es) not in cidr format" @@ -3464,82 +3293,82 @@ msgstr "" msgid "fping utility is not found." 
msgstr "floating ip not found" -#: nova/api/openstack/compute/contrib/hosts.py:183 +#: nova/api/openstack/compute/contrib/hosts.py:185 #, python-format msgid "Invalid update setting: '%s'" msgstr "Invalid update setting: '%s'" -#: nova/api/openstack/compute/contrib/hosts.py:186 +#: nova/api/openstack/compute/contrib/hosts.py:188 #, python-format msgid "Invalid status: '%s'" msgstr "Invalid status: '%s'" -#: nova/api/openstack/compute/contrib/hosts.py:188 +#: nova/api/openstack/compute/contrib/hosts.py:190 #, python-format msgid "Invalid mode: '%s'" msgstr "Invalid mode: '%s'" -#: nova/api/openstack/compute/contrib/hosts.py:190 +#: nova/api/openstack/compute/contrib/hosts.py:192 msgid "'status' or 'maintenance_mode' needed for host update" msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:206 -#: nova/api/openstack/compute/plugins/v3/hosts.py:134 +#: nova/api/openstack/compute/contrib/hosts.py:208 +#: nova/api/openstack/compute/plugins/v3/hosts.py:135 #, fuzzy, python-format msgid "Putting host %(host_name)s in maintenance mode %(mode)s." msgstr "Putting host %(host)s in maintenance mode %(mode)s." -#: nova/api/openstack/compute/contrib/hosts.py:212 -#: nova/api/openstack/compute/plugins/v3/hosts.py:140 +#: nova/api/openstack/compute/contrib/hosts.py:214 +#: nova/api/openstack/compute/plugins/v3/hosts.py:141 #, fuzzy msgid "Virt driver does not implement host maintenance mode." msgstr "Virt driver does not implement uptime function." -#: nova/api/openstack/compute/contrib/hosts.py:227 -#: nova/api/openstack/compute/plugins/v3/hosts.py:156 +#: nova/api/openstack/compute/contrib/hosts.py:229 +#: nova/api/openstack/compute/plugins/v3/hosts.py:157 #, fuzzy, python-format msgid "Enabling host %s." 
msgstr "Calling setter %s" -#: nova/api/openstack/compute/contrib/hosts.py:229 -#: nova/api/openstack/compute/plugins/v3/hosts.py:158 +#: nova/api/openstack/compute/contrib/hosts.py:231 +#: nova/api/openstack/compute/plugins/v3/hosts.py:159 #, fuzzy, python-format msgid "Disabling host %s." msgstr "Updating host stats" -#: nova/api/openstack/compute/contrib/hosts.py:234 -#: nova/api/openstack/compute/plugins/v3/hosts.py:163 +#: nova/api/openstack/compute/contrib/hosts.py:236 +#: nova/api/openstack/compute/plugins/v3/hosts.py:164 #, fuzzy msgid "Virt driver does not implement host disabled status." msgstr "Virt driver does not implement uptime function." -#: nova/api/openstack/compute/contrib/hosts.py:250 -#: nova/api/openstack/compute/plugins/v3/hosts.py:181 +#: nova/api/openstack/compute/contrib/hosts.py:252 +#: nova/api/openstack/compute/plugins/v3/hosts.py:182 #, fuzzy msgid "Virt driver does not implement host power management." msgstr "Virt driver does not implement uptime function." -#: nova/api/openstack/compute/contrib/hosts.py:336 -#: nova/api/openstack/compute/plugins/v3/hosts.py:274 +#: nova/api/openstack/compute/contrib/hosts.py:338 +#: nova/api/openstack/compute/plugins/v3/hosts.py:275 msgid "Describe-resource is admin only functionality" msgstr "Describe-resource is admin only functionality" -#: nova/api/openstack/compute/contrib/hypervisors.py:193 -#: nova/api/openstack/compute/contrib/hypervisors.py:205 -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:93 -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:105 -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:140 +#: nova/api/openstack/compute/contrib/hypervisors.py:208 +#: nova/api/openstack/compute/contrib/hypervisors.py:220 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:100 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:112 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:147 #, python-format msgid "Hypervisor with ID '%s' could not be found." 
msgstr "Hypervisor with ID '%s' could not be found." -#: nova/api/openstack/compute/contrib/hypervisors.py:213 -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:113 +#: nova/api/openstack/compute/contrib/hypervisors.py:228 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:120 msgid "Virt driver does not implement uptime function." msgstr "Virt driver does not implement uptime function." -#: nova/api/openstack/compute/contrib/hypervisors.py:229 -#: nova/api/openstack/compute/contrib/hypervisors.py:239 +#: nova/api/openstack/compute/contrib/hypervisors.py:244 +#: nova/api/openstack/compute/contrib/hypervisors.py:254 #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "No hypervisor matching '%s' could be found." @@ -3554,27 +3383,22 @@ msgstr "Invalid timestamp for date %s" msgid "Quota exceeded, too many key pairs." msgstr "Quota exceeded, too many key pairs." -#: nova/api/openstack/compute/contrib/multinic.py:54 +#: nova/api/openstack/compute/contrib/multinic.py:55 msgid "Missing 'networkId' argument for addFixedIp" msgstr "Missing 'networkId' argument for addFixedIp" -#: nova/api/openstack/compute/contrib/multinic.py:70 +#: nova/api/openstack/compute/contrib/multinic.py:75 msgid "Missing 'address' argument for removeFixedIp" msgstr "Missing 'address' argument for removeFixedIp" -#: nova/api/openstack/compute/contrib/multinic.py:80 -#, python-format -msgid "Unable to find address %r" -msgstr "Unable to find address %r" - #: nova/api/openstack/compute/contrib/networks_associate.py:40 #: nova/api/openstack/compute/contrib/networks_associate.py:56 #: nova/api/openstack/compute/contrib/networks_associate.py:74 -#: nova/api/openstack/compute/contrib/os_networks.py:78 -#: nova/api/openstack/compute/contrib/os_networks.py:93 -#: nova/api/openstack/compute/contrib/os_networks.py:106 -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:110 -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:137 +#: 
nova/api/openstack/compute/contrib/os_networks.py:79 +#: nova/api/openstack/compute/contrib/os_networks.py:94 +#: nova/api/openstack/compute/contrib/os_networks.py:107 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:112 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:144 msgid "Network not found" msgstr "Network not found" @@ -3590,70 +3414,55 @@ msgstr "" msgid "Associate host is not implemented by the configured Network API" msgstr "" -#: nova/api/openstack/compute/contrib/os_networks.py:81 +#: nova/api/openstack/compute/contrib/os_networks.py:82 msgid "Disassociate network is not implemented by the configured Network API" msgstr "" -#: nova/api/openstack/compute/contrib/os_networks.py:100 -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125 -#, python-format -msgid "Deleting network with id %s" -msgstr "Deleting network with id %s" - -#: nova/api/openstack/compute/contrib/os_networks.py:118 +#: nova/api/openstack/compute/contrib/os_networks.py:119 msgid "Missing network in body" msgstr "Missing network in body" -#: nova/api/openstack/compute/contrib/os_networks.py:122 +#: nova/api/openstack/compute/contrib/os_networks.py:123 msgid "Network label is required" msgstr "Network label is required" -#: nova/api/openstack/compute/contrib/os_networks.py:126 +#: nova/api/openstack/compute/contrib/os_networks.py:127 msgid "Network cidr or cidr_v6 is required" msgstr "Network cidr or cidr_v6 is required" -#: nova/api/openstack/compute/contrib/os_networks.py:152 +#: nova/api/openstack/compute/contrib/os_networks.py:153 msgid "VLAN support must be enabled" msgstr "" -#: nova/api/openstack/compute/contrib/os_networks.py:155 +#: nova/api/openstack/compute/contrib/os_networks.py:156 #, python-format msgid "Cannot associate network %(network)s with project %(project)s: %(message)s" msgstr "Cannot associate network %(network)s with project %(project)s: %(message)s" -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:83 -msgid "Failed 
to get default networks" -msgstr "" - -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:122 -#, fuzzy -msgid "Failed to update usages deallocating network." -msgstr "Failed to update usages deallocating floating IP" - -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:157 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:168 #, fuzzy msgid "No CIDR requested" msgstr "Can not find requested image" -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:163 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:174 msgid "Requested network does not contain enough (2+) usable hosts" msgstr "" -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:167 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:178 msgid "CIDR is malformed." msgstr "" -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:170 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:181 #, fuzzy msgid "Address could not be converted." msgstr "Resource could not be found." -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:178 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:189 #, fuzzy msgid "Quota exceeded, too many networks." msgstr "Quota exceeded, too many key pairs." -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:191 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:202 #, fuzzy msgid "Create networks failed" msgstr "Create failed" @@ -3713,7 +3522,7 @@ msgid "Malformed scheduler_hints attribute" msgstr "Malformed scheduler_hints attribute" #: nova/api/openstack/compute/contrib/security_group_default_rules.py:127 -#: nova/api/openstack/compute/contrib/security_groups.py:387 +#: nova/api/openstack/compute/contrib/security_groups.py:394 msgid "Not enough parameters to build a valid rule." msgstr "Not enough parameters to build a valid rule." 
@@ -3727,81 +3536,80 @@ msgstr "This rule already exists in group %s" msgid "security group default rule not found" msgstr "Security group with rule %(rule_id)s not found." -#: nova/api/openstack/compute/contrib/security_groups.py:395 +#: nova/api/openstack/compute/contrib/security_groups.py:402 #, fuzzy, python-format msgid "Bad prefix for network in cidr %s" msgstr "Bad prefix for to_global_ipv6: %s" -#: nova/api/openstack/compute/contrib/security_groups.py:485 +#: nova/api/openstack/compute/contrib/security_groups.py:491 msgid "Security group not specified" msgstr "Security group not specified" -#: nova/api/openstack/compute/contrib/security_groups.py:489 +#: nova/api/openstack/compute/contrib/security_groups.py:495 msgid "Security group name cannot be empty" msgstr "Security group name cannot be empty" -#: nova/api/openstack/compute/contrib/server_external_events.py:92 +#: nova/api/openstack/compute/contrib/server_external_events.py:93 #: nova/api/openstack/compute/plugins/v3/server_external_events.py:65 #, python-format msgid "event entity requires key %(key)s" msgstr "" -#: nova/api/openstack/compute/contrib/server_external_events.py:96 +#: nova/api/openstack/compute/contrib/server_external_events.py:97 #: nova/api/openstack/compute/plugins/v3/server_external_events.py:69 #, python-format msgid "event entity contains unsupported items: %s" msgstr "" -#: nova/api/openstack/compute/contrib/server_external_events.py:102 +#: nova/api/openstack/compute/contrib/server_external_events.py:103 #: nova/api/openstack/compute/plugins/v3/server_external_events.py:75 #, python-format msgid "Invalid event status `%s'" msgstr "" -#: nova/api/openstack/compute/contrib/server_external_events.py:121 -#: nova/api/openstack/compute/plugins/v3/server_external_events.py:94 +#: nova/api/openstack/compute/contrib/server_external_events.py:126 #, python-format -msgid "Create event %(name)s:%(tag)s for instance %(instance_uuid)s" +msgid "Creating event %(name)s:%(tag)s for instance 
%(instance_uuid)s" msgstr "" -#: nova/api/openstack/compute/contrib/server_external_events.py:130 +#: nova/api/openstack/compute/contrib/server_external_events.py:148 #: nova/api/openstack/compute/plugins/v3/server_external_events.py:103 msgid "No instances found for any event" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:161 +#: nova/api/openstack/compute/contrib/server_groups.py:163 msgid "Conflicting policies configured!" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:166 +#: nova/api/openstack/compute/contrib/server_groups.py:168 #, python-format msgid "Invalid policies: %s" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:171 +#: nova/api/openstack/compute/contrib/server_groups.py:173 msgid "Duplicate policies configured!" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:176 +#: nova/api/openstack/compute/contrib/server_groups.py:178 msgid "the body is invalid." msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:185 +#: nova/api/openstack/compute/contrib/server_groups.py:187 #, python-format msgid "'%s' is either missing or empty." 
msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:191 +#: nova/api/openstack/compute/contrib/server_groups.py:193 #, python-format msgid "Invalid format for name: '%s'" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:199 +#: nova/api/openstack/compute/contrib/server_groups.py:201 #, python-format msgid "'%s' is not a list" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:203 +#: nova/api/openstack/compute/contrib/server_groups.py:205 #, python-format msgid "unsupported fields: %s" msgstr "" @@ -3828,11 +3636,11 @@ msgstr "" msgid "Missing disabled reason field" msgstr "" -#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:230 +#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:231 msgid "Datetime is in invalid format" msgstr "" -#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:249 +#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:250 msgid "Invalid start time. The start time cannot occur after the end time." msgstr "" @@ -3907,12 +3715,12 @@ msgstr "" msgid "Invalid request format for metadata" msgstr "" -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:106 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:109 #, python-format msgid "Attach interface to %s" msgstr "" -#: nova/api/openstack/compute/plugins/v3/cells.py:187 +#: nova/api/openstack/compute/plugins/v3/cells.py:189 #, python-format msgid "Cell %s doesn't exist." 
msgstr "" @@ -3936,83 +3744,101 @@ msgstr "" msgid "Volume %(volume_id)s is not attached to the instance %(server_id)s" msgstr "" -#: nova/api/openstack/compute/plugins/v3/flavors.py:94 +#: nova/api/openstack/compute/plugins/v3/flavors.py:96 #, python-format msgid "Invalid min_ram filter [%s]" msgstr "" -#: nova/api/openstack/compute/plugins/v3/flavors.py:101 +#: nova/api/openstack/compute/plugins/v3/flavors.py:103 #, python-format msgid "Invalid min_disk filter [%s]" msgstr "" -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:125 -msgid "Need parameter 'query' to specify which hypervisor to filter on" +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:132 +msgid "Need parameter 'query' to specify which hypervisor to filter on" +msgstr "" + +#: nova/api/openstack/compute/plugins/v3/pause_server.py:59 +#: nova/api/openstack/compute/plugins/v3/pause_server.py:81 +msgid "Virt driver does not implement pause function." +msgstr "" + +#: nova/api/openstack/compute/plugins/v3/server_actions.py:76 +#, python-format +msgid "Action %s not found" +msgstr "" + +#: nova/api/openstack/compute/plugins/v3/server_diagnostics.py:46 +msgid "Unable to get diagnostics, functionality not implemented" msgstr "" -#: nova/api/openstack/compute/plugins/v3/server_actions.py:76 +#: nova/api/openstack/compute/plugins/v3/server_external_events.py:94 #, python-format -msgid "Action %s not found" +msgid "Create event %(name)s:%(tag)s for instance %(instance_uuid)s" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:212 +#: nova/api/openstack/compute/plugins/v3/servers.py:235 msgid "Invalid changes_since value" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:335 +#: nova/api/openstack/compute/plugins/v3/servers.py:306 +#, fuzzy, python-format +msgid "Flavor '%s' could not be found " +msgstr "Host '%s' could not be found." 
+ +#: nova/api/openstack/compute/plugins/v3/servers.py:358 msgid "Unknown argument: port" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:343 +#: nova/api/openstack/compute/plugins/v3/servers.py:366 #, python-format msgid "" "Specified Fixed IP '%(addr)s' cannot be used with port '%(port)s': port " "already has a Fixed IP allocated." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:412 -#: nova/api/openstack/compute/plugins/v3/servers.py:587 -msgid "The request body is invalid" +#: nova/api/openstack/compute/plugins/v3/servers.py:494 +#: nova/api/openstack/compute/plugins/v3/servers.py:522 +msgid "Invalid flavor_ref provided." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:470 -#: nova/api/openstack/compute/plugins/v3/servers.py:498 -msgid "Invalid flavor_ref provided." +#: nova/api/openstack/compute/plugins/v3/servers.py:620 +msgid "The request body is invalid" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:598 +#: nova/api/openstack/compute/plugins/v3/servers.py:631 msgid "host_id cannot be updated." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:743 +#: nova/api/openstack/compute/plugins/v3/servers.py:782 msgid "Invalid image_ref provided." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:762 +#: nova/api/openstack/compute/plugins/v3/servers.py:801 msgid "Missing image_ref attribute" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:769 +#: nova/api/openstack/compute/plugins/v3/servers.py:808 msgid "Missing flavor_ref attribute" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:782 +#: nova/api/openstack/compute/plugins/v3/servers.py:822 msgid "Resize request has invalid 'flavor_ref' attribute." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:785 +#: nova/api/openstack/compute/plugins/v3/servers.py:825 msgid "Resize requests require 'flavor_ref' attribute." 
msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:801 +#: nova/api/openstack/compute/plugins/v3/servers.py:842 msgid "Could not parse image_ref from request." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:885 +#: nova/api/openstack/compute/plugins/v3/servers.py:927 msgid "create_image entity requires name attribute" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:947 +#: nova/api/openstack/compute/plugins/v3/servers.py:989 msgid "Invalid admin_password" msgstr "" @@ -4020,11 +3846,7 @@ msgstr "" msgid "Disabled reason contains invalid characters or is too long" msgstr "" -#: nova/api/openstack/compute/views/servers.py:197 -msgid "Instance has had its instance_type removed from the DB" -msgstr "Instance has had its instance_type removed from the DB" - -#: nova/api/validation/validators.py:62 +#: nova/api/validation/validators.py:73 #, python-format msgid "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" @@ -4037,78 +3859,78 @@ msgid "" " use of it in production right now may be risky." msgstr "" -#: nova/cells/messaging.py:205 +#: nova/cells/messaging.py:204 #, fuzzy, python-format msgid "Error processing message locally: %(exc)s" msgstr "Error processing message. Skipping it." 
-#: nova/cells/messaging.py:366 nova/cells/messaging.py:374 +#: nova/cells/messaging.py:365 nova/cells/messaging.py:373 #, python-format msgid "destination is %(target_cell)s but routing_path is %(routing_path)s" msgstr "" -#: nova/cells/messaging.py:386 +#: nova/cells/messaging.py:385 #, python-format msgid "Unknown %(cell_type)s when routing to %(target_cell)s" msgstr "" -#: nova/cells/messaging.py:410 +#: nova/cells/messaging.py:409 #, fuzzy, python-format msgid "Error locating next hop for message: %(exc)s" msgstr "no method for message: %s" -#: nova/cells/messaging.py:437 +#: nova/cells/messaging.py:436 #, fuzzy, python-format msgid "Failed to send message to cell: %(next_hop)s: %(exc)s" msgstr "Failed to load extension %(ext_factory)s: %(exc)s" -#: nova/cells/messaging.py:516 +#: nova/cells/messaging.py:515 #, fuzzy, python-format msgid "Error locating next hops for message: %(exc)s" msgstr "no method for message: %s" -#: nova/cells/messaging.py:536 +#: nova/cells/messaging.py:535 #, fuzzy, python-format msgid "Error sending message to next hops: %(exc)s" msgstr "Sending message(s) to: %s" -#: nova/cells/messaging.py:554 +#: nova/cells/messaging.py:553 #, python-format msgid "Error waiting for responses from neighbor cells: %(exc)s" msgstr "" -#: nova/cells/messaging.py:665 +#: nova/cells/messaging.py:664 #, fuzzy, python-format msgid "Unknown method '%(method)s' in compute API" msgstr "Casted '%(method)s' to compute '%(host)s'" -#: nova/cells/messaging.py:1103 +#: nova/cells/messaging.py:1106 #, python-format msgid "Got message to create instance fault: %(instance_fault)s" msgstr "" -#: nova/cells/messaging.py:1126 +#: nova/cells/messaging.py:1129 #, python-format msgid "" "Forcing a sync of instances, project_id=%(projid_str)s, " "updated_since=%(since_str)s" msgstr "" -#: nova/cells/messaging.py:1205 +#: nova/cells/messaging.py:1208 #, python-format msgid "No match when trying to update BDM: %(bdm)s" msgstr "" -#: nova/cells/messaging.py:1680 +#: 
nova/cells/messaging.py:1683 #, python-format msgid "No cell_name for %(method)s() from API" msgstr "" -#: nova/cells/messaging.py:1697 +#: nova/cells/messaging.py:1700 msgid "No cell_name for instance update from API" msgstr "" -#: nova/cells/messaging.py:1860 +#: nova/cells/messaging.py:1863 #, python-format msgid "Returning exception %s to caller" msgstr "Returning exception %s to caller" @@ -4121,33 +3943,38 @@ msgstr "" msgid "Failed to notify cells of BDM destroy." msgstr "" -#: nova/cells/scheduler.py:192 +#: nova/cells/scheduler.py:191 #, python-format msgid "Couldn't communicate with cell '%s'" msgstr "" -#: nova/cells/scheduler.py:196 +#: nova/cells/scheduler.py:195 msgid "Couldn't communicate with any cells" msgstr "" -#: nova/cells/scheduler.py:234 +#: nova/cells/scheduler.py:233 #, python-format msgid "" "No cells available when scheduling. Will retry in %(sleep_time)s " "second(s)" msgstr "" -#: nova/cells/scheduler.py:240 +#: nova/cells/scheduler.py:239 #, fuzzy, python-format msgid "Error scheduling instances %(instance_uuids)s" msgstr "Destroying VDIs for Instance %(instance_uuid)s" -#: nova/cells/state.py:352 +#: nova/cells/state.py:182 +#, python-format +msgid "DB error: %s" +msgstr "DB error: %s" + +#: nova/cells/state.py:363 #, python-format msgid "Unknown cell '%(cell_name)s' when trying to update capabilities" msgstr "" -#: nova/cells/state.py:367 +#: nova/cells/state.py:378 #, python-format msgid "Unknown cell '%(cell_name)s' when trying to update capacities" msgstr "" @@ -4278,17 +4105,17 @@ msgstr "" msgid "No db access allowed in nova-compute: %s" msgstr "" -#: nova/cmd/dhcpbridge.py:109 +#: nova/cmd/dhcpbridge.py:108 #, python-format msgid "No db access allowed in nova-dhcpbridge: %s" msgstr "" -#: nova/cmd/dhcpbridge.py:132 +#: nova/cmd/dhcpbridge.py:131 #, python-format msgid "Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'" msgstr "" -#: nova/cmd/dhcpbridge.py:142 +#: nova/cmd/dhcpbridge.py:141 msgid "Environment variable 
'NETWORK_ID' must be set." msgstr "" @@ -4622,673 +4449,516 @@ msgstr "" msgid "No db access allowed in nova-network: %s" msgstr "" -#: nova/compute/api.py:353 +#: nova/compute/api.py:355 msgid "Cannot run any more instances of this type." msgstr "Cannot run any more instances of this type." -#: nova/compute/api.py:360 +#: nova/compute/api.py:362 #, python-format msgid "Can only run %s more instances of this type." msgstr "Can only run %s more instances of this type." -#: nova/compute/api.py:372 +#: nova/compute/api.py:374 #, python-format msgid "" "%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)d " "instances. %(msg)s" msgstr "" -#: nova/compute/api.py:376 +#: nova/compute/api.py:378 #, python-format msgid "" "%(overs)s quota exceeded for %(pid)s, tried to run between %(min_count)d " "and %(max_count)d instances. %(msg)s" msgstr "" -#: nova/compute/api.py:397 +#: nova/compute/api.py:399 msgid "Metadata type should be dict." msgstr "" -#: nova/compute/api.py:403 -#, python-format -msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" -msgstr "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" - -#: nova/compute/api.py:415 -#, python-format -msgid "Metadata property key '%s' is not a string." -msgstr "" - -#: nova/compute/api.py:418 -#, python-format -msgid "Metadata property value '%(v)s' for key '%(k)s' is not a string." 
-msgstr "" - -#: nova/compute/api.py:422 -msgid "Metadata property key blank" -msgstr "Metadata property key blank" - -#: nova/compute/api.py:425 +#: nova/compute/api.py:421 msgid "Metadata property key greater than 255 characters" msgstr "Metadata property key greater than 255 characters" -#: nova/compute/api.py:428 +#: nova/compute/api.py:424 msgid "Metadata property value greater than 255 characters" msgstr "Metadata property value greater than 255 characters" -#: nova/compute/api.py:565 -msgid "Failed to set instance name using multi_instance_display_name_template." -msgstr "" - -#: nova/compute/api.py:667 +#: nova/compute/api.py:663 #, fuzzy msgid "Cannot attach one or more volumes to multiple instances" msgstr "Unable to attach boot volume to instance %s" -#: nova/compute/api.py:709 +#: nova/compute/api.py:705 msgid "The requested availability zone is not available" msgstr "" -#: nova/compute/api.py:1110 +#: nova/compute/api.py:1107 msgid "" "Images with destination_type 'volume' need to have a non-zero size " "specified" msgstr "" -#: nova/compute/api.py:1141 +#: nova/compute/api.py:1138 msgid "More than one swap drive requested." msgstr "" -#: nova/compute/api.py:1290 -#: nova/tests/api/openstack/compute/test_servers.py:3145 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2484 +#: nova/compute/api.py:1277 +#: nova/tests/api/openstack/compute/test_servers.py:3199 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2488 msgid "" "Unable to launch multiple instances with a single configured port ID. " "Please launch your instance one by one with different ports." msgstr "" -#: nova/compute/api.py:1311 +#: nova/compute/api.py:1298 msgid "max_count cannot be greater than 1 if an fixed_ip is specified." 
msgstr "" -#: nova/compute/api.py:1415 +#: nova/compute/api.py:1404 #, fuzzy msgid "instance termination disabled" msgstr "Going to start terminating instances" -#: nova/compute/api.py:1430 +#: nova/compute/api.py:1418 #, python-format msgid "Working on deleting snapshot %s from shelved instance..." msgstr "" -#: nova/compute/api.py:1437 +#: nova/compute/api.py:1425 #, python-format msgid "Failed to delete snapshot from shelved instance (%s)." msgstr "" -#: nova/compute/api.py:1441 -msgid "" -"Something wrong happened when trying to delete snapshot from shelved " -"instance." -msgstr "" - -#: nova/compute/api.py:1506 +#: nova/compute/api.py:1486 msgid "Instance is already in deleting state, ignoring this request" msgstr "" -#: nova/compute/api.py:1553 +#: nova/compute/api.py:1521 #, python-format msgid "" "Found an unconfirmed migration during delete, id: %(id)s, status: " "%(status)s" msgstr "" -#: nova/compute/api.py:1563 +#: nova/compute/api.py:1531 msgid "Instance may have been confirmed during delete" msgstr "" -#: nova/compute/api.py:1580 +#: nova/compute/api.py:1548 #, python-format msgid "Migration %s may have been confirmed during delete" msgstr "" -#: nova/compute/api.py:1615 +#: nova/compute/api.py:1583 #, python-format msgid "Flavor %d not found" msgstr "" -#: nova/compute/api.py:1633 +#: nova/compute/api.py:1603 #, fuzzy, python-format msgid "instance's host %s is down, deleting from database" msgstr "host for instance is down, deleting from database" -#: nova/compute/api.py:1660 +#: nova/compute/api.py:1630 #, python-format msgid "Ignoring volume cleanup failure due to %s" msgstr "Ignoring volume cleanup failure due to %s" -#: nova/compute/api.py:2061 +#: nova/compute/api.py:2030 #, python-format msgid "snapshot for %s" msgstr "snapshot for %s" -#: nova/compute/api.py:2399 +#: nova/compute/api.py:2368 msgid "Resize to zero disk flavor is not allowed." 
msgstr "" -#: nova/compute/api.py:2438 +#: nova/compute/api.py:2407 #, fuzzy, python-format msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance." msgstr "%(overs)s quota exceeded for %(pid)s, tried to resize instance. %(msg)s" -#: nova/compute/api.py:2613 +#: nova/compute/api.py:2582 msgid "Cannot rescue a volume-backed instance" msgstr "" -#: nova/compute/api.py:2840 +#: nova/compute/api.py:2809 msgid "Volume must be attached in order to detach." msgstr "Volume must be attached in order to detach." -#: nova/compute/api.py:2860 +#: nova/compute/api.py:2829 msgid "Old volume is attached to a different instance." msgstr "" -#: nova/compute/api.py:2863 +#: nova/compute/api.py:2832 msgid "New volume must be detached in order to swap." msgstr "" -#: nova/compute/api.py:2866 +#: nova/compute/api.py:2835 msgid "New volume must be the same size or larger." msgstr "" -#: nova/compute/api.py:3067 +#: nova/compute/api.py:3042 #, python-format msgid "Instance compute service state on %s expected to be down, but it was up." 
msgstr "" -#: nova/compute/api.py:3369 +#: nova/compute/api.py:3347 msgid "Host aggregate is not empty" msgstr "" -#: nova/compute/api.py:3402 +#: nova/compute/api.py:3380 #, python-format msgid "More than 1 AZ for host %s" msgstr "" -#: nova/compute/api.py:3437 +#: nova/compute/api.py:3415 #, python-format msgid "Host already in availability zone %s" msgstr "" -#: nova/compute/api.py:3525 nova/tests/compute/test_keypairs.py:135 +#: nova/compute/api.py:3503 nova/tests/compute/test_keypairs.py:137 msgid "Keypair name contains unsafe characters" msgstr "Keypair name contains unsafe characters" -#: nova/compute/api.py:3529 nova/tests/compute/test_keypairs.py:127 -#: nova/tests/compute/test_keypairs.py:131 -msgid "Keypair name must be between 1 and 255 characters long" -msgstr "Keypair name must be between 1 and 255 characters long" +#: nova/compute/api.py:3509 nova/tests/compute/test_keypairs.py:127 +#: nova/tests/compute/test_keypairs.py:132 +msgid "Keypair name must be string and between 1 and 255 characters long" +msgstr "" -#: nova/compute/api.py:3617 +#: nova/compute/api.py:3597 #, python-format msgid "Security group %s is not a string or unicode" msgstr "Security group %s is not a string or unicode" -#: nova/compute/api.py:3620 -#, python-format -msgid "Security group %s cannot be empty." -msgstr "Security group %s cannot be empty." - -#: nova/compute/api.py:3628 +#: nova/compute/api.py:3607 #, python-format msgid "" "Value (%(value)s) for parameter Group%(property)s is invalid. Content " "limited to '%(allowed)s'." msgstr "" -#: nova/compute/api.py:3634 -#, python-format -msgid "Security group %s should not be greater than 255 characters." -msgstr "Security group %s should not be greater than 255 characters." - -#: nova/compute/api.py:3652 +#: nova/compute/api.py:3627 msgid "Quota exceeded, too many security groups." msgstr "Quota exceeded, too many security groups." 
-#: nova/compute/api.py:3655 +#: nova/compute/api.py:3630 #, python-format msgid "Create Security Group %s" msgstr "Create Security Group %s" -#: nova/compute/api.py:3667 +#: nova/compute/api.py:3642 #, python-format msgid "Security group %s already exists" msgstr "Security group %s already exists" -#: nova/compute/api.py:3680 +#: nova/compute/api.py:3655 #, fuzzy, python-format msgid "Unable to update system group '%s'" msgstr "Unable to destroy vbd %s" -#: nova/compute/api.py:3742 +#: nova/compute/api.py:3717 #, fuzzy, python-format msgid "Unable to delete system group '%s'" msgstr "Unable to destroy vbd %s" -#: nova/compute/api.py:3747 +#: nova/compute/api.py:3722 msgid "Security group is still in use" msgstr "Security group is still in use" -#: nova/compute/api.py:3757 -msgid "Failed to update usages deallocating security group" -msgstr "Failed to update usages deallocating security group" - -#: nova/compute/api.py:3760 +#: nova/compute/api.py:3735 #, python-format msgid "Delete security group %s" msgstr "Delete security group %s" -#: nova/compute/api.py:3836 nova/compute/api.py:3919 +#: nova/compute/api.py:3811 nova/compute/api.py:3894 #, python-format msgid "Rule (%s) not found" msgstr "Rule (%s) not found" -#: nova/compute/api.py:3852 +#: nova/compute/api.py:3827 msgid "Quota exceeded, too many security group rules." msgstr "Quota exceeded, too many security group rules." 
-#: nova/compute/api.py:3855 +#: nova/compute/api.py:3830 #, python-format msgid "" "Security group %(name)s added %(protocol)s ingress " "(%(from_port)s:%(to_port)s)" msgstr "" -#: nova/compute/api.py:3870 +#: nova/compute/api.py:3845 #, python-format msgid "" "Security group %(name)s removed %(protocol)s ingress " "(%(from_port)s:%(to_port)s)" msgstr "" -#: nova/compute/api.py:3926 +#: nova/compute/api.py:3901 msgid "Security group id should be integer" msgstr "Security group id should be integer" -#: nova/compute/claims.py:135 +#: nova/compute/claims.py:126 #, python-format -msgid "" -"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs " -"%(vcpus)d" +msgid "Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB" msgstr "" -"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs " -"%(vcpus)d" -#: nova/compute/claims.py:150 +#: nova/compute/claims.py:140 msgid "Claim successful" msgstr "" -#: nova/compute/claims.py:153 +#: nova/compute/claims.py:143 msgid "memory" msgstr "" -#: nova/compute/claims.py:162 +#: nova/compute/claims.py:152 msgid "disk" msgstr "" -#: nova/compute/claims.py:177 nova/compute/claims.py:249 +#: nova/compute/claims.py:167 nova/compute/claims.py:230 msgid "Claim pci failed." 
msgstr "" -#: nova/compute/claims.py:180 -msgid "CPUs" -msgstr "" - -#: nova/compute/claims.py:192 +#: nova/compute/claims.py:177 #, fuzzy, python-format msgid "Total %(type)s: %(total)d %(unit)s, used: %(used).02f %(unit)s" msgstr "Total VCPUs: %(total_vcpus)d, used: %(used_vcpus)d" -#: nova/compute/claims.py:199 +#: nova/compute/claims.py:184 #, fuzzy, python-format msgid "%(type)s limit not specified, defaulting to unlimited" msgstr "Disk limit not specified, defaulting to unlimited" -#: nova/compute/claims.py:206 +#: nova/compute/claims.py:191 #, fuzzy, python-format msgid "%(type)s limit: %(limit).02f %(unit)s, free: %(free).02f %(unit)s" msgstr "Disk limit: %(disk_gb_limit)d GB, free: %(free_disk_gb)d GB" -#: nova/compute/claims.py:212 +#: nova/compute/claims.py:197 #, python-format msgid "Free %(type)s %(free).02f %(unit)s < requested %(requested)d %(unit)s" msgstr "" -#: nova/compute/flavors.py:109 +#: nova/compute/flavors.py:110 msgid "" "Flavor names can only contain alphanumeric characters, periods, dashes, " "underscores and spaces." msgstr "" -#: nova/compute/flavors.py:119 +#: nova/compute/flavors.py:120 msgid "id cannot contain leading and/or trailing whitespace(s)" msgstr "" -#: nova/compute/flavors.py:129 +#: nova/compute/flavors.py:130 msgid "" "Flavor id can only contain letters from A-Z (both cases), periods, " "dashes, underscores and spaces." 
msgstr "" -#: nova/compute/flavors.py:150 +#: nova/compute/flavors.py:151 #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "" -#: nova/compute/flavors.py:161 +#: nova/compute/flavors.py:162 msgid "is_public must be a boolean" msgstr "" -#: nova/compute/flavors.py:166 -#, python-format -msgid "DB error: %s" -msgstr "DB error: %s" - -#: nova/compute/flavors.py:177 -#, python-format -msgid "Instance type %s not found for deletion" -msgstr "Instance type %s not found for deletion" - -#: nova/compute/flavors.py:327 +#: nova/compute/flavors.py:328 msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." msgstr "" -#: nova/compute/manager.py:283 +#: nova/compute/manager.py:284 #, python-format msgid "Task possibly preempted: %s" msgstr "" -#: nova/compute/manager.py:365 nova/compute/manager.py:2885 -#, python-format -msgid "Error while trying to clean up image %s" -msgstr "" - -#: nova/compute/manager.py:506 +#: nova/compute/manager.py:508 msgid "Instance event failed" msgstr "" -#: nova/compute/manager.py:605 +#: nova/compute/manager.py:608 #, python-format msgid "%s is not a valid node managed by this compute host." msgstr "" -#: nova/compute/manager.py:704 +#: nova/compute/manager.py:714 #, fuzzy, python-format msgid "" "Deleting instance as its host (%(instance_host)s) is not equal to our " "host (%(our_host)s)." msgstr "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." -#: nova/compute/manager.py:719 +#: nova/compute/manager.py:729 msgid "Instance has been marked deleted already, removing it from the hypervisor." 
msgstr "" -#: nova/compute/manager.py:739 +#: nova/compute/manager.py:749 msgid "" "Hypervisor driver does not support instance shared storage check, " "assuming it's not on shared storage" msgstr "" -#: nova/compute/manager.py:745 -#, fuzzy -msgid "Failed to check if instance shared" -msgstr "Failed to terminate instance" - -#: nova/compute/manager.py:811 nova/compute/manager.py:862 -msgid "Failed to complete a deletion" -msgstr "" - -#: nova/compute/manager.py:844 +#: nova/compute/manager.py:854 msgid "" "Service started deleting the instance during the previous run, but did " "not finish. Restarting the deletion now." msgstr "" -#: nova/compute/manager.py:885 +#: nova/compute/manager.py:895 #, python-format msgid "" "Instance in transitional state (%(task_state)s) at start-up and power " "state is (%(power_state)s), clearing task state" msgstr "" -#: nova/compute/manager.py:903 -msgid "Failed to stop instance" -msgstr "" - -#: nova/compute/manager.py:915 -msgid "Failed to start instance" -msgstr "" - -#: nova/compute/manager.py:940 -msgid "Failed to revert crashed migration" -msgstr "" - -#: nova/compute/manager.py:943 +#: nova/compute/manager.py:953 msgid "Instance found in migrating state during startup. Resetting task_state" msgstr "" -#: nova/compute/manager.py:960 +#: nova/compute/manager.py:970 msgid "Rebooting instance after nova-compute restart." msgstr "Rebooting instance after nova-compute restart." 
-#: nova/compute/manager.py:970 +#: nova/compute/manager.py:980 msgid "Hypervisor driver does not support resume guests" msgstr "Hypervisor driver does not support resume guests" -#: nova/compute/manager.py:975 +#: nova/compute/manager.py:985 #, fuzzy msgid "Failed to resume instance" msgstr "Failed to suspend instance" -#: nova/compute/manager.py:984 +#: nova/compute/manager.py:994 msgid "Hypervisor driver does not support firewall rules" msgstr "Hypervisor driver does not support firewall rules" -#: nova/compute/manager.py:1009 +#: nova/compute/manager.py:1019 #, python-format msgid "VM %(state)s (Lifecycle Event)" msgstr "" -#: nova/compute/manager.py:1025 +#: nova/compute/manager.py:1035 #, fuzzy, python-format msgid "Unexpected power state %d" msgstr "Unexpected status code" -#: nova/compute/manager.py:1130 +#: nova/compute/manager.py:1140 msgid "Hypervisor driver does not support security groups." msgstr "" -#: nova/compute/manager.py:1168 +#: nova/compute/manager.py:1178 #, python-format msgid "Volume id: %s finished being created but was not set as 'available'" msgstr "" -#: nova/compute/manager.py:1225 nova/compute/manager.py:1982 +#: nova/compute/manager.py:1235 nova/compute/manager.py:2057 msgid "Success" msgstr "" -#: nova/compute/manager.py:1249 +#: nova/compute/manager.py:1259 msgid "Instance disappeared before we could start it" msgstr "" -#: nova/compute/manager.py:1276 +#: nova/compute/manager.py:1286 msgid "Anti-affinity instance group policy was violated." 
msgstr "" -#: nova/compute/manager.py:1353 -msgid "Failed to dealloc network for deleted instance" -msgstr "Failed to dealloc network for deleted instance" - -#: nova/compute/manager.py:1358 +#: nova/compute/manager.py:1369 msgid "Instance disappeared during build" msgstr "" -#: nova/compute/manager.py:1374 -msgid "Failed to dealloc network for failed instance" -msgstr "" - -#: nova/compute/manager.py:1401 +#: nova/compute/manager.py:1412 #, fuzzy, python-format msgid "Error: %s" msgstr "DB error: %s" -#: nova/compute/manager.py:1447 nova/compute/manager.py:3509 -msgid "Error trying to reschedule" -msgstr "Error trying to reschedule" - -#: nova/compute/manager.py:1503 +#: nova/compute/manager.py:1514 msgid "Instance build timed out. Set to error state." msgstr "Instance build timed out. Set to error state." -#: nova/compute/manager.py:1513 nova/compute/manager.py:1873 +#: nova/compute/manager.py:1524 nova/compute/manager.py:1888 msgid "Starting instance..." msgstr "Starting instance..." -#: nova/compute/manager.py:1531 +#: nova/compute/manager.py:1542 #, python-format msgid "" "Treating negative config value (%(retries)s) for " "'network_allocate_retries' as 0." msgstr "" -#: nova/compute/manager.py:1556 -#, python-format -msgid "Instance failed network setup after %(attempts)d attempt(s)" -msgstr "" - -#: nova/compute/manager.py:1560 +#: nova/compute/manager.py:1571 #, python-format msgid "Instance failed network setup (attempt %(attempt)d of %(attempts)d)" msgstr "" -#: nova/compute/manager.py:1741 -msgid "Instance failed block device setup" -msgstr "Instance failed block device setup" - -#: nova/compute/manager.py:1761 nova/compute/manager.py:2098 -#: nova/compute/manager.py:4041 -msgid "Instance failed to spawn" -msgstr "Instance failed to spawn" - -#: nova/compute/manager.py:1941 -msgid "Unexpected build failure, not rescheduling build." 
-msgstr "" - -#: nova/compute/manager.py:2006 +#: nova/compute/manager.py:2020 #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" -#: nova/compute/manager.py:2012 nova/compute/manager.py:2060 -msgid "Failed to allocate network(s)" -msgstr "" - -#: nova/compute/manager.py:2016 nova/compute/manager.py:2062 +#: nova/compute/manager.py:2030 nova/compute/manager.py:2080 msgid "Failed to allocate the network(s), not rescheduling." msgstr "" -#: nova/compute/manager.py:2086 -msgid "Failure prepping block device" -msgstr "" - -#: nova/compute/manager.py:2088 +#: nova/compute/manager.py:2106 msgid "Failure prepping block device." msgstr "" -#: nova/compute/manager.py:2111 +#: nova/compute/manager.py:2127 msgid "Could not clean up failed build, not rescheduling" msgstr "" -#: nova/compute/manager.py:2121 -msgid "Failed to deallocate networks" -msgstr "" - -#: nova/compute/manager.py:2142 -msgid "Failed to cleanup volumes for failed build, not rescheduling" -msgstr "" - -#: nova/compute/manager.py:2181 +#: nova/compute/manager.py:2185 #, fuzzy msgid "Failed to deallocate network for instance." 
msgstr "Failed to dealloc network for deleted instance" -#: nova/compute/manager.py:2202 +#: nova/compute/manager.py:2206 #, python-format msgid "%(action_str)s instance" msgstr "%(action_str)s instance" -#: nova/compute/manager.py:2246 -#, python-format -msgid "Ignoring DiskNotFound: %s" -msgstr "Ignoring DiskNotFound: %s" - -#: nova/compute/manager.py:2249 -#, python-format -msgid "Ignoring VolumeNotFound: %s" -msgstr "Ignoring VolumeNotFound: %s" - -#: nova/compute/manager.py:2353 +#: nova/compute/manager.py:2361 msgid "Instance disappeared during terminate" msgstr "" -#: nova/compute/manager.py:2359 nova/compute/manager.py:3689 -#: nova/compute/manager.py:5769 -msgid "Setting instance vm_state to ERROR" -msgstr "" - -#: nova/compute/manager.py:2539 +#: nova/compute/manager.py:2547 msgid "Rebuilding instance" msgstr "Rebuilding instance" -#: nova/compute/manager.py:2552 +#: nova/compute/manager.py:2560 msgid "Invalid state of instance files on shared storage" msgstr "" -#: nova/compute/manager.py:2556 +#: nova/compute/manager.py:2564 msgid "disk on shared storage, recreating using existing disk" msgstr "" -#: nova/compute/manager.py:2560 +#: nova/compute/manager.py:2568 #, python-format msgid "disk not on shared storage, rebuilding from: '%s'" msgstr "" -#: nova/compute/manager.py:2571 nova/compute/manager.py:4884 -#, fuzzy, python-format -msgid "Failed to get compute_info for %s" -msgstr "Failed to get info for disk %s" - -#: nova/compute/manager.py:2647 +#: nova/compute/manager.py:2655 #, python-format msgid "bringing vm to original state: '%s'" msgstr "" -#: nova/compute/manager.py:2678 +#: nova/compute/manager.py:2686 #, fuzzy, python-format msgid "Detaching from volume api: %s" msgstr "Attach boot from volume failed: %s" -#: nova/compute/manager.py:2705 +#: nova/compute/manager.py:2713 msgid "Rebooting instance" msgstr "Rebooting instance" -#: nova/compute/manager.py:2722 +#: nova/compute/manager.py:2730 #, python-format msgid "" "trying to reboot a 
non-running instance: (state: %(state)s expected: " @@ -5297,25 +4967,25 @@ msgstr "" "trying to reboot a non-running instance: (state: %(state)s expected: " "%(running)s)" -#: nova/compute/manager.py:2758 +#: nova/compute/manager.py:2766 msgid "Reboot failed but instance is running" msgstr "" -#: nova/compute/manager.py:2766 +#: nova/compute/manager.py:2774 #, python-format msgid "Cannot reboot instance: %s" msgstr "" -#: nova/compute/manager.py:2778 +#: nova/compute/manager.py:2786 #, fuzzy msgid "Instance disappeared during reboot" msgstr "instance %s: rebooted" -#: nova/compute/manager.py:2846 +#: nova/compute/manager.py:2854 msgid "instance snapshotting" msgstr "instance snapshotting" -#: nova/compute/manager.py:2852 +#: nova/compute/manager.py:2860 #, python-format msgid "" "trying to snapshot a non-running instance: (state: %(state)s expected: " @@ -5324,197 +4994,162 @@ msgstr "" "trying to snapshot a non-running instance: (state: %(state)s expected: " "%(running)s)" -#: nova/compute/manager.py:2890 +#: nova/compute/manager.py:2893 +#, python-format +msgid "Error while trying to clean up image %s" +msgstr "" + +#: nova/compute/manager.py:2898 msgid "Image not found during snapshot" msgstr "" -#: nova/compute/manager.py:2972 +#: nova/compute/manager.py:2980 #, python-format msgid "Failed to set admin password. Instance %s is not running" msgstr "Failed to set admin password. Instance %s is not running" -#: nova/compute/manager.py:2979 +#: nova/compute/manager.py:2987 msgid "Root password set" msgstr "Root password set" -#: nova/compute/manager.py:2984 +#: nova/compute/manager.py:2992 #, fuzzy msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "set_admin_password is not implemented by this driver." 
-#: nova/compute/manager.py:2997 -#, python-format -msgid "set_admin_password failed: %s" -msgstr "set_admin_password failed: %s" - -#: nova/compute/manager.py:3003 +#: nova/compute/manager.py:3011 msgid "error setting admin password" msgstr "error setting admin password" -#: nova/compute/manager.py:3019 +#: nova/compute/manager.py:3027 #, python-format msgid "" "trying to inject a file into a non-running (state: %(current_state)s " "expected: %(expected_state)s)" msgstr "" -#: nova/compute/manager.py:3024 +#: nova/compute/manager.py:3032 #, python-format msgid "injecting file to %s" msgstr "" -#: nova/compute/manager.py:3042 +#: nova/compute/manager.py:3050 msgid "" "Unable to find a different image to use for rescue VM, using instance's " "current image" msgstr "" -#: nova/compute/manager.py:3061 +#: nova/compute/manager.py:3069 msgid "Rescuing" msgstr "Rescuing" -#: nova/compute/manager.py:3082 -#, fuzzy -msgid "Error trying to Rescue Instance" -msgstr "Error trying to reschedule" - -#: nova/compute/manager.py:3086 +#: nova/compute/manager.py:3094 #, fuzzy, python-format msgid "Driver Error: %s" msgstr "DB error: %s" -#: nova/compute/manager.py:3109 +#: nova/compute/manager.py:3117 msgid "Unrescuing" msgstr "Unrescuing" -#: nova/compute/manager.py:3180 +#: nova/compute/manager.py:3188 #, python-format msgid "Migration %s is not found during confirmation" msgstr "" -#: nova/compute/manager.py:3185 +#: nova/compute/manager.py:3193 #, python-format msgid "Migration %s is already confirmed" msgstr "" -#: nova/compute/manager.py:3189 +#: nova/compute/manager.py:3197 #, python-format msgid "" "Unexpected confirmation status '%(status)s' of migration %(id)s, exit " "confirmation process" msgstr "" -#: nova/compute/manager.py:3203 +#: nova/compute/manager.py:3211 msgid "Instance is not found during confirmation" msgstr "" -#: nova/compute/manager.py:3384 +#: nova/compute/manager.py:3392 #, fuzzy, python-format msgid "Updating instance to original state: '%s'" msgstr 
"Setting instance to %(state)s state." -#: nova/compute/manager.py:3407 +#: nova/compute/manager.py:3415 #, fuzzy msgid "Instance has no source host" msgstr "Instance has no volume." -#: nova/compute/manager.py:3413 +#: nova/compute/manager.py:3421 msgid "destination same as source!" msgstr "destination same as source!" -#: nova/compute/manager.py:3431 +#: nova/compute/manager.py:3439 msgid "Migrating" msgstr "Migrating" -#: nova/compute/manager.py:3695 -#, python-format -msgid "Failed to rollback quota for failed finish_resize: %s" -msgstr "" - -#: nova/compute/manager.py:3755 +#: nova/compute/manager.py:3771 msgid "Pausing" msgstr "Pausing" -#: nova/compute/manager.py:3772 +#: nova/compute/manager.py:3788 msgid "Unpausing" msgstr "Unpausing" -#: nova/compute/manager.py:3813 nova/compute/manager.py:3830 +#: nova/compute/manager.py:3829 nova/compute/manager.py:3846 msgid "Retrieving diagnostics" msgstr "Retrieving diagnostics" -#: nova/compute/manager.py:3866 +#: nova/compute/manager.py:3882 msgid "Resuming" msgstr "Resuming" -#: nova/compute/manager.py:4084 +#: nova/compute/manager.py:4102 msgid "Get console output" msgstr "Get console output" -#: nova/compute/manager.py:4283 +#: nova/compute/manager.py:4301 #, python-format msgid "Attaching volume %(volume_id)s to %(mountpoint)s" msgstr "Attaching volume %(volume_id)s to %(mountpoint)s" -#: nova/compute/manager.py:4292 -#, python-format -msgid "Failed to attach %(volume_id)s at %(mountpoint)s" -msgstr "" - -#: nova/compute/manager.py:4308 +#: nova/compute/manager.py:4326 #, python-format msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" msgstr "Detach volume %(volume_id)s from mountpoint %(mp)s" -#: nova/compute/manager.py:4319 +#: nova/compute/manager.py:4337 msgid "Detaching volume from unknown instance" msgstr "Detaching volume from unknown instance" -#: nova/compute/manager.py:4331 -#, fuzzy, python-format -msgid "Failed to detach volume %(volume_id)s from %(mp)s" -msgstr "Faild to detach volume 
%(volume_id)s from %(mp)s" - -#: nova/compute/manager.py:4404 -#, python-format -msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" -msgstr "" - -#: nova/compute/manager.py:4411 -#, python-format -msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" -msgstr "" - -#: nova/compute/manager.py:4504 +#: nova/compute/manager.py:4525 #, fuzzy, python-format msgid "allocate_port_for_instance returned %(ports)s ports" msgstr "allocate_for_instance() for %s" -#: nova/compute/manager.py:4524 +#: nova/compute/manager.py:4549 #, python-format msgid "Port %s is not attached" msgstr "" -#: nova/compute/manager.py:4536 nova/tests/compute/test_compute.py:10612 +#: nova/compute/manager.py:4561 nova/tests/compute/test_compute.py:10659 #, python-format msgid "Host %s not found" msgstr "" -#: nova/compute/manager.py:4690 -#, python-format -msgid "Pre live migration failed at %s" -msgstr "" - -#: nova/compute/manager.py:4753 +#: nova/compute/manager.py:4779 msgid "_post_live_migration() is started.." msgstr "_post_live_migration() is started.." -#: nova/compute/manager.py:4825 +#: nova/compute/manager.py:4855 #, python-format msgid "Migrating instance to %s finished successfully." msgstr "" -#: nova/compute/manager.py:4827 +#: nova/compute/manager.py:4857 msgid "" "You may see the error \"libvirt: QEMU error: Domain not found: no domain " "with matching name.\" This error can be safely ignored." @@ -5522,15 +5157,15 @@ msgstr "" "You may see the error \"libvirt: QEMU error: Domain not found: no domain " "with matching name.\" This error can be safely ignored." -#: nova/compute/manager.py:4852 +#: nova/compute/manager.py:4882 msgid "Post operation of migration started" msgstr "Post operation of migration started" -#: nova/compute/manager.py:5057 +#: nova/compute/manager.py:5087 msgid "An error occurred while refreshing the network cache." 
msgstr "" -#: nova/compute/manager.py:5110 +#: nova/compute/manager.py:5140 #, python-format msgid "" "Found %(migration_count)d unconfirmed migrations older than " @@ -5539,12 +5174,12 @@ msgstr "" "Found %(migration_count)d unconfirmed migrations older than " "%(confirm_window)d seconds" -#: nova/compute/manager.py:5115 +#: nova/compute/manager.py:5145 #, python-format msgid "Setting migration %(migration_id)s to error: %(reason)s" msgstr "Setting migration %(migration_id)s to error: %(reason)s" -#: nova/compute/manager.py:5124 +#: nova/compute/manager.py:5154 #, python-format msgid "" "Automatically confirming migration %(migration_id)s for instance " @@ -5553,30 +5188,26 @@ msgstr "" "Automatically confirming migration %(migration_id)s for instance " "%(instance_uuid)s" -#: nova/compute/manager.py:5134 +#: nova/compute/manager.py:5164 #, python-format msgid "Instance %s not found" msgstr "" -#: nova/compute/manager.py:5139 +#: nova/compute/manager.py:5169 msgid "In ERROR state" msgstr "In ERROR state" -#: nova/compute/manager.py:5146 +#: nova/compute/manager.py:5176 #, fuzzy, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "In states %(vm_state)s/%(task_state)s, notRESIZED/None" -#: nova/compute/manager.py:5157 +#: nova/compute/manager.py:5187 #, python-format msgid "Error auto-confirming resize: %s. Will retry later." msgstr "" -#: nova/compute/manager.py:5186 -msgid "Periodic task failed to offload instance." -msgstr "" - -#: nova/compute/manager.py:5206 +#: nova/compute/manager.py:5236 #, python-format msgid "" "Running instance usage audit for host %(host)s from %(begin_time)s to " @@ -5585,20 +5216,15 @@ msgstr "" "Running instance usage audit for host %(host)s from %(begin_time)s to " "%(end_time)s. %(number_instances)s instances." 
-#: nova/compute/manager.py:5226 -#, python-format -msgid "Failed to generate usage audit for instance on host %s" -msgstr "Failed to generate usage audit for instance on host %s" - -#: nova/compute/manager.py:5255 +#: nova/compute/manager.py:5285 msgid "Updating bandwidth usage cache" msgstr "Updating bandwidth usage cache" -#: nova/compute/manager.py:5277 +#: nova/compute/manager.py:5307 msgid "Bandwidth usage not supported by hypervisor." msgstr "" -#: nova/compute/manager.py:5400 +#: nova/compute/manager.py:5430 #, python-format msgid "" "Found %(num_db_instances)s in the database and %(num_vm_instances)s on " @@ -5607,7 +5233,7 @@ msgstr "" "Found %(num_db_instances)s in the database and %(num_vm_instances)s on " "the hypervisor." -#: nova/compute/manager.py:5466 +#: nova/compute/manager.py:5496 #, python-format msgid "" "During the sync_power process the instance has moved from host %(src)s to" @@ -5616,115 +5242,110 @@ msgstr "" "During the sync_power process the instance has moved from host %(src)s to" " host %(dst)s" -#: nova/compute/manager.py:5479 +#: nova/compute/manager.py:5509 #, python-format msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." msgstr "" -#: nova/compute/manager.py:5504 +#: nova/compute/manager.py:5534 msgid "Instance shutdown by itself. Calling the stop API." msgstr "Instance shutdown by itself. Calling the stop API." -#: nova/compute/manager.py:5516 nova/compute/manager.py:5525 -#: nova/compute/manager.py:5556 nova/compute/manager.py:5567 -msgid "error during stop() in sync_power_state." -msgstr "error during stop() in sync_power_state." - -#: nova/compute/manager.py:5520 +#: nova/compute/manager.py:5553 #, fuzzy msgid "Instance is suspended unexpectedly. Calling the stop API." msgstr "Instance is paused or suspended unexpectedly. Calling the stop API." -#: nova/compute/manager.py:5536 +#: nova/compute/manager.py:5569 #, fuzzy msgid "Instance is paused unexpectedly. Ignore." 
msgstr "Instance is paused or suspended unexpectedly. Calling the stop API." -#: nova/compute/manager.py:5542 +#: nova/compute/manager.py:5575 msgid "Instance is unexpectedly not found. Ignore." msgstr "" -#: nova/compute/manager.py:5548 +#: nova/compute/manager.py:5581 msgid "Instance is not stopped. Calling the stop API." msgstr "Instance is not stopped. Calling the stop API." -#: nova/compute/manager.py:5562 +#: nova/compute/manager.py:5595 msgid "Paused instance shutdown by itself. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5576 +#: nova/compute/manager.py:5609 msgid "Instance is not (soft-)deleted." msgstr "Instance is not (soft-)deleted." -#: nova/compute/manager.py:5605 +#: nova/compute/manager.py:5639 msgid "Reclaiming deleted instance" msgstr "Reclaiming deleted instance" -#: nova/compute/manager.py:5609 +#: nova/compute/manager.py:5643 #, python-format msgid "Periodic reclaim failed to delete instance: %s" msgstr "" -#: nova/compute/manager.py:5634 +#: nova/compute/manager.py:5668 #, fuzzy, python-format msgid "Deleting orphan compute node %s" msgstr "Loading compute driver '%s'" -#: nova/compute/manager.py:5642 nova/compute/resource_tracker.py:391 +#: nova/compute/manager.py:5676 nova/compute/resource_tracker.py:406 #, python-format msgid "No service record for host %s" msgstr "No service record for host %s" -#: nova/compute/manager.py:5682 +#: nova/compute/manager.py:5716 #, python-format msgid "" "Detected instance with name label '%s' which is marked as DELETED but " "still present on host." msgstr "" -#: nova/compute/manager.py:5688 +#: nova/compute/manager.py:5722 #, python-format msgid "" "Powering off instance with name label '%s' which is marked as DELETED but" " still present on host." 
msgstr "" -#: nova/compute/manager.py:5697 +#: nova/compute/manager.py:5731 msgid "set_bootable is not implemented for the current driver" msgstr "" -#: nova/compute/manager.py:5702 +#: nova/compute/manager.py:5736 msgid "Failed to power off instance" msgstr "" -#: nova/compute/manager.py:5706 +#: nova/compute/manager.py:5740 #, python-format msgid "" "Destroying instance with name label '%s' which is marked as DELETED but " "still present on host." msgstr "" -#: nova/compute/manager.py:5716 +#: nova/compute/manager.py:5750 #, python-format msgid "Periodic cleanup failed to delete instance: %s" msgstr "" -#: nova/compute/manager.py:5720 +#: nova/compute/manager.py:5754 #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "" -#: nova/compute/manager.py:5752 +#: nova/compute/manager.py:5786 #, python-format msgid "Setting instance back to %(state)s after: %(error)s" msgstr "" -#: nova/compute/manager.py:5762 +#: nova/compute/manager.py:5796 #, fuzzy, python-format msgid "Setting instance back to ACTIVE after: %s" msgstr "Setting instance to %(state)s state." -#: nova/compute/resource_tracker.py:105 +#: nova/compute/resource_tracker.py:111 #, fuzzy msgid "" "Host field should not be set on the instance until resources have been " @@ -5733,22 +5354,22 @@ msgstr "" "Host field should be not be set on the instance until resources have been" " claimed." -#: nova/compute/resource_tracker.py:110 +#: nova/compute/resource_tracker.py:116 msgid "" "Node field should not be set on the instance until resources have been " "claimed." msgstr "" -#: nova/compute/resource_tracker.py:272 +#: nova/compute/resource_tracker.py:276 #, python-format msgid "Cannot get the metrics from %s." 
msgstr "" -#: nova/compute/resource_tracker.py:291 +#: nova/compute/resource_tracker.py:295 msgid "Auditing locally available compute resources" msgstr "" -#: nova/compute/resource_tracker.py:296 +#: nova/compute/resource_tracker.py:300 msgid "" "Virt driver does not support 'get_available_resource' Compute tracking " "is disabled." @@ -5756,62 +5377,64 @@ msgstr "" "Virt driver does not support 'get_available_resource' Compute tracking " "is disabled." -#: nova/compute/resource_tracker.py:371 +#: nova/compute/resource_tracker.py:375 #, fuzzy, python-format msgid "Compute_service record created for %(host)s:%(node)s" msgstr "Compute_service record created for %s " -#: nova/compute/resource_tracker.py:377 +#: nova/compute/resource_tracker.py:381 #, fuzzy, python-format msgid "Compute_service record updated for %(host)s:%(node)s" msgstr "Compute_service record updated for %s " -#: nova/compute/resource_tracker.py:430 +#: nova/compute/resource_tracker.py:446 #, python-format -msgid "Free ram (MB): %s" -msgstr "Free ram (MB): %s" +msgid "" +"Total physical ram (MB): %(pram)s, total allocated virtual ram (MB): " +"%(vram)s" +msgstr "" -#: nova/compute/resource_tracker.py:431 +#: nova/compute/resource_tracker.py:450 #, python-format msgid "Free disk (GB): %s" msgstr "Free disk (GB): %s" -#: nova/compute/resource_tracker.py:436 +#: nova/compute/resource_tracker.py:454 #, python-format -msgid "Free VCPUS: %s" -msgstr "Free VCPUS: %s" +msgid "Total usable vcpus: %(tcpu)s, total allocated vcpus: %(ucpu)s" +msgstr "" -#: nova/compute/resource_tracker.py:438 +#: nova/compute/resource_tracker.py:458 msgid "Free VCPU information unavailable" msgstr "Free VCPU information unavailable" -#: nova/compute/resource_tracker.py:441 +#: nova/compute/resource_tracker.py:461 #, python-format msgid "PCI stats: %s" msgstr "" -#: nova/compute/resource_tracker.py:486 +#: nova/compute/resource_tracker.py:512 #, fuzzy, python-format msgid "Updating from migration %s" msgstr "Starting 
finish_migration" -#: nova/compute/resource_tracker.py:553 +#: nova/compute/resource_tracker.py:577 #, fuzzy msgid "Instance not resizing, skipping migration." msgstr "VM is not present, skipping destroy..." -#: nova/compute/resource_tracker.py:568 +#: nova/compute/resource_tracker.py:592 msgid "Flavor could not be found, skipping migration." msgstr "" -#: nova/compute/resource_tracker.py:658 +#: nova/compute/resource_tracker.py:682 #, python-format msgid "" "Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB " "memory)" msgstr "" -#: nova/compute/resource_tracker.py:672 +#: nova/compute/resource_tracker.py:696 #, python-format msgid "Missing keys: %s" msgstr "Missing keys: %s" @@ -5825,24 +5448,8 @@ msgstr "No compute host specified" msgid "Unable to find host for Instance %s" msgstr "Unable to find host for Instance %s" -#: nova/compute/utils.py:204 -#, python-format -msgid "Can't access image %(image_id)s: %(error)s" -msgstr "" - -#: nova/compute/utils.py:328 -#, python-format -msgid "" -"No host name specified for the notification of HostAPI.%s and it will be " -"ignored" -msgstr "" - -#: nova/compute/utils.py:456 -#, python-format -msgid "" -"Value of 0 or None specified for %s. This behaviour will change in " -"meaning in the K release, to mean 'call at the default rate' rather than " -"'do not call'. To keep the 'do not call' behaviour, use a negative value." +#: nova/compute/stats.py:49 +msgid "Unexpected type adding stats" msgstr "" #: nova/compute/monitors/__init__.py:176 @@ -5867,47 +5474,47 @@ msgstr "" msgid "Not all properties needed are implemented in the compute driver: %s" msgstr "" -#: nova/conductor/api.py:318 +#: nova/conductor/api.py:315 msgid "nova-conductor connection established successfully" msgstr "" -#: nova/conductor/api.py:323 +#: nova/conductor/api.py:320 msgid "" "Timed out waiting for nova-conductor. Is it running? Or did this service" " start before nova-conductor? 
Reattempting establishment of nova-" "conductor connection..." msgstr "" -#: nova/conductor/manager.py:124 +#: nova/conductor/manager.py:123 #, python-format msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s" msgstr "" -#: nova/conductor/manager.py:523 +#: nova/conductor/manager.py:519 msgid "No valid host found for cold migrate" msgstr "" -#: nova/conductor/manager.py:586 +#: nova/conductor/manager.py:582 #, python-format msgid "" "Migration of instance %(instance_id)s to host %(dest)s unexpectedly " "failed." msgstr "" -#: nova/conductor/manager.py:673 +#: nova/conductor/manager.py:669 #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "" -#: nova/conductor/manager.py:696 +#: nova/conductor/manager.py:692 msgid "No valid host found for unshelve instance" msgstr "" -#: nova/conductor/manager.py:700 +#: nova/conductor/manager.py:696 msgid "Unshelve attempted but vm_state not SHELVED or SHELVED_OFFLOADED" msgstr "" -#: nova/conductor/manager.py:737 +#: nova/conductor/manager.py:733 msgid "No valid host found for rebuild" msgstr "" @@ -5984,85 +5591,85 @@ msgstr "" msgid "Failed to notify cells of instance update" msgstr "Failed to reboot instance" -#: nova/db/api.py:1685 +#: nova/db/api.py:1683 msgid "Failed to notify cells of bw_usage update" msgstr "" -#: nova/db/sqlalchemy/api.py:204 +#: nova/db/sqlalchemy/api.py:207 #, python-format msgid "Deadlock detected when running '%(func_name)s': Retrying..." 
msgstr "" -#: nova/db/sqlalchemy/api.py:245 +#: nova/db/sqlalchemy/api.py:248 msgid "model or base_model parameter should be subclass of NovaBase" msgstr "" -#: nova/db/sqlalchemy/api.py:258 -#: nova/openstack/common/db/sqlalchemy/utils.py:174 -#: nova/virt/baremetal/db/sqlalchemy/api.py:60 +#: nova/db/sqlalchemy/api.py:261 +#: nova/openstack/common/db/sqlalchemy/utils.py:173 +#: nova/virt/baremetal/db/sqlalchemy/api.py:61 #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Unrecognized read_deleted value '%s'" -#: nova/db/sqlalchemy/api.py:750 +#: nova/db/sqlalchemy/api.py:753 #, fuzzy, python-format msgid "Invalid floating ip id %s in request" msgstr "instance %s: rescued" -#: nova/db/sqlalchemy/api.py:855 +#: nova/db/sqlalchemy/api.py:858 msgid "Failed to update usages bulk deallocating floating IP" msgstr "" -#: nova/db/sqlalchemy/api.py:1011 +#: nova/db/sqlalchemy/api.py:1007 #, fuzzy, python-format msgid "Invalid floating IP %s in request" msgstr "instance %s: rescued" -#: nova/db/sqlalchemy/api.py:1313 nova/db/sqlalchemy/api.py:1352 +#: nova/db/sqlalchemy/api.py:1310 nova/db/sqlalchemy/api.py:1349 #, fuzzy, python-format msgid "Invalid fixed IP Address %s in request" msgstr "instance %s: rescued" -#: nova/db/sqlalchemy/api.py:1487 +#: nova/db/sqlalchemy/api.py:1484 #, fuzzy, python-format msgid "Invalid virtual interface address %s in request" msgstr "instance %s: rescued" -#: nova/db/sqlalchemy/api.py:1581 +#: nova/db/sqlalchemy/api.py:1578 #, python-format msgid "" "Unknown osapi_compute_unique_server_name_scope value: %s Flag must be " "empty, \"global\" or \"project\"" msgstr "" -#: nova/db/sqlalchemy/api.py:1741 +#: nova/db/sqlalchemy/api.py:1738 #, fuzzy, python-format msgid "Invalid instance id %s in request" msgstr "instance %s: rescued" -#: nova/db/sqlalchemy/api.py:2019 +#: nova/db/sqlalchemy/api.py:2017 #, python-format msgid "Invalid field name: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:3248 +#: nova/db/sqlalchemy/api.py:3246 
#, python-format msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:4899 +#: nova/db/sqlalchemy/api.py:4898 #, python-format msgid "" "Volume(%s) has lower stats then what is in the database. Instance must " "have been rebooted or crashed. Updating totals." msgstr "" -#: nova/db/sqlalchemy/api.py:5256 +#: nova/db/sqlalchemy/api.py:5262 #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" -#: nova/db/sqlalchemy/api.py:5646 +#: nova/db/sqlalchemy/api.py:5652 #, python-format msgid "IntegrityError detected when archiving table %s" msgstr "" @@ -6095,15 +5702,15 @@ msgstr "" msgid "Extra column %(table)s.%(column)s in shadow table" msgstr "" -#: nova/db/sqlalchemy/utils.py:105 +#: nova/db/sqlalchemy/utils.py:103 msgid "Specify `table_name` or `table` param" msgstr "" -#: nova/db/sqlalchemy/utils.py:108 +#: nova/db/sqlalchemy/utils.py:106 msgid "Specify only one param `table_name` `table`" msgstr "" -#: nova/db/sqlalchemy/utils.py:131 nova/db/sqlalchemy/utils.py:135 +#: nova/db/sqlalchemy/utils.py:129 nova/db/sqlalchemy/utils.py:133 #: nova/db/sqlalchemy/migrate_repo/versions/216_havana.py:84 #: nova/db/sqlalchemy/migrate_repo/versions/216_havana.py:1103 msgid "Exception while creating table." 
@@ -6129,12 +5736,12 @@ msgid "" "%(ex)s" msgstr "" -#: nova/image/glance.py:306 +#: nova/image/glance.py:327 #, python-format msgid "Failed to instantiate the download handler for %(scheme)s" msgstr "" -#: nova/image/glance.py:322 +#: nova/image/glance.py:343 #, python-format msgid "Successfully transferred using %s" msgstr "" @@ -6280,7 +5887,7 @@ msgstr "" msgid "Not deleting key %s" msgstr "" -#: nova/network/api.py:195 nova/network/neutronv2/api.py:797 +#: nova/network/api.py:196 nova/network/neutronv2/api.py:812 #, python-format msgid "re-assign floating IP %(address)s from instance %(instance_id)s" msgstr "re-assign floating IP %(address)s from instance %(instance_id)s" @@ -6309,58 +5916,58 @@ msgstr "Loading compute driver '%s'" msgid "Fixed ip %s not found" msgstr "" -#: nova/network/floating_ips.py:175 +#: nova/network/floating_ips.py:176 #, python-format msgid "Floating IP %s is not associated. Ignore." msgstr "" -#: nova/network/floating_ips.py:194 +#: nova/network/floating_ips.py:195 #, python-format msgid "Address |%(address)s| is not allocated" msgstr "Address |%(address)s| is not allocated" -#: nova/network/floating_ips.py:198 +#: nova/network/floating_ips.py:199 #, python-format msgid "Address |%(address)s| is not allocated to your project |%(project)s|" msgstr "Address |%(address)s| is not allocated to your project |%(project)s|" -#: nova/network/floating_ips.py:218 +#: nova/network/floating_ips.py:219 #, python-format msgid "Quota exceeded for %s, tried to allocate floating IP" msgstr "" -#: nova/network/floating_ips.py:277 +#: nova/network/floating_ips.py:278 msgid "Failed to update usages deallocating floating IP" msgstr "Failed to update usages deallocating floating IP" -#: nova/network/floating_ips.py:375 +#: nova/network/floating_ips.py:376 #, python-format msgid "Failed to disassociated floating address: %s" msgstr "" -#: nova/network/floating_ips.py:380 +#: nova/network/floating_ips.py:381 #, python-format msgid "Interface %s not found" 
msgstr "" -#: nova/network/floating_ips.py:539 +#: nova/network/floating_ips.py:540 #, python-format msgid "Starting migration network for instance %s" msgstr "" -#: nova/network/floating_ips.py:545 +#: nova/network/floating_ips.py:546 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " "%(instance_uuid)s. Will not migrate it " msgstr "" -#: nova/network/floating_ips.py:574 +#: nova/network/floating_ips.py:575 #, python-format msgid "Finishing migration network for instance %s" msgstr "" -#: nova/network/floating_ips.py:581 +#: nova/network/floating_ips.py:582 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " @@ -6369,7 +5976,7 @@ msgstr "" "Floating ip address |%(address)s| no longer belongs to instance " "%(instance_uuid)s. Will notsetup it." -#: nova/network/floating_ips.py:624 +#: nova/network/floating_ips.py:625 #, python-format msgid "" "Database inconsistency: DNS domain |%s| is registered in the Nova db but " @@ -6380,12 +5987,12 @@ msgstr "" "not visible to either the floating or instance DNS driver. It will be " "ignored." -#: nova/network/floating_ips.py:664 +#: nova/network/floating_ips.py:665 #, python-format msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." msgstr "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." -#: nova/network/floating_ips.py:673 +#: nova/network/floating_ips.py:674 #, python-format msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." msgstr "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
@@ -6433,52 +6040,52 @@ msgstr "" "Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " "%(top)r" -#: nova/network/linux_net.py:769 +#: nova/network/linux_net.py:777 #, python-format msgid "Removed %(num)d duplicate rules for floating ip %(float)s" msgstr "" -#: nova/network/linux_net.py:817 +#: nova/network/linux_net.py:825 #, python-format msgid "Error deleting conntrack entries for %s" msgstr "" -#: nova/network/linux_net.py:1072 +#: nova/network/linux_net.py:1091 #, python-format msgid "Hupping dnsmasq threw %s" msgstr "Hupping dnsmasq threw %s" -#: nova/network/linux_net.py:1154 +#: nova/network/linux_net.py:1172 #, python-format msgid "killing radvd threw %s" msgstr "killing radvd threw %s" -#: nova/network/linux_net.py:1308 +#: nova/network/linux_net.py:1333 #, python-format msgid "Unable to execute %(cmd)s. Exception: %(exception)s" msgstr "" -#: nova/network/linux_net.py:1366 +#: nova/network/linux_net.py:1391 #, python-format msgid "Failed removing net device: '%s'" msgstr "" -#: nova/network/linux_net.py:1543 +#: nova/network/linux_net.py:1568 #, fuzzy, python-format msgid "Adding interface %(interface)s to bridge %(bridge)s" msgstr "Ensuring vlan %(vlan)s and bridge %(bridge)s" -#: nova/network/linux_net.py:1549 +#: nova/network/linux_net.py:1574 #, python-format msgid "Failed to add interface: %s" msgstr "Failed to add interface: %s" -#: nova/network/manager.py:828 +#: nova/network/manager.py:813 #, python-format msgid "instance-dns-zone not found |%s|." msgstr "" -#: nova/network/manager.py:835 +#: nova/network/manager.py:820 #, python-format msgid "" "instance-dns-zone is |%(domain)s|, which is in availability zone " @@ -6489,55 +6096,50 @@ msgstr "" "|%(zone)s|. Instance is in zone |%(zone2)s|. No DNS record will be " "created." 
-#: nova/network/manager.py:874 -#, python-format -msgid "Quota exceeded for %s, tried to allocate fixed IP" -msgstr "" - -#: nova/network/manager.py:934 +#: nova/network/manager.py:943 msgid "Error cleaning up fixed ip allocation. Manual cleanup may be required." msgstr "" -#: nova/network/manager.py:964 +#: nova/network/manager.py:973 #, fuzzy msgid "Failed to update usages deallocating fixed IP" msgstr "Failed to update usages deallocating floating IP" -#: nova/network/manager.py:988 +#: nova/network/manager.py:997 #, python-format msgid "Unable to release %s because vif doesn't exist." msgstr "Unable to release %s because vif doesn't exist." -#: nova/network/manager.py:1029 +#: nova/network/manager.py:1038 #, python-format msgid "IP %s leased that is not associated" msgstr "IP %s leased that is not associated" -#: nova/network/manager.py:1035 +#: nova/network/manager.py:1044 #, python-format msgid "IP |%s| leased that isn't allocated" msgstr "IP |%s| leased that isn't allocated" -#: nova/network/manager.py:1044 +#: nova/network/manager.py:1053 #, python-format msgid "IP %s released that is not associated" msgstr "IP %s released that is not associated" -#: nova/network/manager.py:1048 +#: nova/network/manager.py:1057 #, python-format msgid "IP %s released that was not leased" msgstr "IP %s released that was not leased" -#: nova/network/manager.py:1066 +#: nova/network/manager.py:1075 #, python-format msgid "%s must be an integer" msgstr "%s must be an integer" -#: nova/network/manager.py:1098 +#: nova/network/manager.py:1107 msgid "Maximum allowed length for 'label' is 255." msgstr "Maximum allowed length for 'label' is 255." -#: nova/network/manager.py:1118 +#: nova/network/manager.py:1127 #, python-format msgid "" "Subnet(s) too large, defaulting to /%s. To override, specify " @@ -6546,16 +6148,16 @@ msgstr "" "Subnet(s) too large, defaulting to /%s. To override, specify " "network_size flag." 
-#: nova/network/manager.py:1203 +#: nova/network/manager.py:1212 msgid "cidr already in use" msgstr "cidr already in use" -#: nova/network/manager.py:1206 +#: nova/network/manager.py:1215 #, python-format msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" msgstr "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" -#: nova/network/manager.py:1217 +#: nova/network/manager.py:1226 #, python-format msgid "" "requested cidr (%(cidr)s) conflicts with existing smaller cidr " @@ -6564,12 +6166,12 @@ msgstr "" "requested cidr (%(cidr)s) conflicts with existing smaller cidr " "(%(smaller)s)" -#: nova/network/manager.py:1311 +#: nova/network/manager.py:1320 #, python-format msgid "Network must be disassociated from project %s before delete" msgstr "Network must be disassociated from project %s before delete" -#: nova/network/manager.py:1937 +#: nova/network/manager.py:1955 msgid "" "The sum between the number of networks and the vlan start cannot be " "greater than 4094" @@ -6577,7 +6179,7 @@ msgstr "" "The sum between the number of networks and the vlan start cannot be " "greater than 4094" -#: nova/network/manager.py:1944 +#: nova/network/manager.py:1962 #, fuzzy, python-format msgid "" "The network range is not big enough to fit %(num_networks)s networks. 
" @@ -6610,103 +6212,87 @@ msgstr "_delete: %s" msgid "Cannot delete domain |%s|" msgstr "Cannot delete aggregate: %(id)s" -#: nova/network/model.py:94 +#: nova/network/model.py:96 #, python-format msgid "Invalid IP format %s" msgstr "" -#: nova/network/neutronv2/api.py:212 -msgid "Neutron error: quota exceeded" -msgstr "" - -#: nova/network/neutronv2/api.py:215 +#: nova/network/neutronv2/api.py:230 #, python-format msgid "Neutron error creating port on network %s" msgstr "" -#: nova/network/neutronv2/api.py:248 +#: nova/network/neutronv2/api.py:263 #, python-format msgid "empty project id for instance %s" msgstr "empty project id for instance %s" -#: nova/network/neutronv2/api.py:283 +#: nova/network/neutronv2/api.py:298 msgid "No network configured!" msgstr "" -#: nova/network/neutronv2/api.py:303 +#: nova/network/neutronv2/api.py:318 #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more " "specific." msgstr "" -#: nova/network/neutronv2/api.py:373 +#: nova/network/neutronv2/api.py:388 #, python-format msgid "Failed to update port %s" msgstr "" -#: nova/network/neutronv2/api.py:380 +#: nova/network/neutronv2/api.py:395 #, python-format msgid "Failed to delete port %s" msgstr "" -#: nova/network/neutronv2/api.py:443 +#: nova/network/neutronv2/api.py:458 #, python-format msgid "Unable to reset device ID for port %s" msgstr "" -#: nova/network/neutronv2/api.py:451 +#: nova/network/neutronv2/api.py:466 #, python-format msgid "Port %s does not exist" msgstr "" -#: nova/network/neutronv2/api.py:454 nova/network/neutronv2/api.py:478 +#: nova/network/neutronv2/api.py:469 nova/network/neutronv2/api.py:493 #, python-format msgid "Failed to delete neutron port %s" msgstr "" -#: nova/network/neutronv2/api.py:576 -#, fuzzy, python-format -msgid "" -"Unable to update port %(portid)s on subnet %(subnet_id)s with failure: " -"%(exception)s" -msgstr "Fail to delete port %(portid)s with failure: %(exception)s" - -#: 
nova/network/neutronv2/api.py:605 -#, fuzzy, python-format -msgid "Unable to update port %(portid)s with failure: %(exception)s" -msgstr "Fail to delete port %(portid)s with failure: %(exception)s" - -#: nova/network/neutronv2/api.py:632 +#: nova/network/neutronv2/api.py:647 msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "" -#: nova/network/neutronv2/api.py:651 +#: nova/network/neutronv2/api.py:666 #, python-format msgid "Failed to access port %s" msgstr "" -#: nova/network/neutronv2/api.py:880 +#: nova/network/neutronv2/api.py:898 #, python-format msgid "Unable to access floating IP %s" msgstr "" -#: nova/network/neutronv2/api.py:968 +#: nova/network/neutronv2/api.py:986 #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "Multiple floating IP pools matches found for name '%s'" -#: nova/network/neutronv2/api.py:1012 +#: nova/network/neutronv2/api.py:1030 #, python-format msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" msgstr "" -#: nova/network/neutronv2/api.py:1071 +#: nova/network/neutronv2/api.py:1089 #, python-format msgid "Unable to update host of port %s" msgstr "" -#: nova/network/neutronv2/api.py:1107 +#: nova/network/neutronv2/api.py:1125 #, python-format msgid "" "Network %(id)s not matched with the tenants network! The ports tenant " @@ -6872,7 +6458,7 @@ msgstr "" msgid "A NetworkModel is required here" msgstr "" -#: nova/objects/instance.py:431 +#: nova/objects/instance.py:433 #, python-format msgid "No save handler for %s" msgstr "" @@ -6881,11 +6467,11 @@ msgstr "" msgid "Failed to notify cells of instance info cache update" msgstr "" -#: nova/openstack/common/gettextutils.py:320 +#: nova/openstack/common/gettextutils.py:301 msgid "Message objects do not support addition." 
msgstr "" -#: nova/openstack/common/gettextutils.py:330 +#: nova/openstack/common/gettextutils.py:311 msgid "" "Message objects do not support str() because they may contain non-ascii " "characters. Please use unicode() or translate() instead." @@ -6905,22 +6491,22 @@ msgstr "" msgid "Unable to acquire lock on `%(filename)s` due to %(exception)s" msgstr "" -#: nova/openstack/common/log.py:327 +#: nova/openstack/common/log.py:276 #, fuzzy, python-format msgid "Deprecated: %s" msgstr "Deprecated Config: %s" -#: nova/openstack/common/log.py:436 +#: nova/openstack/common/log.py:385 #, fuzzy, python-format msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "Error reading image info file %(filename)s: %(error)s" -#: nova/openstack/common/log.py:486 +#: nova/openstack/common/log.py:446 #, python-format msgid "syslog facility must be one of: %s" msgstr "syslog facility must be one of: %s" -#: nova/openstack/common/log.py:729 +#: nova/openstack/common/log.py:689 #, fuzzy, python-format msgid "Fatal call to deprecated config: %(msg)s" msgstr "Fatal call to deprecated config %(msg)s" @@ -6979,40 +6565,50 @@ msgstr "Environment not supported over SSH" msgid "process_input not supported over SSH" msgstr "process_input not supported over SSH" -#: nova/openstack/common/sslutils.py:98 +#: nova/openstack/common/sslutils.py:95 #, python-format msgid "Invalid SSL version : %s" msgstr "" -#: nova/openstack/common/strutils.py:92 +#: nova/openstack/common/strutils.py:114 #, python-format msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: nova/openstack/common/strutils.py:197 +#: nova/openstack/common/strutils.py:219 #, python-format msgid "Invalid unit system: \"%s\"" msgstr "" -#: nova/openstack/common/strutils.py:206 +#: nova/openstack/common/strutils.py:228 #, python-format msgid "Invalid string format: %s" msgstr "" -#: nova/openstack/common/versionutils.py:69 +#: nova/openstack/common/versionutils.py:86 #, python-format 
msgid "" "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " "may be removed in %(remove_in)s." msgstr "" -#: nova/openstack/common/versionutils.py:73 +#: nova/openstack/common/versionutils.py:90 #, python-format msgid "" "%(what)s is deprecated as of %(as_of)s and may be removed in " "%(remove_in)s. It will not be superseded." msgstr "" +#: nova/openstack/common/versionutils.py:94 +#, python-format +msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s." +msgstr "" + +#: nova/openstack/common/versionutils.py:97 +#, python-format +msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded." +msgstr "" + #: nova/openstack/common/db/sqlalchemy/migration.py:226 #, python-format msgid "" @@ -7026,18 +6622,18 @@ msgid "" "the current version of the schema manually." msgstr "" -#: nova/openstack/common/db/sqlalchemy/utils.py:119 +#: nova/openstack/common/db/sqlalchemy/utils.py:118 msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Unknown sort direction, must be 'desc' or 'asc'" -#: nova/openstack/common/db/sqlalchemy/utils.py:162 +#: nova/openstack/common/db/sqlalchemy/utils.py:161 #, python-format msgid "" "There is no `deleted` column in `%s` table. Project doesn't use soft-" "deleted feature." msgstr "" -#: nova/openstack/common/db/sqlalchemy/utils.py:181 +#: nova/openstack/common/db/sqlalchemy/utils.py:180 #, python-format msgid "There is no `project_id` column in `%s` table." 
msgstr "" @@ -7065,7 +6661,7 @@ msgstr "" msgid "Unsupported id columns type" msgstr "Unsupported Content-Type" -#: nova/pci/pci_manager.py:156 +#: nova/pci/pci_manager.py:113 #, python-format msgid "" "Trying to remove device with %(status)s ownership %(instance_uuid)s " @@ -7097,22 +6693,30 @@ msgstr "Driver must implement schedule_run_instance" msgid "Driver must implement select_destinations" msgstr "" -#: nova/scheduler/filter_scheduler.py:80 +#: nova/scheduler/filter_scheduler.py:84 #, fuzzy, python-format msgid "" "Attempting to build %(num_instances)d instance(s) uuids: " "%(instance_uuids)s" msgstr "Attempting to build %(num_instances)d instance(s)" -#: nova/scheduler/filter_scheduler.py:109 +#: nova/scheduler/filter_scheduler.py:113 #, fuzzy, python-format msgid "Choosing host %(weighed_host)s for instance %(instance_uuid)s" msgstr "Destroying VDIs for Instance %(instance_uuid)s" -#: nova/scheduler/filter_scheduler.py:169 +#: nova/scheduler/filter_scheduler.py:173 msgid "Instance disappeared during scheduling" msgstr "" +#: nova/scheduler/filter_scheduler.py:219 +msgid "ServerGroupAffinityFilter not configured" +msgstr "" + +#: nova/scheduler/filter_scheduler.py:224 +msgid "ServerGroupAntiAffinityFilter not configured" +msgstr "" + #: nova/scheduler/host_manager.py:169 #, python-format msgid "Metric name unknown of %r" @@ -7151,7 +6755,6 @@ msgid "No nodes matched due to not matching 'force_nodes' value of '%s'" msgstr "" #: nova/scheduler/host_manager.py:390 -#: nova/scheduler/filters/trusted_filter.py:208 #, python-format msgid "No service for compute ID %s" msgstr "No service for compute ID %s" @@ -7192,7 +6795,7 @@ msgstr "" msgid "Invalid value for 'scheduler_max_attempts', must be >= 1" msgstr "Invalid value for 'scheduler_max_attempts', must be >= 1" -#: nova/scheduler/utils.py:233 +#: nova/scheduler/utils.py:231 #, python-format msgid "Ignoring the invalid elements of the option %(name)s: %(options)s" msgstr "" @@ -7202,6 +6805,10 @@ msgstr "" 
msgid "%(host_state)s has not been heard from in a while" msgstr "" +#: nova/scheduler/filters/exact_core_filter.py:36 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + #: nova/servicegroup/api.py:70 #, python-format msgid "unknown ServiceGroup driver name: %s" @@ -7300,15 +6907,15 @@ msgstr "Attempted to instantiate singleton" msgid "status must be available" msgstr "status must be available" -#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:245 +#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:290 msgid "already attached" msgstr "already attached" -#: nova/tests/fake_volume.py:195 nova/volume/cinder.py:256 +#: nova/tests/fake_volume.py:195 nova/volume/cinder.py:301 msgid "Instance and volume not in same availability_zone" msgstr "" -#: nova/tests/fake_volume.py:200 nova/volume/cinder.py:262 +#: nova/tests/fake_volume.py:200 nova/volume/cinder.py:307 msgid "already detached" msgstr "already detached" @@ -7316,12 +6923,12 @@ msgstr "already detached" msgid "unexpected role header" msgstr "unexpected role header" -#: nova/tests/api/openstack/test_faults.py:46 +#: nova/tests/api/openstack/test_faults.py:47 msgid "Should be translated." 
msgstr "" -#: nova/tests/api/openstack/compute/test_servers.py:3225 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2434 +#: nova/tests/api/openstack/compute/test_servers.py:3279 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2438 msgid "" "Quota exceeded for instances: Requested 1, but already used 10 of 10 " "instances" @@ -7329,42 +6936,42 @@ msgstr "" "Quota exceeded for instances: Requested 1, but already used 10 of 10 " "instances" -#: nova/tests/api/openstack/compute/test_servers.py:3230 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2439 +#: nova/tests/api/openstack/compute/test_servers.py:3284 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2443 msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram" msgstr "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram" -#: nova/tests/api/openstack/compute/test_servers.py:3235 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2444 +#: nova/tests/api/openstack/compute/test_servers.py:3289 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2448 msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores" msgstr "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores" -#: nova/tests/compute/test_compute.py:1680 -#: nova/tests/compute/test_compute.py:1707 -#: nova/tests/compute/test_compute.py:1785 -#: nova/tests/compute/test_compute.py:1825 -#: nova/tests/compute/test_compute.py:5603 +#: nova/tests/compute/test_compute.py:1696 +#: nova/tests/compute/test_compute.py:1723 +#: nova/tests/compute/test_compute.py:1801 +#: nova/tests/compute/test_compute.py:1841 +#: nova/tests/compute/test_compute.py:5644 #, python-format msgid "Running instances: %s" msgstr "Running instances: %s" -#: nova/tests/compute/test_compute.py:1687 -#: nova/tests/compute/test_compute.py:1755 -#: nova/tests/compute/test_compute.py:1793 +#: 
nova/tests/compute/test_compute.py:1703 +#: nova/tests/compute/test_compute.py:1771 +#: nova/tests/compute/test_compute.py:1809 #, python-format msgid "After terminating instances: %s" msgstr "After terminating instances: %s" -#: nova/tests/compute/test_compute.py:5614 +#: nova/tests/compute/test_compute.py:5655 #, python-format msgid "After force-killing instances: %s" msgstr "After force-killing instances: %s" -#: nova/tests/compute/test_compute.py:6229 +#: nova/tests/compute/test_compute.py:6271 msgid "wrong host/node" msgstr "" -#: nova/tests/compute/test_compute.py:10820 +#: nova/tests/compute/test_compute.py:10867 #, fuzzy msgid "spawn error" msgstr "unknown guestmount error" @@ -7373,7 +6980,16 @@ msgstr "unknown guestmount error" msgid "Keypair data is invalid" msgstr "Keypair data is invalid" -#: nova/tests/db/test_migrations.py:866 +#: nova/tests/compute/test_resources.py:78 +#, python-format +msgid "Free %(free)d < requested %(requested)d " +msgstr "" + +#: nova/tests/compute/test_resources.py:329 +msgid "Free CPUs 2.00 VCPUs < requested 5 VCPUs" +msgstr "" + +#: nova/tests/db/test_migrations.py:923 #, python-format msgid "" "The following migrations are missing a downgrade:\n" @@ -7460,58 +7076,58 @@ msgstr "Body: %s" msgid "Unexpected status code" msgstr "Unexpected status code" -#: nova/tests/virt/hyperv/test_hypervapi.py:517 +#: nova/tests/virt/hyperv/test_hypervapi.py:513 #, fuzzy msgid "fake vswitch not found" msgstr "marker [%s] not found" -#: nova/tests/virt/hyperv/test_hypervapi.py:970 +#: nova/tests/virt/hyperv/test_hypervapi.py:966 msgid "Simulated failure" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1041 +#: nova/tests/virt/libvirt/fakelibvirt.py:1048 msgid "Expected a list for 'auth' parameter" msgstr "Expected a list for 'auth' parameter" -#: nova/tests/virt/libvirt/fakelibvirt.py:1045 +#: nova/tests/virt/libvirt/fakelibvirt.py:1052 msgid "Expected a function in 'auth[0]' parameter" msgstr "Expected a function in 'auth[0]' 
parameter" -#: nova/tests/virt/libvirt/fakelibvirt.py:1049 +#: nova/tests/virt/libvirt/fakelibvirt.py:1056 msgid "Expected a function in 'auth[1]' parameter" msgstr "Expected a function in 'auth[1]' parameter" -#: nova/tests/virt/libvirt/fakelibvirt.py:1060 +#: nova/tests/virt/libvirt/fakelibvirt.py:1067 msgid "" "virEventRegisterDefaultImpl() must be called before " "connection is used." msgstr "" -#: nova/tests/virt/vmwareapi/fake.py:244 +#: nova/tests/virt/vmwareapi/fake.py:241 #, python-format msgid "Property %(attr)s not set for the managed object %(name)s" msgstr "Property %(attr)s not set for the managed object %(name)s" -#: nova/tests/virt/vmwareapi/fake.py:969 +#: nova/tests/virt/vmwareapi/fake.py:985 msgid "There is no VM registered" msgstr "There is no VM registered" -#: nova/tests/virt/vmwareapi/fake.py:971 nova/tests/virt/vmwareapi/fake.py:1307 +#: nova/tests/virt/vmwareapi/fake.py:987 nova/tests/virt/vmwareapi/fake.py:1323 #, python-format msgid "Virtual Machine with ref %s is not there" msgstr "Virtual Machine with ref %s is not there" -#: nova/tests/virt/vmwareapi/fake.py:1096 +#: nova/tests/virt/vmwareapi/fake.py:1112 msgid "Session Invalid" msgstr "Session Invalid" -#: nova/tests/virt/vmwareapi/fake.py:1304 +#: nova/tests/virt/vmwareapi/fake.py:1320 #, fuzzy msgid "No Virtual Machine has been registered yet" msgstr " No Virtual Machine has been registered yet" #: nova/tests/virt/vmwareapi/test_ds_util.py:221 -#: nova/virt/vmwareapi/ds_util.py:265 +#: nova/virt/vmwareapi/ds_util.py:267 #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" @@ -7535,35 +7151,49 @@ msgstr "" msgid "Multiple torrent URL fetcher extensions found. Failing." 
msgstr "" -#: nova/virt/block_device.py:243 +#: nova/virt/block_device.py:241 #, python-format msgid "Driver failed to attach volume %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/virt/block_device.py:362 +#: nova/virt/block_device.py:363 #, python-format msgid "Booting with volume %(volume_id)s at %(mountpoint)s" msgstr "Booting with volume %(volume_id)s at %(mountpoint)s" -#: nova/virt/driver.py:1242 +#: nova/virt/diagnostics.py:143 +#, python-format +msgid "Invalid type for %s" +msgstr "" + +#: nova/virt/diagnostics.py:147 +#, python-format +msgid "Invalid type for %s entry" +msgstr "" + +#: nova/virt/driver.py:705 +msgid "Hypervisor driver does not support post_live_migration_at_source method" +msgstr "" + +#: nova/virt/driver.py:1261 msgid "Event must be an instance of nova.virt.event.Event" msgstr "" -#: nova/virt/driver.py:1248 +#: nova/virt/driver.py:1267 #, python-format msgid "Exception dispatching event %(event)s: %(ex)s" msgstr "" -#: nova/virt/driver.py:1330 +#: nova/virt/driver.py:1361 msgid "Compute driver option required, but not specified" msgstr "Compute driver option required, but not specified" -#: nova/virt/driver.py:1333 +#: nova/virt/driver.py:1364 #, python-format msgid "Loading compute driver '%s'" msgstr "Loading compute driver '%s'" -#: nova/virt/driver.py:1340 +#: nova/virt/driver.py:1371 #, fuzzy msgid "Unable to load the virtualization driver" msgstr "Unable to load the virtualization driver: %s" @@ -7593,7 +7223,7 @@ msgstr "" msgid "Key '%(key)s' not in instances '%(inst)s'" msgstr "" -#: nova/virt/firewall.py:176 +#: nova/virt/firewall.py:174 msgid "Attempted to unfilter instance which is not filtered" msgstr "Attempted to unfilter instance which is not filtered" @@ -7671,37 +7301,37 @@ msgstr "" msgid "Baremetal node id not supplied to driver for %r" msgstr "" -#: nova/virt/baremetal/driver.py:289 +#: nova/virt/baremetal/driver.py:292 #, python-format msgid "Error deploying instance %(instance)s on baremetal node %(node)s." 
msgstr "" -#: nova/virt/baremetal/driver.py:364 +#: nova/virt/baremetal/driver.py:367 #, python-format msgid "Baremetal power manager failed to restart node for instance %r" msgstr "" -#: nova/virt/baremetal/driver.py:376 +#: nova/virt/baremetal/driver.py:379 #, fuzzy, python-format msgid "Destroy called on non-existing instance %s" msgstr "get_info called for instance" -#: nova/virt/baremetal/driver.py:394 +#: nova/virt/baremetal/driver.py:397 #, python-format msgid "Error from baremetal driver during destroy: %s" msgstr "" -#: nova/virt/baremetal/driver.py:399 +#: nova/virt/baremetal/driver.py:402 #, python-format msgid "Error while recording destroy failure in baremetal database: %s" msgstr "" -#: nova/virt/baremetal/driver.py:414 +#: nova/virt/baremetal/driver.py:417 #, python-format msgid "Baremetal power manager failed to stop node for instance %r" msgstr "" -#: nova/virt/baremetal/driver.py:427 +#: nova/virt/baremetal/driver.py:430 #, python-format msgid "Baremetal power manager failed to start node for instance %r" msgstr "" @@ -7789,7 +7419,7 @@ msgid "" "passed to baremetal driver: %s" msgstr "" -#: nova/virt/baremetal/pxe.py:465 nova/virt/baremetal/tilera.py:317 +#: nova/virt/baremetal/pxe.py:465 nova/virt/baremetal/tilera.py:318 #, python-format msgid "Node associated with another instance while waiting for deploy of %s" msgstr "" @@ -7809,7 +7439,7 @@ msgstr "Get console output for instance %s" msgid "PXE deploy failed for instance %s" msgstr "empty project id for instance %s" -#: nova/virt/baremetal/pxe.py:483 nova/virt/baremetal/tilera.py:342 +#: nova/virt/baremetal/pxe.py:483 nova/virt/baremetal/tilera.py:343 #, python-format msgid "Baremetal node deleted while waiting for deployment of instance %s" msgstr "" @@ -7826,21 +7456,21 @@ msgid "" "not passed to baremetal driver: %s" msgstr "" -#: nova/virt/baremetal/tilera.py:323 +#: nova/virt/baremetal/tilera.py:324 #, fuzzy, python-format msgid "Tilera deploy started for instance %s" msgstr "empty 
project id for instance %s" -#: nova/virt/baremetal/tilera.py:329 +#: nova/virt/baremetal/tilera.py:330 #, fuzzy, python-format msgid "Tilera deploy completed for instance %s" msgstr "Get console output for instance %s" -#: nova/virt/baremetal/tilera.py:337 +#: nova/virt/baremetal/tilera.py:338 msgid "Node is unknown error state." msgstr "Node is unknown error state." -#: nova/virt/baremetal/tilera.py:340 +#: nova/virt/baremetal/tilera.py:341 #, fuzzy, python-format msgid "Tilera deploy failed for instance %s" msgstr "Unable to find host for Instance %s" @@ -7959,74 +7589,55 @@ msgstr "No fixed ips associated to instance" msgid "detach volume could not find tid for %s" msgstr "" -#: nova/virt/baremetal/db/sqlalchemy/api.py:198 +#: nova/virt/baremetal/db/sqlalchemy/api.py:199 msgid "instance_uuid must be supplied to bm_node_associate_and_update" msgstr "" -#: nova/virt/baremetal/db/sqlalchemy/api.py:210 +#: nova/virt/baremetal/db/sqlalchemy/api.py:211 #, python-format msgid "Failed to associate instance %(i_uuid)s to baremetal node %(n_uuid)s." 
msgstr "" -#: nova/virt/baremetal/db/sqlalchemy/api.py:245 -#: nova/virt/baremetal/db/sqlalchemy/api.py:287 +#: nova/virt/baremetal/db/sqlalchemy/api.py:246 +#: nova/virt/baremetal/db/sqlalchemy/api.py:288 #, fuzzy, python-format msgid "Baremetal interface %s not found" msgstr "partition %s not found" -#: nova/virt/baremetal/db/sqlalchemy/api.py:297 +#: nova/virt/baremetal/db/sqlalchemy/api.py:298 #, fuzzy, python-format msgid "Baremetal interface %s already in use" msgstr "Virtual Interface creation failed" -#: nova/virt/baremetal/db/sqlalchemy/api.py:310 +#: nova/virt/baremetal/db/sqlalchemy/api.py:311 #, fuzzy, python-format msgid "Baremetal virtual interface %s not found" msgstr "partition %s not found" -#: nova/virt/disk/api.py:280 +#: nova/virt/disk/api.py:292 msgid "image already mounted" msgstr "image already mounted" -#: nova/virt/disk/api.py:354 -#, fuzzy, python-format -msgid "Ignoring error injecting data into image (%(e)s)" -msgstr "Ignoring error injecting data into image %(img_id)s (%(e)s)" - -#: nova/virt/disk/api.py:376 -#, python-format -msgid "" -"Failed to mount container filesystem '%(image)s' on '%(target)s': " -"%(errors)s" -msgstr "" -"Failed to mount container filesystem '%(image)s' on '%(target)s': " -"%(errors)s" - -#: nova/virt/disk/api.py:406 +#: nova/virt/disk/api.py:418 #, python-format msgid "Failed to teardown container filesystem: %s" msgstr "" -#: nova/virt/disk/api.py:419 +#: nova/virt/disk/api.py:431 #, fuzzy, python-format msgid "Failed to umount container filesystem: %s" msgstr "Failed to unmount container filesystem: %s" -#: nova/virt/disk/api.py:444 -#, fuzzy, python-format -msgid "Ignoring error injecting %(inject)s into image (%(e)s)" -msgstr "Ignoring error injecting data into image %(img_id)s (%(e)s)" - -#: nova/virt/disk/api.py:604 +#: nova/virt/disk/api.py:616 msgid "Not implemented on Windows" msgstr "Not implemented on Windows" -#: nova/virt/disk/api.py:631 +#: nova/virt/disk/api.py:643 #, python-format msgid "User 
%(username)s not found in password file." msgstr "User %(username)s not found in password file." -#: nova/virt/disk/api.py:647 +#: nova/virt/disk/api.py:659 #, python-format msgid "User %(username)s not found in shadow file." msgstr "User %(username)s not found in shadow file." @@ -8107,44 +7718,44 @@ msgstr "nbd device %s did not show up" msgid "Detaching from erroneous nbd device returned error: %s" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:64 +#: nova/virt/disk/vfs/guestfs.py:77 #, fuzzy, python-format msgid "No operating system found in %s" msgstr "Floating ip not found for id %s" -#: nova/virt/disk/vfs/guestfs.py:70 +#: nova/virt/disk/vfs/guestfs.py:83 #, python-format msgid "Multi-boot operating system found in %s" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:81 +#: nova/virt/disk/vfs/guestfs.py:94 #, python-format msgid "No mount points found in %(root)s of %(imgfile)s" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:95 +#: nova/virt/disk/vfs/guestfs.py:108 #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(imgfile)s with libguestfs" " (%(e)s)" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:131 +#: nova/virt/disk/vfs/guestfs.py:154 #, python-format msgid "Error mounting %(imgfile)s with libguestfs (%(e)s)" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:147 +#: nova/virt/disk/vfs/guestfs.py:170 #, fuzzy, python-format msgid "Failed to close augeas %s" msgstr "Failed to live migrate VM %s" -#: nova/virt/disk/vfs/guestfs.py:155 +#: nova/virt/disk/vfs/guestfs.py:178 #, python-format msgid "Failed to shutdown appliance %s" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:163 +#: nova/virt/disk/vfs/guestfs.py:186 #, fuzzy, python-format msgid "Failed to close guest handle %s" msgstr "Failed to understand rule %(rule)s" @@ -8210,22 +7821,27 @@ msgstr "Item not found" msgid "Duplicate VM name found: %s" msgstr "duplicate name found: %s" -#: nova/virt/hyperv/migrationops.py:97 +#: nova/virt/hyperv/migrationops.py:98 msgid "Cannot cleanup migration files" 
msgstr "" -#: nova/virt/hyperv/migrationops.py:105 +#: nova/virt/hyperv/migrationops.py:106 #, python-format msgid "" "Cannot resize the root disk to a smaller size. Current size: " "%(curr_root_gb)s GB. Requested size: %(new_root_gb)s GB" msgstr "" -#: nova/virt/hyperv/migrationops.py:200 +#: nova/virt/hyperv/migrationops.py:155 +#, python-format +msgid "Config drive is required by instance: %s, but it does not exist." +msgstr "" + +#: nova/virt/hyperv/migrationops.py:214 msgid "Cannot resize a VHD to a smaller size" msgstr "" -#: nova/virt/hyperv/migrationops.py:245 +#: nova/virt/hyperv/migrationops.py:259 #, python-format msgid "Cannot find boot VHD file for instance: %s" msgstr "" @@ -8244,7 +7860,7 @@ msgstr "Created switch port %(vm_name)s on switch %(ext_path)s" msgid "No external vswitch found" msgstr "" -#: nova/virt/hyperv/pathutils.py:72 +#: nova/virt/hyperv/pathutils.py:73 #, python-format msgid "The file copy from %(src)s to %(dest)s failed" msgstr "" @@ -8259,25 +7875,20 @@ msgstr "Failed to remove snapshot for VM %s" msgid "Unsupported disk format: %s" msgstr "" -#: nova/virt/hyperv/vhdutils.py:151 -#, python-format -msgid "The %(vhd_type)s type VHD is not supported" +#: nova/virt/hyperv/vhdutils.py:77 +msgid "VHD differencing disks cannot be resized" msgstr "" -#: nova/virt/hyperv/vhdutils.py:162 +#: nova/virt/hyperv/vhdutils.py:165 #, python-format msgid "Unable to obtain block size from VHD %(vhd_path)s" msgstr "" -#: nova/virt/hyperv/vhdutils.py:209 +#: nova/virt/hyperv/vhdutils.py:212 msgid "Unsupported virtual disk format" msgstr "" -#: nova/virt/hyperv/vhdutilsv2.py:135 -msgid "Differencing VHDX images are not supported" -msgstr "" - -#: nova/virt/hyperv/vhdutilsv2.py:158 +#: nova/virt/hyperv/vhdutilsv2.py:160 #, python-format msgid "Unable to obtain internal size from VHDX: %(vhd_path)s. 
Exception: %(ex)s" msgstr "" @@ -8287,47 +7898,47 @@ msgstr "" msgid "VIF driver not found for network_api_class: %s" msgstr "" -#: nova/virt/hyperv/vmops.py:169 +#: nova/virt/hyperv/vmops.py:198 #, python-format msgid "" -"Cannot resize a VHD to a smaller size, the original size is " -"%(base_vhd_size)s, the newer size is %(root_vhd_size)s" +"Cannot resize a VHD to a smaller size, the original size is %(old_size)s," +" the newer size is %(new_size)s" msgstr "" -#: nova/virt/hyperv/vmops.py:206 +#: nova/virt/hyperv/vmops.py:228 #, fuzzy msgid "Spawning new instance" msgstr "Starting instance" -#: nova/virt/hyperv/vmops.py:280 nova/virt/vmwareapi/vmops.py:567 +#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:576 #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "" -#: nova/virt/hyperv/vmops.py:283 nova/virt/vmwareapi/vmops.py:571 +#: nova/virt/hyperv/vmops.py:307 nova/virt/vmwareapi/vmops.py:580 msgid "Using config drive for instance" msgstr "" -#: nova/virt/hyperv/vmops.py:296 +#: nova/virt/hyperv/vmops.py:320 #, python-format msgid "Creating config drive at %(path)s" msgstr "Creating config drive at %(path)s" -#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:596 +#: nova/virt/hyperv/vmops.py:328 nova/virt/vmwareapi/vmops.py:605 #, fuzzy, python-format msgid "Creating config drive failed with error: %s" msgstr "Creating config drive at %(path)s" -#: nova/virt/hyperv/vmops.py:340 +#: nova/virt/hyperv/vmops.py:371 msgid "Got request to destroy instance" msgstr "" -#: nova/virt/hyperv/vmops.py:359 +#: nova/virt/hyperv/vmops.py:390 #, fuzzy, python-format msgid "Failed to destroy instance: %s" msgstr "Failed to destroy vm %s" -#: nova/virt/hyperv/vmops.py:412 +#: nova/virt/hyperv/vmops.py:443 #, python-format msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" msgstr "Failed to change vm state of %(vm_name)s to %(req_state)s" @@ -8404,78 +8015,83 @@ msgstr "" msgid "Unable to determine disk bus for '%s'" msgstr 
"Unable to find vbd for vdi %s" -#: nova/virt/libvirt/driver.py:556 +#: nova/virt/libvirt/driver.py:552 #, python-format msgid "Connection to libvirt lost: %s" msgstr "" -#: nova/virt/libvirt/driver.py:739 +#: nova/virt/libvirt/driver.py:741 #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "Can not handle authentication request for %d credentials" -#: nova/virt/libvirt/driver.py:932 +#: nova/virt/libvirt/driver.py:924 msgid "operation time out" msgstr "" -#: nova/virt/libvirt/driver.py:1257 +#: nova/virt/libvirt/driver.py:1248 #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" -#: nova/virt/libvirt/driver.py:1264 +#: nova/virt/libvirt/driver.py:1255 #, python-format msgid "Volume sets block size, but libvirt '%s' or later is required." msgstr "" -#: nova/virt/libvirt/driver.py:1352 +#: nova/virt/libvirt/driver.py:1345 msgid "Swap only supports host devices" msgstr "" -#: nova/virt/libvirt/driver.py:1635 +#: nova/virt/libvirt/driver.py:1631 msgid "libvirt error while requesting blockjob info." msgstr "" -#: nova/virt/libvirt/driver.py:1776 +#: nova/virt/libvirt/driver.py:1774 msgid "Found no disk to snapshot." msgstr "" -#: nova/virt/libvirt/driver.py:1868 +#: nova/virt/libvirt/driver.py:1866 #, python-format msgid "Unknown type: %s" msgstr "" -#: nova/virt/libvirt/driver.py:1873 +#: nova/virt/libvirt/driver.py:1871 msgid "snapshot_id required in create_info" msgstr "" -#: nova/virt/libvirt/driver.py:1931 +#: nova/virt/libvirt/driver.py:1929 #, python-format msgid "Libvirt '%s' or later is required for online deletion of volume snapshots." msgstr "" -#: nova/virt/libvirt/driver.py:1938 +#: nova/virt/libvirt/driver.py:1936 #, python-format msgid "Unknown delete_info type %s" msgstr "" -#: nova/virt/libvirt/driver.py:1966 +#: nova/virt/libvirt/driver.py:1964 #, python-format msgid "Disk with id: %s not found attached to instance." 
msgstr "" -#: nova/virt/libvirt/driver.py:2407 nova/virt/xenapi/vmops.py:1552 +#: nova/virt/libvirt/driver.py:2406 nova/virt/xenapi/vmops.py:1561 msgid "Guest does not have a console available" msgstr "Guest does not have a console available" -#: nova/virt/libvirt/driver.py:2823 +#: nova/virt/libvirt/driver.py:2735 +#, python-format +msgid "%s format is not supported" +msgstr "" + +#: nova/virt/libvirt/driver.py:2841 #, python-format msgid "Detaching PCI devices with libvirt < %(ver)s is not permitted" msgstr "" -#: nova/virt/libvirt/driver.py:2989 +#: nova/virt/libvirt/driver.py:2984 #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt " @@ -8484,30 +8100,22 @@ msgstr "" "Config requested an explicit CPU model, but the current libvirt " "hypervisor '%s' does not support selecting CPU models" -#: nova/virt/libvirt/driver.py:2995 +#: nova/virt/libvirt/driver.py:2990 msgid "Config requested a custom CPU model, but no model name was provided" msgstr "Config requested a custom CPU model, but no model name was provided" -#: nova/virt/libvirt/driver.py:2999 +#: nova/virt/libvirt/driver.py:2994 msgid "A CPU model name should not be set when a host CPU model is requested" msgstr "A CPU model name should not be set when a host CPU model is requested" -#: nova/virt/libvirt/driver.py:3019 -msgid "" -"Passthrough of the host CPU was requested but this libvirt version does " -"not support this feature" -msgstr "" -"Passthrough of the host CPU was requested but this libvirt version does " -"not support this feature" - -#: nova/virt/libvirt/driver.py:3567 +#: nova/virt/libvirt/driver.py:3586 #, python-format msgid "" "Error from libvirt while looking up %(instance_id)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3588 +#: nova/virt/libvirt/driver.py:3607 #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " @@ -8516,23 +8124,23 @@ msgstr "" "Error from libvirt 
while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" -#: nova/virt/libvirt/driver.py:3851 +#: nova/virt/libvirt/driver.py:3873 msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range." msgstr "" -#: nova/virt/libvirt/driver.py:3974 +#: nova/virt/libvirt/driver.py:3998 msgid "libvirt version is too old (does not support getVersion)" msgstr "libvirt version is too old (does not support getVersion)" -#: nova/virt/libvirt/driver.py:4335 +#: nova/virt/libvirt/driver.py:4359 msgid "Block migration can not be used with shared storage." msgstr "Block migration can not be used with shared storage." -#: nova/virt/libvirt/driver.py:4344 +#: nova/virt/libvirt/driver.py:4368 msgid "Live migration can not be used without shared storage." msgstr "Live migration can not be used without shared storage." -#: nova/virt/libvirt/driver.py:4414 +#: nova/virt/libvirt/driver.py:4438 #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too " @@ -8541,7 +8149,7 @@ msgstr "" "Unable to migrate %(instance_uuid)s: Disk of instance is too " "large(available on destination host:%(available)s < need:%(necessary)s)" -#: nova/virt/libvirt/driver.py:4453 +#: nova/virt/libvirt/driver.py:4477 #, python-format msgid "" "CPU doesn't have compatibility.\n" @@ -8556,12 +8164,12 @@ msgstr "" "\n" "Refer to %(u)s" -#: nova/virt/libvirt/driver.py:4516 +#: nova/virt/libvirt/driver.py:4540 #, python-format msgid "The firewall filter for %s does not exist" msgstr "The firewall filter for %s does not exist" -#: nova/virt/libvirt/driver.py:4579 +#: nova/virt/libvirt/driver.py:4603 msgid "" "Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag " "or your destination node does not support retrieving listen addresses. " @@ -8570,7 +8178,7 @@ msgid "" "address (0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)." 
msgstr "" -#: nova/virt/libvirt/driver.py:4596 +#: nova/virt/libvirt/driver.py:4620 msgid "" "Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag," " and the graphics (VNC and/or SPICE) listen addresses on the destination" @@ -8580,39 +8188,51 @@ msgid "" "succeed, but the VM will continue to listen on the current addresses." msgstr "" -#: nova/virt/libvirt/driver.py:4964 +#: nova/virt/libvirt/driver.py:4997 #, python-format msgid "" "Error from libvirt while getting description of %(instance_name)s: [Error" " Code %(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:5090 +#: nova/virt/libvirt/driver.py:5123 msgid "Unable to resize disk down." msgstr "" -#: nova/virt/libvirt/imagebackend.py:257 +#: nova/virt/libvirt/imagebackend.py:258 #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:272 +#: nova/virt/libvirt/imagebackend.py:273 msgid "Attempted overwrite of an existing value." msgstr "" -#: nova/virt/libvirt/imagebackend.py:433 +#: nova/virt/libvirt/imagebackend.py:316 +msgid "clone() is not implemented" +msgstr "" + +#: nova/virt/libvirt/imagebackend.py:449 msgid "You should specify images_volume_group flag to use LVM images." msgstr "" -#: nova/virt/libvirt/imagebackend.py:548 +#: nova/virt/libvirt/imagebackend.py:522 msgid "You should specify images_rbd_pool flag to use rbd images." 
msgstr "" -#: nova/virt/libvirt/imagebackend.py:660 -msgid "rbd python libraries not found" +#: nova/virt/libvirt/imagebackend.py:612 +msgid "installed version of librbd does not support cloning" +msgstr "" + +#: nova/virt/libvirt/imagebackend.py:623 +msgid "Image is not raw format" +msgstr "" + +#: nova/virt/libvirt/imagebackend.py:631 +msgid "No image locations are accessible" msgstr "" -#: nova/virt/libvirt/imagebackend.py:703 +#: nova/virt/libvirt/imagebackend.py:651 #, python-format msgid "Unknown image_type=%s" msgstr "Unknown image_type=%s" @@ -8641,21 +8261,37 @@ msgstr "Path %s must be LVM logical volume" msgid "volume_clear='%s' is not handled" msgstr "" +#: nova/virt/libvirt/rbd.py:104 +msgid "rbd python libraries not found" +msgstr "" + +#: nova/virt/libvirt/rbd.py:159 +msgid "Not stored in rbd" +msgstr "" + +#: nova/virt/libvirt/rbd.py:163 +msgid "Blank components" +msgstr "" + +#: nova/virt/libvirt/rbd.py:166 +msgid "Not an rbd snapshot" +msgstr "" + #: nova/virt/libvirt/utils.py:79 msgid "Cannot find any Fibre Channel HBAs" msgstr "" -#: nova/virt/libvirt/utils.py:437 +#: nova/virt/libvirt/utils.py:391 msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "Can't retrieve root device path from instance libvirt configuration" -#: nova/virt/libvirt/vif.py:356 nova/virt/libvirt/vif.py:574 -#: nova/virt/libvirt/vif.py:750 +#: nova/virt/libvirt/vif.py:338 nova/virt/libvirt/vif.py:545 +#: nova/virt/libvirt/vif.py:709 msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" -#: nova/virt/libvirt/vif.py:362 nova/virt/libvirt/vif.py:580 -#: nova/virt/libvirt/vif.py:756 +#: nova/virt/libvirt/vif.py:344 nova/virt/libvirt/vif.py:551 +#: nova/virt/libvirt/vif.py:715 #, fuzzy, python-format msgid "Unexpected vif_type=%s" msgstr "Unexpected error: %s" @@ -8679,69 +8315,48 @@ msgstr "" msgid "Fibre Channel device not found." 
msgstr "iSCSI device not found at %s" -#: nova/virt/vmwareapi/driver.py:104 -msgid "" -"The VMware ESX driver is now deprecated and will be removed in the Juno " -"release. The VC driver will remain and continue to be supported." -msgstr "" - -#: nova/virt/vmwareapi/driver.py:116 -msgid "" -"Must specify host_ip, host_username and host_password to use " -"compute_driver=vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver" -msgstr "" - -#: nova/virt/vmwareapi/driver.py:128 +#: nova/virt/vmwareapi/driver.py:127 #, python-format msgid "Invalid Regular Expression %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:243 -msgid "Instance cannot be found in host, or in an unknownstate." -msgstr "" - -#: nova/virt/vmwareapi/driver.py:403 +#: nova/virt/vmwareapi/driver.py:141 #, python-format msgid "All clusters specified %s were not found in the vCenter" msgstr "" -#: nova/virt/vmwareapi/driver.py:412 -#, python-format -msgid "The following clusters could not be found in the vCenter %s" -msgstr "" - -#: nova/virt/vmwareapi/driver.py:551 +#: nova/virt/vmwareapi/driver.py:319 #, python-format msgid "The resource %s does not exist" msgstr "" -#: nova/virt/vmwareapi/driver.py:597 +#: nova/virt/vmwareapi/driver.py:381 #, python-format msgid "Invalid cluster or resource pool name : %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:771 +#: nova/virt/vmwareapi/driver.py:555 msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we " "do not return uptime for just one host." msgstr "" -#: nova/virt/vmwareapi/driver.py:884 +#: nova/virt/vmwareapi/driver.py:678 #, python-format msgid "Unable to validate session %s!" msgstr "" -#: nova/virt/vmwareapi/driver.py:926 +#: nova/virt/vmwareapi/driver.py:720 #, python-format msgid "Session %s is inactive!" 
msgstr "" -#: nova/virt/vmwareapi/driver.py:1017 +#: nova/virt/vmwareapi/driver.py:811 #, python-format msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" msgstr "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" -#: nova/virt/vmwareapi/driver.py:1027 +#: nova/virt/vmwareapi/driver.py:821 #, python-format msgid "In vmwareapi:_poll_task, Got this error %s" msgstr "In vmwareapi:_poll_task, Got this error %s" @@ -8762,15 +8377,15 @@ msgstr "" msgid "Capacity is smaller than free space" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:109 +#: nova/virt/vmwareapi/ds_util.py:111 msgid "datastore name empty" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:114 nova/virt/vmwareapi/ds_util.py:146 +#: nova/virt/vmwareapi/ds_util.py:116 nova/virt/vmwareapi/ds_util.py:148 msgid "path component cannot be None" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:160 +#: nova/virt/vmwareapi/ds_util.py:162 msgid "datastore path empty" msgstr "" @@ -8932,26 +8547,26 @@ msgstr "Exception in %s " msgid "Unable to retrieve value for %(path)s Reason: %(reason)s" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:196 +#: nova/virt/vmwareapi/vm_util.py:202 #, python-format msgid "%s is not supported." 
msgstr "" -#: nova/virt/vmwareapi/vm_util.py:989 +#: nova/virt/vmwareapi/vm_util.py:1037 msgid "No host available on cluster" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:1083 +#: nova/virt/vmwareapi/vm_util.py:1131 #, python-format msgid "Failed to get cluster references %s" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:1095 +#: nova/virt/vmwareapi/vm_util.py:1143 #, python-format msgid "Failed to get resource pool references %s" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:1285 +#: nova/virt/vmwareapi/vm_util.py:1334 msgid "vmwareapi:vm_util:clone_vmref_for_instance, called with vm_ref=None" msgstr "" @@ -8960,15 +8575,15 @@ msgstr "" msgid "Extending virtual disk failed with error: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:249 +#: nova/virt/vmwareapi/vmops.py:253 msgid "Image disk size greater than requested disk size" msgstr "" -#: nova/virt/vmwareapi/vmops.py:856 +#: nova/virt/vmwareapi/vmops.py:861 msgid "instance is not powered on" msgstr "instance is not powered on" -#: nova/virt/vmwareapi/vmops.py:884 +#: nova/virt/vmwareapi/vmops.py:889 msgid "Instance does not exist on backend" msgstr "" @@ -8985,29 +8600,28 @@ msgid "" "contents from the disk" msgstr "" -#: nova/virt/vmwareapi/vmops.py:972 +#: nova/virt/vmwareapi/vmops.py:971 msgid "pause not supported for vmwareapi" msgstr "pause not supported for vmwareapi" -#: nova/virt/vmwareapi/vmops.py:976 +#: nova/virt/vmwareapi/vmops.py:975 msgid "unpause not supported for vmwareapi" msgstr "unpause not supported for vmwareapi" -#: nova/virt/vmwareapi/vmops.py:994 +#: nova/virt/vmwareapi/vmops.py:993 #, fuzzy msgid "instance is powered off and cannot be suspended." msgstr "instance is powered off and can not be suspended." -#: nova/virt/vmwareapi/vmops.py:1014 +#: nova/virt/vmwareapi/vmops.py:1013 msgid "instance is not in a suspended state" msgstr "instance is not in a suspended state" -#: nova/virt/vmwareapi/vmops.py:1102 -#, fuzzy -msgid "instance is suspended and cannot be powered off." 
-msgstr "instance is not powered on" +#: nova/virt/vmwareapi/vmops.py:1113 +msgid "Unable to shrink disk." +msgstr "" -#: nova/virt/vmwareapi/vmops.py:1193 +#: nova/virt/vmwareapi/vmops.py:1172 #, fuzzy, python-format msgid "" "In vmwareapi:vmops:confirm_migration, got this exception while destroying" @@ -9016,15 +8630,25 @@ msgstr "" "In vmwareapi:vmops:destroy, got this exception while un-registering the " "VM: %s" -#: nova/virt/vmwareapi/vmops.py:1255 nova/virt/xenapi/vmops.py:1497 +#: nova/virt/vmwareapi/vmops.py:1248 nova/virt/xenapi/vmops.py:1500 #, python-format msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" msgstr "Found %(instance_count)d hung reboots older than %(timeout)d seconds" -#: nova/virt/vmwareapi/vmops.py:1259 nova/virt/xenapi/vmops.py:1501 +#: nova/virt/vmwareapi/vmops.py:1252 nova/virt/xenapi/vmops.py:1504 msgid "Automatically hard rebooting" msgstr "Automatically hard rebooting" +#: nova/virt/vmwareapi/vmops.py:1570 +#, python-format +msgid "No device with interface-id %s exists on VM" +msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1580 +#, python-format +msgid "No device with MAC address %s exists on the VM" +msgstr "" + #: nova/virt/vmwareapi/volumeops.py:340 nova/virt/vmwareapi/volumeops.py:375 #, python-format msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" @@ -9053,12 +8677,12 @@ msgstr "Failed to find volume in db" msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" msgstr "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" -#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:1768 +#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:1777 #, python-format msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" msgstr "TIMEOUT: The call to %(method)s timed out. 
args=%(args)r" -#: nova/virt/xenapi/agent.py:117 nova/virt/xenapi/vmops.py:1773 +#: nova/virt/xenapi/agent.py:117 nova/virt/xenapi/vmops.py:1782 #, python-format msgid "" "NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " @@ -9067,7 +8691,7 @@ msgstr "" "NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " "args=%(args)r" -#: nova/virt/xenapi/agent.py:122 nova/virt/xenapi/vmops.py:1778 +#: nova/virt/xenapi/agent.py:122 nova/virt/xenapi/vmops.py:1787 #, python-format msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" msgstr "The call to %(method)s returned an error: %(e)s. args=%(args)r" @@ -9133,21 +8757,21 @@ msgstr "" msgid "Failure while cleaning up attached VDIs" msgstr "Failure while cleaning up attached VDIs" -#: nova/virt/xenapi/driver.py:386 +#: nova/virt/xenapi/driver.py:390 #, python-format msgid "Could not determine key: %s" msgstr "Could not determine key: %s" -#: nova/virt/xenapi/driver.py:636 +#: nova/virt/xenapi/driver.py:641 msgid "Host startup on XenServer is not supported." msgstr "Host startup on XenServer is not supported." 
-#: nova/virt/xenapi/fake.py:811 +#: nova/virt/xenapi/fake.py:820 #, python-format msgid "xenapi.fake does not have an implementation for %s" msgstr "xenapi.fake does not have an implementation for %s" -#: nova/virt/xenapi/fake.py:919 +#: nova/virt/xenapi/fake.py:928 #, python-format msgid "" "xenapi.fake does not have an implementation for %s or it has been called " @@ -9156,7 +8780,7 @@ msgstr "" "xenapi.fake does not have an implementation for %s or it has been called " "with the wrong number of arguments" -#: nova/virt/xenapi/host.py:74 +#: nova/virt/xenapi/host.py:73 #, python-format msgid "" "Instance %(name)s running on %(host)s could not be found in the database:" @@ -9165,37 +8789,37 @@ msgstr "" "Instance %(name)s running on %(host)s could not be found in the database:" " assuming it is a worker VM and skip ping migration to a new host" -#: nova/virt/xenapi/host.py:86 +#: nova/virt/xenapi/host.py:85 #, fuzzy, python-format msgid "Aggregate for host %(host)s count not be found." msgstr "Compute host %(host)s could not be found." -#: nova/virt/xenapi/host.py:105 +#: nova/virt/xenapi/host.py:104 #, python-format msgid "Unable to migrate VM %(vm_ref)s from %(host)s" msgstr "" -#: nova/virt/xenapi/host.py:186 +#: nova/virt/xenapi/host.py:185 msgid "Failed to parse information about a pci device for passthrough" msgstr "" -#: nova/virt/xenapi/host.py:259 +#: nova/virt/xenapi/host.py:258 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to " "take effect." 
msgstr "" -#: nova/virt/xenapi/host.py:284 +#: nova/virt/xenapi/host.py:283 #, python-format msgid "Failed to extract instance support from %s" msgstr "Failed to extract instance support from %s" -#: nova/virt/xenapi/host.py:301 +#: nova/virt/xenapi/host.py:300 msgid "Unable to get updated status" msgstr "Unable to get updated status" -#: nova/virt/xenapi/host.py:304 +#: nova/virt/xenapi/host.py:303 #, python-format msgid "The call to %(method)s returned an error: %(e)s." msgstr "The call to %(method)s returned an error: %(e)s." @@ -9271,134 +8895,134 @@ msgid "" "Expected %(vlan_num)d" msgstr "" -#: nova/virt/xenapi/vm_utils.py:208 +#: nova/virt/xenapi/vm_utils.py:210 #, python-format msgid "" "Device id %(id)s specified is not supported by hypervisor version " "%(version)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:326 nova/virt/xenapi/vm_utils.py:341 +#: nova/virt/xenapi/vm_utils.py:328 nova/virt/xenapi/vm_utils.py:343 msgid "VM already halted, skipping shutdown..." msgstr "VM already halted, skipping shutdown..." 
-#: nova/virt/xenapi/vm_utils.py:393 +#: nova/virt/xenapi/vm_utils.py:395 #, python-format msgid "VBD %s already detached" msgstr "VBD %s already detached" -#: nova/virt/xenapi/vm_utils.py:396 +#: nova/virt/xenapi/vm_utils.py:398 #, python-format msgid "" "VBD %(vbd_ref)s uplug failed with \"%(err)s\", attempt " "%(num_attempt)d/%(max_attempts)d" msgstr "" -#: nova/virt/xenapi/vm_utils.py:403 +#: nova/virt/xenapi/vm_utils.py:405 #, python-format msgid "Unable to unplug VBD %s" msgstr "Unable to unplug VBD %s" -#: nova/virt/xenapi/vm_utils.py:406 +#: nova/virt/xenapi/vm_utils.py:408 #, python-format msgid "Reached maximum number of retries trying to unplug VBD %s" msgstr "Reached maximum number of retries trying to unplug VBD %s" -#: nova/virt/xenapi/vm_utils.py:418 +#: nova/virt/xenapi/vm_utils.py:420 #, python-format msgid "Unable to destroy VBD %s" msgstr "Unable to destroy VBD %s" -#: nova/virt/xenapi/vm_utils.py:471 +#: nova/virt/xenapi/vm_utils.py:473 #, python-format msgid "Unable to destroy VDI %s" msgstr "Unable to destroy VDI %s" -#: nova/virt/xenapi/vm_utils.py:517 +#: nova/virt/xenapi/vm_utils.py:519 msgid "SR not present and could not be introduced" msgstr "SR not present and could not be introduced" -#: nova/virt/xenapi/vm_utils.py:701 +#: nova/virt/xenapi/vm_utils.py:703 #, python-format msgid "No primary VDI found for %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:793 +#: nova/virt/xenapi/vm_utils.py:795 #, python-format msgid "" "Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s" " is of type %(type)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:872 +#: nova/virt/xenapi/vm_utils.py:874 #, python-format msgid "Multiple base images for image: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:927 +#: nova/virt/xenapi/vm_utils.py:929 #, python-format msgid "" "VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor " "size of %(new_disk_size)d bytes." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:938 nova/virt/xenapi/vmops.py:1037 +#: nova/virt/xenapi/vm_utils.py:940 nova/virt/xenapi/vmops.py:1040 msgid "Can't resize a disk to 0 GB." msgstr "" -#: nova/virt/xenapi/vm_utils.py:990 +#: nova/virt/xenapi/vm_utils.py:992 msgid "Disk must have only one partition." msgstr "" -#: nova/virt/xenapi/vm_utils.py:995 +#: nova/virt/xenapi/vm_utils.py:997 #, python-format msgid "Disk contains a filesystem we are unable to resize: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1000 +#: nova/virt/xenapi/vm_utils.py:1002 msgid "The only partition should be partition 1." msgstr "" -#: nova/virt/xenapi/vm_utils.py:1011 +#: nova/virt/xenapi/vm_utils.py:1013 #, python-format msgid "Attempted auto_configure_disk failed because: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1262 +#: nova/virt/xenapi/vm_utils.py:1264 #, python-format msgid "" "Fast cloning is only supported on default local SR of type ext. SR on " "this system was found to be of type %s. Ignoring the cow flag." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:1337 +#: nova/virt/xenapi/vm_utils.py:1339 #, python-format msgid "Unrecognized cache_images value '%s', defaulting to True" msgstr "Unrecognized cache_images value '%s', defaulting to True" -#: nova/virt/xenapi/vm_utils.py:1413 +#: nova/virt/xenapi/vm_utils.py:1415 #, python-format msgid "Invalid value '%s' for torrent_images" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1436 +#: nova/virt/xenapi/vm_utils.py:1438 #, python-format msgid "Invalid value '%d' for image_compression_level" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1462 +#: nova/virt/xenapi/vm_utils.py:1464 #, python-format msgid "" "Download handler '%(handler)s' raised an exception, falling back to " "default handler '%(default_handler)s'" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1518 +#: nova/virt/xenapi/vm_utils.py:1520 #, python-format msgid "Image size %(size)d exceeded flavor allowed size %(allowed_size)d" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1569 +#: nova/virt/xenapi/vm_utils.py:1571 #, python-format msgid "" "Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " @@ -9407,37 +9031,37 @@ msgstr "" "Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " "bytes" -#: nova/virt/xenapi/vm_utils.py:1611 +#: nova/virt/xenapi/vm_utils.py:1613 msgid "Failed to fetch glance image" msgstr "Failed to fetch glance image" -#: nova/virt/xenapi/vm_utils.py:1819 +#: nova/virt/xenapi/vm_utils.py:1846 #, python-format msgid "Unable to parse rrd of %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1849 +#: nova/virt/xenapi/vm_utils.py:1876 #, python-format msgid "Retry SR scan due to error: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1882 +#: nova/virt/xenapi/vm_utils.py:1909 #, python-format msgid "Flag sr_matching_filter '%s' does not respect formatting convention" msgstr "Flag sr_matching_filter '%s' does not respect formatting convention" -#: nova/virt/xenapi/vm_utils.py:1903 +#: nova/virt/xenapi/vm_utils.py:1930 msgid "" 
"XenAPI is unable to find a Storage Repository to install guest instances " "on. Please check your configuration (e.g. set a default SR for the pool) " "and/or configure the flag 'sr_matching_filter'." msgstr "" -#: nova/virt/xenapi/vm_utils.py:1916 +#: nova/virt/xenapi/vm_utils.py:1943 msgid "Cannot find SR of content-type ISO" msgstr "Cannot find SR of content-type ISO" -#: nova/virt/xenapi/vm_utils.py:1969 +#: nova/virt/xenapi/vm_utils.py:1996 #, python-format msgid "" "Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " @@ -9446,60 +9070,60 @@ msgstr "" "Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " "%(server)s." -#: nova/virt/xenapi/vm_utils.py:2097 +#: nova/virt/xenapi/vm_utils.py:2124 #, python-format msgid "VHD coalesce attempts exceeded (%d), giving up..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2132 +#: nova/virt/xenapi/vm_utils.py:2159 #, python-format msgid "Timeout waiting for device %s to be created" msgstr "Timeout waiting for device %s to be created" -#: nova/virt/xenapi/vm_utils.py:2152 +#: nova/virt/xenapi/vm_utils.py:2179 #, python-format msgid "Disconnecting stale VDI %s from compute domU" msgstr "Disconnecting stale VDI %s from compute domU" -#: nova/virt/xenapi/vm_utils.py:2310 +#: nova/virt/xenapi/vm_utils.py:2337 msgid "" "Shrinking the filesystem down with resize2fs has failed, please check if " "you have enough free space on your disk." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:2445 +#: nova/virt/xenapi/vm_utils.py:2472 msgid "Manipulating interface files directly" msgstr "Manipulating interface files directly" -#: nova/virt/xenapi/vm_utils.py:2454 +#: nova/virt/xenapi/vm_utils.py:2481 #, python-format msgid "Failed to mount filesystem (expected for non-linux instances): %s" msgstr "Failed to mount filesystem (expected for non-linux instances): %s" -#: nova/virt/xenapi/vm_utils.py:2566 +#: nova/virt/xenapi/vm_utils.py:2496 msgid "This domU must be running on the host specified by connection_url" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2635 +#: nova/virt/xenapi/vm_utils.py:2565 msgid "Failed to transfer vhd to new host" msgstr "Failed to transfer vhd to new host" -#: nova/virt/xenapi/vm_utils.py:2661 +#: nova/virt/xenapi/vm_utils.py:2591 msgid "ipxe_boot_menu_url not set, user will have to enter URL manually..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2667 +#: nova/virt/xenapi/vm_utils.py:2597 msgid "ipxe_network_name not set, user will have to enter IP manually..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2678 +#: nova/virt/xenapi/vm_utils.py:2608 #, python-format msgid "" "Unable to find network matching '%(network_name)s', user will have to " "enter IP manually..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2702 +#: nova/virt/xenapi/vm_utils.py:2632 #, python-format msgid "ISO creation tool '%s' does not exist." msgstr "" @@ -9508,107 +9132,107 @@ msgstr "" msgid "Error: Agent is disabled" msgstr "" -#: nova/virt/xenapi/vmops.py:375 +#: nova/virt/xenapi/vmops.py:378 msgid "ipxe_boot is True but no ISO image found" msgstr "" -#: nova/virt/xenapi/vmops.py:518 +#: nova/virt/xenapi/vmops.py:521 msgid "Failed to spawn, rolling back" msgstr "Failed to spawn, rolling back" -#: nova/virt/xenapi/vmops.py:783 +#: nova/virt/xenapi/vmops.py:786 #, fuzzy msgid "Unable to terminate instance." 
msgstr "Failed to terminate instance" -#: nova/virt/xenapi/vmops.py:835 +#: nova/virt/xenapi/vmops.py:838 #, python-format msgid "_migrate_disk_resizing_down failed. Restoring orig vm due_to: %s." msgstr "" -#: nova/virt/xenapi/vmops.py:989 +#: nova/virt/xenapi/vmops.py:992 #, python-format msgid "_migrate_disk_resizing_up failed. Restoring orig vm due_to: %s." msgstr "" -#: nova/virt/xenapi/vmops.py:996 +#: nova/virt/xenapi/vmops.py:999 #, python-format msgid "_migrate_disk_resizing_up failed to rollback: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:1013 +#: nova/virt/xenapi/vmops.py:1016 msgid "Can't resize down ephemeral disks." msgstr "" -#: nova/virt/xenapi/vmops.py:1124 +#: nova/virt/xenapi/vmops.py:1127 msgid "Starting halted instance found during reboot" msgstr "Starting halted instance found during reboot" -#: nova/virt/xenapi/vmops.py:1130 +#: nova/virt/xenapi/vmops.py:1133 msgid "" "Reboot failed due to bad volumes, detaching bad volumes and starting " "halted instance" msgstr "" -#: nova/virt/xenapi/vmops.py:1208 +#: nova/virt/xenapi/vmops.py:1211 msgid "Unable to update metadata, VM not found." msgstr "" -#: nova/virt/xenapi/vmops.py:1254 +#: nova/virt/xenapi/vmops.py:1257 msgid "Unable to find root VBD/VDI for VM" msgstr "Unable to find root VBD/VDI for VM" -#: nova/virt/xenapi/vmops.py:1292 +#: nova/virt/xenapi/vmops.py:1295 msgid "instance has a kernel or ramdisk but not both" msgstr "instance has a kernel or ramdisk but not both" -#: nova/virt/xenapi/vmops.py:1326 +#: nova/virt/xenapi/vmops.py:1329 msgid "Destroying VM" msgstr "Destroying VM" -#: nova/virt/xenapi/vmops.py:1355 +#: nova/virt/xenapi/vmops.py:1358 msgid "VM is not present, skipping destroy..." msgstr "VM is not present, skipping destroy..." 
-#: nova/virt/xenapi/vmops.py:1406 +#: nova/virt/xenapi/vmops.py:1409 #, python-format msgid "Instance is already in Rescue Mode: %s" msgstr "Instance is already in Rescue Mode: %s" -#: nova/virt/xenapi/vmops.py:1448 +#: nova/virt/xenapi/vmops.py:1451 #, fuzzy msgid "VM is not present, skipping soft delete..." msgstr "VM is not present, skipping destroy..." -#: nova/virt/xenapi/vmops.py:1834 +#: nova/virt/xenapi/vmops.py:1843 #, python-format msgid "Destination host:%s must be in the same aggregate as the source server" msgstr "" -#: nova/virt/xenapi/vmops.py:1855 +#: nova/virt/xenapi/vmops.py:1864 #, fuzzy msgid "No suitable network for migrate" msgstr "Bad networks format" -#: nova/virt/xenapi/vmops.py:1861 +#: nova/virt/xenapi/vmops.py:1870 #, python-format msgid "PIF %s does not contain IP address" msgstr "" -#: nova/virt/xenapi/vmops.py:1874 +#: nova/virt/xenapi/vmops.py:1883 msgid "Migrate Receive failed" msgstr "Migrate Receive failed" -#: nova/virt/xenapi/vmops.py:1948 +#: nova/virt/xenapi/vmops.py:1957 msgid "XAPI supporting relax-xsm-sr-check=true required" msgstr "" -#: nova/virt/xenapi/vmops.py:1959 +#: nova/virt/xenapi/vmops.py:1968 #, python-format msgid "assert_can_migrate failed because: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:2019 +#: nova/virt/xenapi/vmops.py:2028 msgid "Migrate Send failed" msgstr "Migrate Send failed" @@ -9663,6 +9287,11 @@ msgstr "Mountpoint cannot be translated: %s" msgid "Unable to find SR from VBD %s" msgstr "Unable to find SR from VBD %s" +#: nova/virt/xenapi/volume_utils.py:311 +#, python-format +msgid "Unable to find SR from VDI %s" +msgstr "" + #: nova/virt/xenapi/volumeops.py:63 #, python-format msgid "Connected volume (vdi_uuid): %s" @@ -9743,12 +9372,17 @@ msgstr "Unexpected error: %s" msgid "Starting nova-xvpvncproxy node (version %s)" msgstr "Starting nova-xvpvncproxy node (version %s)" -#: nova/volume/cinder.py:236 +#: nova/volume/cinder.py:257 +#, python-format +msgid "Invalid client version, must be one of: 
%s" +msgstr "" + +#: nova/volume/cinder.py:281 #, fuzzy msgid "status must be 'in-use'" msgstr "status must be available" -#: nova/volume/cinder.py:242 +#: nova/volume/cinder.py:287 #, fuzzy msgid "status must be 'available'" msgstr "status must be available" diff --git a/nova/locale/es/LC_MESSAGES/nova-log-critical.po b/nova/locale/es/LC_MESSAGES/nova-log-critical.po index 9b9347a953..5d75a6a4ac 100644 --- a/nova/locale/es/LC_MESSAGES/nova-log-critical.po +++ b/nova/locale/es/LC_MESSAGES/nova-log-critical.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:04+0000\n" -"PO-Revision-Date: 2014-07-16 11:52+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"PO-Revision-Date: 2014-07-25 14:11+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/" "es/)\n" @@ -19,7 +19,12 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -#: nova/virt/vmwareapi/driver.py:864 +#: nova/api/openstack/__init__.py:331 +#, python-format +msgid "Missing core API extensions: %s" +msgstr "Extensiones core API omitidas: %s" + +#: nova/virt/vmwareapi/driver.py:658 #, python-format msgid "" "Unable to connect to server at %(server)s, sleeping for %(seconds)s seconds" @@ -27,7 +32,7 @@ msgstr "" "Incapaz de conectar al servidor en %(server)s, esperando durante %(seconds)s " "segundos" -#: nova/virt/vmwareapi/driver.py:973 +#: nova/virt/vmwareapi/driver.py:767 #, python-format msgid "In vmwareapi: _call_method (session=%s)" msgstr "En vmwareapi: _call_method (session=%s)" diff --git a/nova/locale/es/LC_MESSAGES/nova-log-error.po b/nova/locale/es/LC_MESSAGES/nova-log-error.po index 8df99c1938..6e3b9f91b2 100644 --- a/nova/locale/es/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/es/LC_MESSAGES/nova-log-error.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: 
EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:04+0000\n" -"PO-Revision-Date: 2014-07-16 14:42+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"PO-Revision-Date: 2014-08-11 15:41+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/" "es/)\n" @@ -39,18 +39,280 @@ msgstr "" msgid "Exception running %(name)s post-hook: %(obj)s" msgstr "" -#: nova/api/ec2/__init__.py:243 +#: nova/api/ec2/__init__.py:244 #, python-format msgid "Keystone failure: %s" msgstr "Anomalía de keystone: %s" -#: nova/compute/manager.py:5416 +#: nova/api/ec2/__init__.py:493 +#, python-format +msgid "Unexpected %(ex_name)s raised: %(ex_str)s" +msgstr "Encontrado %(ex_name)s inesperado : %(ex_str)s" + +#: nova/api/ec2/__init__.py:520 +#, python-format +msgid "Environment: %s" +msgstr "Entorno: %s" + +#: nova/api/metadata/handler.py:155 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: nova/api/metadata/handler.py:212 +#, python-format +msgid "Failed to get metadata for instance id: %s" +msgstr "No se han podido obtener metadatos para el id de instancia: %s" + +#: nova/api/openstack/common.py:134 +#, python-format +msgid "" +"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad " +"upgrade or db corrupted?" +msgstr "" +"el estado es UNKNOWN de vm_state=%(vm_state)s task_state=%(task_state)s. " +"¿Actualización errónea o base de datos dañada?" 
+ +#: nova/api/openstack/wsgi.py:684 +#, python-format +msgid "Exception handling resource: %s" +msgstr "Excepción al manejar recurso: %s" + +#: nova/api/openstack/compute/contrib/admin_actions.py:68 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: nova/api/openstack/compute/contrib/admin_actions.py:90 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: nova/api/openstack/compute/contrib/admin_actions.py:112 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: nova/api/openstack/compute/contrib/admin_actions.py:134 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: nova/api/openstack/compute/contrib/admin_actions.py:160 +#, python-format +msgid "Error in migrate %s" +msgstr "Error al migrar %s" + +#: nova/api/openstack/compute/contrib/admin_actions.py:179 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "Compute.api::reset_network %s" + +#: nova/api/openstack/compute/contrib/admin_actions.py:198 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "Compute.api::inject_network_info %s" + +#: nova/api/openstack/compute/contrib/admin_actions.py:215 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: nova/api/openstack/compute/contrib/admin_actions.py:234 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: nova/api/openstack/compute/contrib/admin_actions.py:392 +#, python-format +msgid "Compute.api::resetState %s" +msgstr "Compute.api::resetState %s" + +#: nova/api/openstack/compute/contrib/multinic.py:85 +#, python-format +msgid "Unable to find address %r" +msgstr "No se puede encontrar la dirección %r" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85 +msgid "Failed to get default networks" +msgstr "Fallo al obtener las redes predeterminadas" + +#: 
nova/api/openstack/compute/contrib/os_tenant_networks.py:125 +msgid "Failed to update usages deallocating network." +msgstr "No se han podido actualizar los usos desasignando la red." + +#: nova/compute/api.py:561 +msgid "Failed to set instance name using multi_instance_display_name_template." +msgstr "" +"Se ha encontrado un error en la definición del nombre de instancia mediante " +"multi_instance_display_name_template." + +#: nova/compute/api.py:1429 +msgid "" +"Something wrong happened when trying to delete snapshot from shelved " +"instance." +msgstr "" +"Algo malo ha pasado al intentar eliminar la instantánea de la imagen " +"almacenada." + +#: nova/compute/api.py:3732 +msgid "Failed to update usages deallocating security group" +msgstr "" +"No se han podido actualizar los usos desasignando el grupo de seguridad " + +#: nova/compute/flavors.py:167 +#, python-format +msgid "DB error: %s" +msgstr "Error de base de datos: %s" + +#: nova/compute/flavors.py:178 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "No se ha encontrado el tipo de instancia %s para suprimirse" + +#: nova/compute/manager.py:366 +#, python-format +msgid "Error while trying to clean up image %s" +msgstr "Error al intentar limpiar imagen %s" + +#: nova/compute/manager.py:755 +msgid "Failed to check if instance shared" +msgstr "Fallo al verificar si la instancia se encuentra compartida" + +#: nova/compute/manager.py:821 nova/compute/manager.py:872 +msgid "Failed to complete a deletion" +msgstr "Fallo durante la compleción una remoción" + +#: nova/compute/manager.py:913 +msgid "Failed to stop instance" +msgstr "Fallo al detener instancia" + +#: nova/compute/manager.py:925 +msgid "Failed to start instance" +msgstr "Fallo al iniciar instancia" + +#: nova/compute/manager.py:950 +msgid "Failed to revert crashed migration" +msgstr "Se ha encontrado un error en al revertir la migración colgada" + +#: nova/compute/manager.py:1364 +msgid "Failed to dealloc network for deleted 
instance" +msgstr "No se ha podido desasignar la red para la instancia suprimida" + +#: nova/compute/manager.py:1385 +msgid "Failed to dealloc network for failed instance" +msgstr "Fallo al desasociar red para la instancia fallida" + +#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +msgid "Error trying to reschedule" +msgstr "Error al intentar volver a programar " + +#: nova/compute/manager.py:1567 +#, python-format +msgid "Instance failed network setup after %(attempts)d attempt(s)" +msgstr "" +"La configuración de red de la instancia falló después de %(attempts)d intento" +"(s)" + +#: nova/compute/manager.py:1755 +msgid "Instance failed block device setup" +msgstr "Ha fallado la configuración de dispositivo de bloque en la instancia" + +#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 +#: nova/compute/manager.py:4058 +msgid "Instance failed to spawn" +msgstr "La instancia no se ha podido generar" + +#: nova/compute/manager.py:1957 +msgid "Unexpected build failure, not rescheduling build." +msgstr "Fallo de compilación inesperado, no se reprogramará la compilación." 
+ +#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +msgid "Failed to allocate network(s)" +msgstr "Fallo al asociar red(es)" + +#: nova/compute/manager.py:2104 +msgid "Failure prepping block device" +msgstr "Fallo al preparar el dispositivo de bloques" + +#: nova/compute/manager.py:2137 +msgid "Failed to deallocate networks" +msgstr "Fallo al desasociar redes" + +#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 +#: nova/compute/manager.py:5803 +msgid "Setting instance vm_state to ERROR" +msgstr "Estableciendo el vm_state de la instancia a ERROR" + +#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#, python-format +msgid "Failed to get compute_info for %s" +msgstr "Fallo al obtener compute_info para %s" + +#: nova/compute/manager.py:3005 +#, python-format +msgid "set_admin_password failed: %s" +msgstr "set_admin_password ha fallado: %s" + +#: nova/compute/manager.py:3090 +msgid "Error trying to Rescue Instance" +msgstr "Error al intentar Rescatar Instancia" + +#: nova/compute/manager.py:3711 +#, python-format +msgid "Failed to rollback quota for failed finish_resize: %s" +msgstr "Fallo al revertir las cuotas para un finish_resize fallido: %s" + +#: nova/compute/manager.py:4310 +#, python-format +msgid "Failed to attach %(volume_id)s at %(mountpoint)s" +msgstr "Fallo al asociar %(volume_id)s en %(mountpoint)s" + +#: nova/compute/manager.py:4349 +#, python-format +msgid "Failed to detach volume %(volume_id)s from %(mp)s" +msgstr "No se ha podido desconectar el volumen %(volume_id)s de %(mp)s" + +#: nova/compute/manager.py:4422 +#, python-format +msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" +msgstr "" +"Fallo para intercambiar volúmen %(old_volume_id)s por %(new_volume_id)s" + +#: nova/compute/manager.py:4429 +#, python-format +msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" +msgstr "" +"Fallo al conectar hacia al volúmen %(volume_id)s con el volumen en " +"%(mountpoint)s" + 
+#: nova/compute/manager.py:4716 +#, python-format +msgid "Pre live migration failed at %s" +msgstr "Previo a migración en vivo falló en %s" + +#: nova/compute/manager.py:5216 +msgid "Periodic task failed to offload instance." +msgstr "Tarea periódica falló al descargar instancia." + +#: nova/compute/manager.py:5256 +#, python-format +msgid "Failed to generate usage audit for instance on host %s" +msgstr "" +"No se ha podido generar auditoría de uso para la instancia en el host %s " + +#: nova/compute/manager.py:5446 msgid "" "Periodic sync_power_state task had an error while processing an instance." msgstr "" "La tarea periódica sync_power_state ha tenido un error al procesar una " "instancia." +#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 +#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +msgid "error during stop() in sync_power_state." +msgstr "error durante stop() en sync_power_state." + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "No se ha podido notificar a las células el error de instancia" @@ -70,11 +332,11 @@ msgstr "La excepción inesperada ha ocurrido %d vez(veces)... reintentando." msgid "Could not release the acquired lock `%s`" msgstr "No se ha podido liberar el bloqueo adquirido `%s`" -#: nova/openstack/common/loopingcall.py:89 +#: nova/openstack/common/loopingcall.py:95 msgid "in fixed duration looping call" msgstr "en llamada en bucle de duración fija" -#: nova/openstack/common/loopingcall.py:136 +#: nova/openstack/common/loopingcall.py:138 msgid "in dynamic looping call" msgstr "en llamada en bucle dinámica" @@ -123,50 +385,71 @@ msgstr "Excepción de base de datos recortada." msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:639 +#: nova/pci/pci_stats.py:119 +msgid "" +"Failed to allocate PCI devices for instance. Unassigning devices back to " +"pools. 
This should not happen, since the scheduler should have accurate " +"information, and allocation during claims is controlled via a hold on the " +"compute node semaphore" +msgstr "" + +#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109 +#, python-format +msgid "PCI device %s not found" +msgstr "" + +#: nova/virt/disk/api.py:388 +#, python-format +msgid "" +"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s" +msgstr "" +"Se ha encontrado un error en el montaje del sistema de archivos de " +"contenedor '%(image)s' en '%(target)s': : %(errors)s" + +#: nova/virt/libvirt/driver.py:641 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" "Nova necesita libvirt versión %(major)i.%(minor)i.%(micro)i o superior." -#: nova/virt/libvirt/driver.py:764 +#: nova/virt/libvirt/driver.py:766 #, python-format msgid "Connection to libvirt failed: %s" msgstr "Ha fallado la conexión a libvirt: %s" -#: nova/virt/libvirt/driver.py:937 +#: nova/virt/libvirt/driver.py:929 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "Error de libvirt durante destrucción. Código=%(errcode)s Error=%(e)s" -#: nova/virt/libvirt/driver.py:1015 +#: nova/virt/libvirt/driver.py:1007 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" "Error de libvirt durante borrado de definición. Código=%(errcode)s Error=" "%(e)s" -#: nova/virt/libvirt/driver.py:1041 +#: nova/virt/libvirt/driver.py:1035 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" "Error de libvirt durante eliminación de filtro. Código=%(errcode)s Error=" "%(e)s" -#: nova/virt/libvirt/driver.py:1445 +#: nova/virt/libvirt/driver.py:1438 msgid "attaching network adapter failed." msgstr "se ha encontrado un error en la conexión del adaptador de red." 
-#: nova/virt/libvirt/driver.py:1470 +#: nova/virt/libvirt/driver.py:1465 msgid "detaching network adapter failed." msgstr "se ha encontrado un error en la desconexión del adaptador de red." -#: nova/virt/libvirt/driver.py:1719 +#: nova/virt/libvirt/driver.py:1717 msgid "Failed to send updated snapshot status to volume service." msgstr "" "Fallo al enviar estado de instantánea actualizada al servicio de volumen." -#: nova/virt/libvirt/driver.py:1827 +#: nova/virt/libvirt/driver.py:1825 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." @@ -174,70 +457,68 @@ msgstr "" "Incapaz de crear instantánea de VM inmovilizada, intentando nuevamente con " "la inmovilidad deshabilitada" -#: nova/virt/libvirt/driver.py:1833 +#: nova/virt/libvirt/driver.py:1831 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" "Incapaz de crear instantánea de VM, operación de volume_snapshot fallida." -#: nova/virt/libvirt/driver.py:1882 +#: nova/virt/libvirt/driver.py:1880 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" "Error ocurrido durante volume_snapshot_create, enviando estado de error a " "Cinder." -#: nova/virt/libvirt/driver.py:2028 +#: nova/virt/libvirt/driver.py:2026 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." msgstr "" "Ha ocurrido un error durante volume_snapshot_delete, envinado estado de " "error a Cinder." 
-#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 +#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "Error en '%(path)s' al comprobar E/S directa: '%(ex)s'" -#: nova/virt/libvirt/driver.py:2619 +#: nova/virt/libvirt/driver.py:2620 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "Error al inyectar datos en imagen %(img_id)s (%(e)s)" -#: nova/virt/libvirt/driver.py:2770 +#: nova/virt/libvirt/driver.py:2788 #, python-format msgid "Creating config drive failed with error: %s" msgstr "La creación de unidad de configuración ha fallado con el error: %s" -#: nova/virt/libvirt/driver.py:2863 +#: nova/virt/libvirt/driver.py:2881 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "La asociación de dispositivos PCI %(dev)s a %(dom)s ha fallado." -#: nova/virt/libvirt/driver.py:3645 +#: nova/virt/libvirt/driver.py:3680 #, python-format -msgid "An error occurred while trying to define a domain with xml: %s" -msgstr "Un error ha ocurrido al tratar de definir un dominio con xml: %s" +msgid "Error defining a domain with XML: %s" +msgstr "" -#: nova/virt/libvirt/driver.py:3654 +#: nova/virt/libvirt/driver.py:3684 #, python-format -msgid "An error occurred while trying to launch a defined domain with xml: %s" +msgid "Error launching a defined domain with XML: %s" msgstr "" -"Un error ha ocurrido al intentar lanzar un dominio definido con xml: %s" -#: nova/virt/libvirt/driver.py:3663 +#: nova/virt/libvirt/driver.py:3689 #, python-format -msgid "An error occurred while enabling hairpin mode on domain with xml: %s" +msgid "Error enabling hairpin mode with XML: %s" msgstr "" -"Un error ha ocurrido al habilitar el modo pasador en el dominio con xml: %s" -#: nova/virt/libvirt/driver.py:3681 +#: nova/virt/libvirt/driver.py:3703 #, python-format msgid "Neutron Reported failure on event %(event)s for 
instance %(uuid)s" msgstr "" "Neutron ha reportado una falla en el evento %(event)s para la instancia " "%(uuid)s" -#: nova/virt/libvirt/driver.py:3988 +#: nova/virt/libvirt/driver.py:4012 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " @@ -246,22 +527,22 @@ msgstr "" "El nombre del anfitrión ha cambiado de %(old)s a %(new)s. Se requiere un " "reinicio para hacer efecto." -#: nova/virt/libvirt/driver.py:4667 +#: nova/virt/libvirt/driver.py:4691 #, python-format msgid "Live Migration failure: %s" msgstr "Fallo en migración en vivo: %s" -#: nova/virt/libvirt/driver.py:5419 +#: nova/virt/libvirt/driver.py:5487 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "No se ha podido limpiar el directorio %(target)s: %(e)s" -#: nova/virt/libvirt/imagebackend.py:201 +#: nova/virt/libvirt/imagebackend.py:200 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "Incapaz de preallocate_images=%(imgs)s en la ruta: %(path)s" -#: nova/virt/libvirt/imagebackend.py:229 +#: nova/virt/libvirt/imagebackend.py:227 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " @@ -270,11 +551,6 @@ msgstr "" "El tamaño virtual %(base_size)s de %(base)s es más grande que el tamaño del " "disco raíz del sabor %(size)s" -#: nova/virt/libvirt/imagebackend.py:505 -#, python-format -msgid "error opening rbd image %s" -msgstr "Error al abrir imagen rbd %s" - #: nova/virt/libvirt/imagecache.py:130 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" @@ -295,15 +571,19 @@ msgstr "No se ha podido eliminar %(base_file)s, el error era %(error)s" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "Ignorando valor no reconocido volume_clear='%s'" -#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 -#: nova/virt/libvirt/vif.py:562 +#: nova/virt/libvirt/rbd.py:62 +#, python-format +msgid "error opening rbd 
image %s" +msgstr "Error al abrir imagen rbd %s" + +#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 +#: nova/virt/libvirt/vif.py:533 msgid "Failed while plugging vif" msgstr "Fallo al conectar vif" -#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 -#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 -#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 -#: nova/virt/libvirt/vif.py:737 +#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 +#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 +#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 msgid "Failed while unplugging vif" msgstr "No se ha podido desconectar vif" @@ -322,8 +602,18 @@ msgstr "No se puede desmontar el recurso compartido NFS %s" msgid "Couldn't unmount the GlusterFS share %s" msgstr "No se puede desmontar el recurso compartido GlusterFS %s" -#: nova/virt/vmwareapi/vmops.py:500 +#: nova/virt/vmwareapi/vmops.py:509 #, python-format msgid "" "Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s" msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1553 +#, python-format +msgid "Attaching network adapter failed. Exception: %s" +msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1593 +#, python-format +msgid "Detaching network adapter failed. 
Exception: %s" +msgstr "" diff --git a/nova/locale/es/LC_MESSAGES/nova-log-info.po b/nova/locale/es/LC_MESSAGES/nova-log-info.po index 2d20bcc0a1..d0dd8e8bee 100644 --- a/nova/locale/es/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/es/LC_MESSAGES/nova-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" -"PO-Revision-Date: 2014-07-16 14:42+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"PO-Revision-Date: 2014-08-07 07:51+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/" "es/)\n" @@ -19,7 +19,32 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -#: nova/compute/manager.py:5422 +#: nova/api/openstack/__init__.py:101 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "Se ha devuelto %(url)s con HTTP %(status)d" + +#: nova/api/openstack/__init__.py:294 +msgid "V3 API has been disabled by configuration" +msgstr "" + +#: nova/api/openstack/wsgi.py:688 +#, python-format +msgid "Fault thrown: %s" +msgstr "Error emitido: %s" + +#: nova/api/openstack/wsgi.py:691 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "Excepción de HTTP emitida: %s" + +#: nova/api/openstack/compute/contrib/os_networks.py:101 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128 +#, python-format +msgid "Deleting network with id %s" +msgstr "Suprimiendo red con el id %s" + +#: nova/compute/manager.py:5452 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." @@ -27,6 +52,20 @@ msgstr "" "Durante sync_power_state la instancia ha dejado una tarea pendiente " "(%(task)s). Omitir." 
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36 +msgid "" +"Skipped adding reservations_deleted_expire_idx because an equivalent index " +"already exists." +msgstr "" + +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58 +msgid "" +"Skipped removing reservations_deleted_expire_idx because index does not " +"exist." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" @@ -109,96 +148,100 @@ msgstr "Eliminando registro duplicado con id: %(id)s de la tabla: %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/firewall.py:446 +#: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/libvirt/driver.py:839 +#: nova/virt/disk/vfs/guestfs.py:137 +msgid "Unable to force TCG mode, libguestfs too old?" +msgstr "" + +#: nova/virt/libvirt/driver.py:837 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:958 +#: nova/virt/libvirt/driver.py:950 msgid "Instance destroyed successfully." msgstr "La instancia se ha destruido satisfactoriamente. " -#: nova/virt/libvirt/driver.py:968 +#: nova/virt/libvirt/driver.py:960 msgid "Instance may be started again." msgstr "La instancia puede volver a iniciarse." -#: nova/virt/libvirt/driver.py:978 +#: nova/virt/libvirt/driver.py:970 msgid "Going to destroy instance again." msgstr "Se va a volver a destruir la instancia." 
-#: nova/virt/libvirt/driver.py:1574 +#: nova/virt/libvirt/driver.py:1570 msgid "Beginning live snapshot process" msgstr "Empezando proceso de instantánea en directo" -#: nova/virt/libvirt/driver.py:1577 +#: nova/virt/libvirt/driver.py:1573 msgid "Beginning cold snapshot process" msgstr "Empezando proceso de instantánea frío" -#: nova/virt/libvirt/driver.py:1606 +#: nova/virt/libvirt/driver.py:1602 msgid "Snapshot extracted, beginning image upload" msgstr "Se ha extraído instantánea, empezando subida de imagen" -#: nova/virt/libvirt/driver.py:1618 +#: nova/virt/libvirt/driver.py:1614 msgid "Snapshot image upload complete" msgstr "Subida de imagen de instantánea se ha completado" -#: nova/virt/libvirt/driver.py:2049 +#: nova/virt/libvirt/driver.py:2047 msgid "Instance soft rebooted successfully." msgstr "" "La instancia ha rearrancado satisfactoriamente de forma no permanente. " -#: nova/virt/libvirt/driver.py:2092 +#: nova/virt/libvirt/driver.py:2090 msgid "Instance shutdown successfully." msgstr "La instancia ha concluido satisfactoriamente." -#: nova/virt/libvirt/driver.py:2100 +#: nova/virt/libvirt/driver.py:2098 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "Es posible que la instancia se haya rearrancado durante el arranque no " "permanente, por consiguiente volver ahora." -#: nova/virt/libvirt/driver.py:2168 +#: nova/virt/libvirt/driver.py:2167 msgid "Instance rebooted successfully." msgstr "La instancia ha rearrancado satisfactoriamente." -#: nova/virt/libvirt/driver.py:2336 +#: nova/virt/libvirt/driver.py:2335 msgid "Instance spawned successfully." msgstr "Instancia generada satisfactoriamente. 
" -#: nova/virt/libvirt/driver.py:2352 +#: nova/virt/libvirt/driver.py:2351 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "data: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 +#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Se ha devuelto registro de consola truncado, se han ignorado %d bytes " -#: nova/virt/libvirt/driver.py:2645 +#: nova/virt/libvirt/driver.py:2646 msgid "Creating image" msgstr "Creando imagen" -#: nova/virt/libvirt/driver.py:2754 +#: nova/virt/libvirt/driver.py:2772 msgid "Using config drive" msgstr "Utilizando unidad de configuración" -#: nova/virt/libvirt/driver.py:2763 +#: nova/virt/libvirt/driver.py:2781 #, python-format msgid "Creating config drive at %(path)s" msgstr "Creando unidad de configuración en %(path)s" -#: nova/virt/libvirt/driver.py:3315 +#: nova/virt/libvirt/driver.py:3334 msgid "Configuring timezone for windows instance to localtime" msgstr "Configurando la zona horaria para la instancia windows a horario local" -#: nova/virt/libvirt/driver.py:4193 +#: nova/virt/libvirt/driver.py:4217 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" @@ -208,7 +251,7 @@ msgstr "" "desasociado. Instancia=%(instance_name)s Disco=%(disk)s Código=%(errcode)s " "Error=%(e)s" -#: nova/virt/libvirt/driver.py:4199 +#: nova/virt/libvirt/driver.py:4223 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -217,39 +260,39 @@ msgstr "" "No se ha podido encontrar el dominio en libvirt para la instancia %s. 
No se " "pueden obtener estadísticas de bloque para el dispositivo" -#: nova/virt/libvirt/driver.py:4441 +#: nova/virt/libvirt/driver.py:4465 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5174 +#: nova/virt/libvirt/driver.py:5207 msgid "Instance running successfully." msgstr "La instancia se está ejecutando satisfactoriamente." -#: nova/virt/libvirt/driver.py:5414 +#: nova/virt/libvirt/driver.py:5481 #, python-format msgid "Deleting instance files %s" msgstr "Eliminado los archivos de instancia %s" -#: nova/virt/libvirt/driver.py:5426 +#: nova/virt/libvirt/driver.py:5494 #, python-format msgid "Deletion of %s failed" msgstr "La remoción de %s ha fallado" -#: nova/virt/libvirt/driver.py:5429 +#: nova/virt/libvirt/driver.py:5498 #, python-format msgid "Deletion of %s complete" msgstr "La remoción de %s se ha completado" -#: nova/virt/libvirt/firewall.py:105 +#: nova/virt/libvirt/firewall.py:106 msgid "Called setup_basic_filtering in nwfilter" msgstr "Se ha llamado a setup_basic_filtering en nwfilter" -#: nova/virt/libvirt/firewall.py:113 +#: nova/virt/libvirt/firewall.py:114 msgid "Ensuring static filters" msgstr "Asegurando filtros estáticos" -#: nova/virt/libvirt/firewall.py:304 +#: nova/virt/libvirt/firewall.py:305 msgid "Attempted to unfilter instance which is not filtered" msgstr "Se ha intentado eliminar filtro de instancia que no está filtrada" @@ -310,11 +353,11 @@ msgstr "Archivos de base corruptos: %s " msgid "Removable base files: %s" msgstr "Archivos de base eliminables: %s" -#: nova/virt/libvirt/utils.py:536 +#: nova/virt/libvirt/utils.py:490 msgid "findmnt tool is not installed" msgstr "La herramienta findmnt no está instalada" -#: nova/virt/xenapi/vm_utils.py:1353 +#: nova/virt/xenapi/vm_utils.py:1355 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/es/LC_MESSAGES/nova-log-warning.po 
b/nova/locale/es/LC_MESSAGES/nova-log-warning.po index 785e991caf..762a305d67 100644 --- a/nova/locale/es/LC_MESSAGES/nova-log-warning.po +++ b/nova/locale/es/LC_MESSAGES/nova-log-warning.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-06-24 16:11+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/" @@ -19,15 +19,141 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -#: nova/compute/manager.py:2002 +#: nova/api/auth.py:73 +msgid "ratelimit_v3 is removed from v3 api." +msgstr "" + +#: nova/api/auth.py:160 +msgid "Sourcing roles from deprecated X-Role HTTP header" +msgstr "" + +#: nova/api/ec2/__init__.py:169 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." +msgstr "" + +#: nova/api/ec2/cloud.py:1289 +#: nova/api/openstack/compute/contrib/floating_ips.py:254 +#, python-format +msgid "multiple fixed_ips exist, using the first: %s" +msgstr "" + +#: nova/api/metadata/handler.py:119 +msgid "" +"X-Instance-ID present in request headers. The 'service_metadata_proxy' " +"option must be enabled to process this header." +msgstr "" + +#: nova/api/metadata/handler.py:189 +#, python-format +msgid "" +"X-Instance-ID-Signature: %(signature)s does not match the expected value: " +"%(expected_signature)s for id: %(instance_id)s. Request From: " +"%(remote_address)s" +msgstr "" + +#: nova/api/metadata/handler.py:215 +#, python-format +msgid "" +"Tenant_id %(tenant_id)s does not match tenant_id of instance %(instance_id)s." 
+msgstr "" + +#: nova/api/metadata/vendordata_json.py:47 +msgid "file does not exist" +msgstr "" + +#: nova/api/metadata/vendordata_json.py:49 +msgid "Unexpected IOError when reading" +msgstr "" + +#: nova/api/metadata/vendordata_json.py:53 +msgid "failed to load json" +msgstr "" + +#: nova/api/openstack/__init__.py:235 nova/api/openstack/__init__.py:409 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: nova/api/openstack/__init__.py:282 +#: nova/api/openstack/compute/plugins/v3/servers.py:104 +#, python-format +msgid "Not loading %s because it is in the blacklist" +msgstr "" + +#: nova/api/openstack/__init__.py:287 +#: nova/api/openstack/compute/plugins/v3/servers.py:109 +#, python-format +msgid "Not loading %s because it is not in the whitelist" +msgstr "" + +#: nova/api/openstack/__init__.py:307 +#, python-format +msgid "Extensions in both blacklist and whitelist: %s" +msgstr "" + +#: nova/api/openstack/common.py:456 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: nova/api/openstack/extensions.py:279 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: nova/api/openstack/compute/servers.py:82 +msgid "" +"XML support has been deprecated and may be removed as early as the Juno " +"release." 
+msgstr "" + +#: nova/api/openstack/compute/views/servers.py:197 +msgid "Instance has had its instance_type removed from the DB" +msgstr "" + +#: nova/compute/manager.py:2016 msgid "No more network or fixed IP to be allocated" msgstr "" -#: nova/compute/manager.py:2267 +#: nova/compute/manager.py:2256 +#, python-format +msgid "Ignoring EndpointNotFound: %s" +msgstr "" + +#: nova/compute/manager.py:2274 #, python-format msgid "Failed to delete volume: %(volume_id)s due to %(exc)s" msgstr "" +#: nova/compute/utils.py:204 +#, python-format +msgid "Can't access image %(image_id)s: %(error)s" +msgstr "" + +#: nova/compute/utils.py:328 +#, python-format +msgid "" +"No host name specified for the notification of HostAPI.%s and it will be " +"ignored" +msgstr "" + +#: nova/compute/utils.py:456 +#, python-format +msgid "" +"Value of 0 or None specified for %s. This behaviour will change in meaning " +"in the K release, to mean 'call at the default rate' rather than 'do not " +"call'. To keep the 'do not call' behaviour, use a negative value." +msgstr "" + +#: nova/compute/resources/__init__.py:31 +#, python-format +msgid "Compute resource plugin %s was not loaded" +msgstr "" + #: nova/consoleauth/manager.py:84 #, python-format msgid "Token: %(token)s failed to save into memcached." @@ -38,20 +164,36 @@ msgstr "" msgid "Instance: %(instance_uuid)s failed to save into memcached" msgstr "" -#: nova/openstack/common/loopingcall.py:82 +#: nova/network/neutronv2/api.py:214 #, python-format -msgid "task run outlasted interval by %s sec" -msgstr "la ejecución de tarea ha durado %s seg. 
más que el intervalo" +msgid "Neutron error: Port quota exceeded in tenant: %s" +msgstr "" -#: nova/openstack/common/network_utils.py:146 +#: nova/network/neutronv2/api.py:219 +#, python-format +msgid "Neutron error: No more fixed IPs in network: %s" +msgstr "" + +#: nova/network/neutronv2/api.py:223 +#, python-format +msgid "" +"Neutron error: MAC address %(mac)s is already in use on network %(network)s." +msgstr "" + +#: nova/openstack/common/loopingcall.py:87 +#, python-format +msgid "task %(func_name)s run outlasted interval by %(delay).2f sec" +msgstr "" + +#: nova/openstack/common/network_utils.py:145 msgid "tcp_keepidle not available on your system" msgstr "" -#: nova/openstack/common/network_utils.py:153 +#: nova/openstack/common/network_utils.py:152 msgid "tcp_keepintvl not available on your system" msgstr "" -#: nova/openstack/common/network_utils.py:160 +#: nova/openstack/common/network_utils.py:159 msgid "tcp_keepknt not available on your system" msgstr "" @@ -80,7 +222,7 @@ msgstr "" msgid "SQL connection failed. %s attempts left." msgstr "La conexión SQL ha fallado. Quedan %s intentos." -#: nova/openstack/common/db/sqlalchemy/utils.py:97 +#: nova/openstack/common/db/sqlalchemy/utils.py:96 msgid "Id not in sort_keys; is sort_keys unique?" msgstr "Id no está en sort_keys; ¿es sort_keys exclusivo?" 
@@ -89,7 +231,7 @@ msgid "VCPUs not set; assuming CPU collection broken" msgstr "" "VCPU no establecidas; suponiendo que la colección de CPU se ha interrumpido" -#: nova/scheduler/filters/core_filter.py:92 +#: nova/scheduler/filters/core_filter.py:102 #, python-format msgid "Could not decode cpu_allocation_ratio: '%s'" msgstr "No se puede decodificar cpu_allocation_ratio: '%s'" @@ -99,14 +241,28 @@ msgstr "No se puede decodificar cpu_allocation_ratio: '%s'" msgid "Could not decode ram_allocation_ratio: '%s'" msgstr "No se puede decodificar ram_allocation_ratio: '%s'" -#: nova/virt/libvirt/driver.py:374 +#: nova/virt/disk/api.py:366 +#, python-format +msgid "Ignoring error injecting data into image %(image)s (%(e)s)" +msgstr "" + +#: nova/virt/disk/api.py:456 +#, python-format +msgid "Ignoring error injecting %(inject)s into image (%(e)s)" +msgstr "" + +#: nova/virt/disk/vfs/api.py:44 +msgid "Unable to import guestfs, falling back to VFSLocalFS" +msgstr "" + +#: nova/virt/libvirt/driver.py:370 #, python-format msgid "Invalid cachemode %(cache_mode)s specified for disk type %(disk_type)s." msgstr "" "Modalidad de caché %(cache_mode)s no válida especificada para el tipo de " "disco %(disk_type)s." -#: nova/virt/libvirt/driver.py:620 +#: nova/virt/libvirt/driver.py:616 #, python-format msgid "" "The libvirt driver is not tested on %(type)s/%(arch)s by the OpenStack " @@ -117,81 +273,81 @@ msgstr "" "projecto de OpenStack por lo cual su calidad no puede ser asegurada. 
Para " "mas información, ver: https://wiki.openstack.org/wiki/HypervisorSupportMatrix" -#: nova/virt/libvirt/driver.py:671 +#: nova/virt/libvirt/driver.py:673 #, python-format msgid "URI %(uri)s does not support events: %(error)s" msgstr "URI %(uri)s no soporta eventos: %(error)s" -#: nova/virt/libvirt/driver.py:687 +#: nova/virt/libvirt/driver.py:689 #, python-format msgid "URI %(uri)s does not support connection events: %(error)s" msgstr "URI %(uri)s no soporta eventos de conexión: %(error)s" -#: nova/virt/libvirt/driver.py:929 +#: nova/virt/libvirt/driver.py:921 msgid "Cannot destroy instance, operation time out" msgstr "" "No se puede destruir intsancia, tiempo de espera agotado para la operación" -#: nova/virt/libvirt/driver.py:953 +#: nova/virt/libvirt/driver.py:945 msgid "During wait destroy, instance disappeared." msgstr "" -#: nova/virt/libvirt/driver.py:1035 +#: nova/virt/libvirt/driver.py:1029 msgid "Instance may be still running, destroy it again." msgstr "Puede que la instancia aún se esté ejecutando, vuelva a destruirla." -#: nova/virt/libvirt/driver.py:1088 +#: nova/virt/libvirt/driver.py:1082 #, python-format msgid "Ignoring Volume Error on vol %(vol_id)s during delete %(exc)s" msgstr "" "Ignorando Error de volumen en volumen %(vol_id)s durante la remocion %(exc)s" -#: nova/virt/libvirt/driver.py:1141 +#: nova/virt/libvirt/driver.py:1132 #, python-format msgid "Volume %(disk)s possibly unsafe to remove, please clean up manually" msgstr "" "El volumen %(disk)s es posiblemente inseguro para remover, por favor " "límpialo manualmente" -#: nova/virt/libvirt/driver.py:1415 nova/virt/libvirt/driver.py:1423 +#: nova/virt/libvirt/driver.py:1408 nova/virt/libvirt/driver.py:1416 msgid "During detach_volume, instance disappeared." msgstr "Durante detach_volume, la instancia ha desaparecido." -#: nova/virt/libvirt/driver.py:1466 +#: nova/virt/libvirt/driver.py:1461 msgid "During detach_interface, instance disappeared." 
msgstr "Durante detach_interface, la instancia ha desaparecido." -#: nova/virt/libvirt/driver.py:2053 +#: nova/virt/libvirt/driver.py:2051 msgid "Failed to soft reboot instance. Trying hard reboot." msgstr "" "Fallo al reiniciar la instancia de manera suave. Intentando reinicio duro." -#: nova/virt/libvirt/driver.py:2614 +#: nova/virt/libvirt/driver.py:2608 #, python-format msgid "Image %s not found on disk storage. Continue without injecting data" msgstr "" "La imagen %s no se ha encontrado en el almacenamiento de disco. Continuando " "sin inyectar datos." -#: nova/virt/libvirt/driver.py:2777 +#: nova/virt/libvirt/driver.py:2795 msgid "File injection into a boot from volume instance is not supported" msgstr "" "La inyección de archivo al arranque desde la instancia del volumen no está " "soportado." -#: nova/virt/libvirt/driver.py:2852 +#: nova/virt/libvirt/driver.py:2870 msgid "Instance disappeared while detaching a PCI device from it." msgstr "" "La instancia ha desaparecido mientras se removía el dispositivo PCI de ella." -#: nova/virt/libvirt/driver.py:2907 +#: nova/virt/libvirt/driver.py:2925 #, python-format msgid "Cannot update service status on host: %s,since it is not registered." msgstr "" "No se puede actualizar el estado del servicio en el anfitrión: %s, ya que el " "mismo no está registrado." -#: nova/virt/libvirt/driver.py:2910 +#: nova/virt/libvirt/driver.py:2928 #, python-format msgid "" "Cannot update service status on host: %s,due to an unexpected exception." @@ -199,24 +355,24 @@ msgstr "" "No se puede atualizar el estado del servicio en el anfitrión: %s, debido a " "una excepción inesperada." 
-#: nova/virt/libvirt/driver.py:2938 +#: nova/virt/libvirt/driver.py:2956 #, python-format msgid "URI %(uri)s does not support full set of host capabilities: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:3763 +#: nova/virt/libvirt/driver.py:3785 #, python-format msgid "Timeout waiting for vif plugging callback for instance %(uuid)s" msgstr "" "Tiempo excedido para la llamada inversa de la conexión vif para la instancia " "%(uuid)s" -#: nova/virt/libvirt/driver.py:3784 +#: nova/virt/libvirt/driver.py:3806 #, python-format msgid "couldn't obtain the XML from domain: %(uuid)s, exception: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3841 +#: nova/virt/libvirt/driver.py:3863 msgid "" "Cannot get the number of cpu, because this function is not implemented for " "this platform. " @@ -224,28 +380,28 @@ msgstr "" "No se puede obtener el número de CPU porque esta función no está " "implementada para esta plataforma. " -#: nova/virt/libvirt/driver.py:3901 +#: nova/virt/libvirt/driver.py:3925 #, python-format msgid "" "couldn't obtain the vpu count from domain id: %(uuid)s, exception: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3932 +#: nova/virt/libvirt/driver.py:3956 #, python-format msgid "couldn't obtain the memory from domain: %(uuid)s, exception: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:4134 +#: nova/virt/libvirt/driver.py:4158 #, python-format msgid "URI %(uri)s does not support listDevices: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:4789 +#: nova/virt/libvirt/driver.py:4813 #, python-format msgid "plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d." msgstr "plug_vifs() ha fallado %(cnt)d. Intentando hasta %(max_retry)d." 
-#: nova/virt/libvirt/driver.py:4990 +#: nova/virt/libvirt/driver.py:5023 #, python-format msgid "" "Error from libvirt while getting description of %(instance_name)s: [Error " @@ -254,7 +410,7 @@ msgstr "" "Error de libvirt al obtener la descripción de %(instance_name)s: [Código de " "error %(error_code)s] %(ex)s" -#: nova/virt/libvirt/driver.py:4998 +#: nova/virt/libvirt/driver.py:5031 #, python-format msgid "" "Periodic task is updating the host stat, it is trying to get disk " @@ -265,7 +421,7 @@ msgstr "" "intentando obtener el disco %(i_name)s, pero el disco ha sido removido por " "operaciones concurrentes como la modificación de tamaño." -#: nova/virt/libvirt/driver.py:5004 +#: nova/virt/libvirt/driver.py:5037 #, python-format msgid "" "Periodic task is updating the host stat, it is trying to get disk " @@ -273,7 +429,7 @@ msgid "" "on the compute node but is not managed by Nova." msgstr "" -#: nova/virt/libvirt/firewall.py:49 +#: nova/virt/libvirt/firewall.py:50 msgid "" "Libvirt module could not be loaded. NWFilterFirewall will not work correctly." msgstr "" @@ -314,22 +470,15 @@ msgstr "" "tamaño del volumen virtual es %(size)db, pero el espacio libre en el grupo " "de volúmenes es solo %(free_space)db." +#: nova/virt/libvirt/rbd.py:268 +#, python-format +msgid "rbd remove %(volume)s in pool %(pool)s failed" +msgstr "" + #: nova/virt/libvirt/utils.py:69 nova/virt/libvirt/utils.py:75 msgid "systool is not installed" msgstr "systool no está instalado" -#: nova/virt/libvirt/utils.py:248 -#, python-format -msgid "rbd remove %(name)s in pool %(pool)s failed" -msgstr "la remoción rbd de %(name)s en el conjunto %(pool)s ha fallado" - -#: nova/virt/libvirt/vif.py:767 -#, python-format -msgid "" -"VIF driver \"%s\" is marked as deprecated and will be removed in the Juno " -"release." 
-msgstr "" - #: nova/virt/libvirt/volume.py:132 #, python-format msgid "Unknown content in connection_info/qos_specs: %s" @@ -396,8 +545,24 @@ msgstr "No se puede ejecutar /sbin/mount.sofs" msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "No se puede montar Scality SOFS, compruebe syslog por si hay errores" -#~ msgid "" -#~ "couldn't obtain the vpu count from domain id: %(id)s, exception: %(ex)s" -#~ msgstr "" -#~ "no se puede obtener el conteo de vpu del identificador del dominio: " -#~ "%(id)s, excepción: %(ex)s" +#: nova/virt/vmwareapi/driver.py:95 +msgid "" +"The VMware ESX driver is now deprecated and has been removed in the Juno " +"release. The VC driver will remain and continue to be supported." +msgstr "" + +#: nova/virt/vmwareapi/driver.py:150 +#, python-format +msgid "The following clusters could not be found in the vCenter %s" +msgstr "" + +#: nova/virt/vmwareapi/driver.py:179 +msgid "Instance cannot be found in host, or in an unknownstate." +msgstr "" + +#: nova/volume/cinder.py:249 +msgid "" +"Cinder V1 API is deprecated as of the Juno release, and Nova is still " +"configured to use it. Enable the V2 API in Cinder and set " +"cinder_catalog_info in nova.conf to use it." 
+msgstr "" diff --git a/nova/locale/es/LC_MESSAGES/nova.po b/nova/locale/es/LC_MESSAGES/nova.po index d238d41a52..50b94895f3 100644 --- a/nova/locale/es/LC_MESSAGES/nova.po +++ b/nova/locale/es/LC_MESSAGES/nova.po @@ -12,8 +12,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" -"PO-Revision-Date: 2014-07-19 23:09+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"PO-Revision-Date: 2014-08-11 22:50+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish " "(http://www.transifex.com/projects/p/nova/language/es/)\n" @@ -23,39 +23,39 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: nova/block_device.py:100 +#: nova/block_device.py:102 msgid "Some fields are invalid." msgstr "" -#: nova/block_device.py:110 +#: nova/block_device.py:112 msgid "Some required fields are missing" msgstr "" -#: nova/block_device.py:126 +#: nova/block_device.py:128 msgid "Boot index is invalid." msgstr "" -#: nova/block_device.py:169 +#: nova/block_device.py:171 msgid "Unrecognized legacy format." msgstr "" -#: nova/block_device.py:186 +#: nova/block_device.py:188 msgid "Invalid source_type field." msgstr "" -#: nova/block_device.py:190 +#: nova/block_device.py:192 msgid "Missing device UUID." msgstr "" -#: nova/block_device.py:369 +#: nova/block_device.py:371 msgid "Device name empty or too long." msgstr "" -#: nova/block_device.py:373 +#: nova/block_device.py:375 msgid "Device name contains spaces." msgstr "" -#: nova/block_device.py:383 +#: nova/block_device.py:385 msgid "Invalid volume_size." msgstr "" @@ -363,7 +363,7 @@ msgstr "" msgid "Group not valid. Reason: %(reason)s" msgstr "Grupo no válido. Razón: %(reason)s" -#: nova/exception.py:345 nova/openstack/common/db/sqlalchemy/utils.py:58 +#: nova/exception.py:345 nova/openstack/common/db/sqlalchemy/utils.py:57 msgid "Sort key supplied was not valid." 
msgstr "La clave de clasificación proporcionada no es válida. " @@ -750,71 +750,76 @@ msgid "" msgstr "No está permitido crear una interfaz en una red externa %(network_uuid)s" #: nova/exception.py:654 +#, python-format +msgid "Physical network is missing for network %(network_uuid)s" +msgstr "" + +#: nova/exception.py:658 msgid "Could not find the datastore reference(s) which the VM uses." msgstr "" "No se ha podido encontrar la(s) referencia(s) de almacén de datos que la " "MV utiliza." -#: nova/exception.py:658 +#: nova/exception.py:662 #, python-format msgid "Port %(port_id)s is still in use." msgstr "El puerto %(port_id)s todavía se está utilizando." -#: nova/exception.py:662 +#: nova/exception.py:666 #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "El puerto %(port_id)s requiere una FixedIP para poder ser utilizado." -#: nova/exception.py:666 +#: nova/exception.py:670 #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "El puerto %(port_id)s no es utilizable para la instancia %(instance)s." -#: nova/exception.py:670 +#: nova/exception.py:674 #, python-format msgid "No free port available for instance %(instance)s." msgstr "No hay ningún puerto libre disponible para la instancia %(instance)s." -#: nova/exception.py:674 +#: nova/exception.py:678 #, python-format msgid "Fixed ip %(address)s already exists." msgstr "La dirección IP estática %(address)s ya existe." -#: nova/exception.py:678 +#: nova/exception.py:682 #, python-format msgid "No fixed IP associated with id %(id)s." msgstr "No hay ninguna dirección IP fija asociada con el %(id)s." -#: nova/exception.py:682 +#: nova/exception.py:686 #, python-format msgid "Fixed ip not found for address %(address)s." msgstr "No se ha encontrado una dirección IP fija para la dirección %(address)s." -#: nova/exception.py:686 +#: nova/exception.py:690 #, python-format msgid "Instance %(instance_uuid)s has zero fixed ips." 
msgstr "La instancia %(instance_uuid)s no tiene ninguna IP fija." -#: nova/exception.py:690 +#: nova/exception.py:694 #, python-format msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." msgstr "" "El host de red %(host)s no tiene ninguna dirección IP fija en la red " "%(network_id)s." -#: nova/exception.py:695 +#: nova/exception.py:699 #, python-format msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'." msgstr "La instancia %(instance_uuid)s no tiene la dirección IP fija '%(ip)s'." -#: nova/exception.py:699 +#: nova/exception.py:703 #, python-format msgid "" "Fixed IP address (%(address)s) does not exist in network " "(%(network_uuid)s)." msgstr "La dirección IP fija (%(address)s) no existe en la red (%(network_uuid)s)." -#: nova/exception.py:704 +#: nova/exception.py:708 #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance " @@ -823,128 +828,128 @@ msgstr "" "La dirección IP fija %(address)s ya se está utilizando en la instancia " "%(instance_uuid)s." -#: nova/exception.py:709 +#: nova/exception.py:713 #, python-format msgid "More than one instance is associated with fixed ip address '%(address)s'." msgstr "Hay más de una instancia asociada con la dirección IP fija '%(address)s'." -#: nova/exception.py:714 +#: nova/exception.py:718 #, python-format msgid "Fixed IP address %(address)s is invalid." msgstr "La dirección IP fija %(address)s no es válida." -#: nova/exception.py:719 +#: nova/exception.py:723 msgid "Zero fixed ips available." msgstr "No hay ninguna dirección IP fija disponible." -#: nova/exception.py:723 +#: nova/exception.py:727 msgid "Zero fixed ips could be found." msgstr "No se ha podido encontrar ninguna dirección IP fija." -#: nova/exception.py:727 +#: nova/exception.py:731 #, python-format msgid "Floating ip %(address)s already exists." msgstr "Ya existe la dirección IP flotante %(address)s." 
-#: nova/exception.py:732 +#: nova/exception.py:736 #, python-format msgid "Floating ip not found for id %(id)s." msgstr "No se ha encontrado ninguna dirección IP flotante para el id %(id)s." -#: nova/exception.py:736 +#: nova/exception.py:740 #, python-format msgid "The DNS entry %(name)s already exists in domain %(domain)s." msgstr "La entrada de DNS %(name)s ya existe en el dominio %(domain)s." -#: nova/exception.py:740 +#: nova/exception.py:744 #, python-format msgid "Floating ip not found for address %(address)s." msgstr "" "No se ha encontrado ninguna dirección IP flotante para la dirección " "%(address)s." -#: nova/exception.py:744 +#: nova/exception.py:748 #, python-format msgid "Floating ip not found for host %(host)s." msgstr "No se ha encontrado ninguna dirección IP flotante para el host %(host)s." -#: nova/exception.py:748 +#: nova/exception.py:752 #, python-format msgid "Multiple floating ips are found for address %(address)s." msgstr "Se han encontrado varias ip flotantes para la dirección %(address)s." -#: nova/exception.py:752 +#: nova/exception.py:756 msgid "Floating ip pool not found." msgstr "No se ha encontrado pool de ip flotante." -#: nova/exception.py:757 +#: nova/exception.py:761 msgid "Zero floating ips available." msgstr "No hay ninguna dirección IP flotante disponible." -#: nova/exception.py:763 +#: nova/exception.py:767 #, python-format msgid "Floating ip %(address)s is associated." msgstr "La dirección IP flotante %(address)s está asociada." -#: nova/exception.py:767 +#: nova/exception.py:771 #, python-format msgid "Floating ip %(address)s is not associated." msgstr "La dirección IP flotante %(address)s no está asociada." -#: nova/exception.py:771 +#: nova/exception.py:775 msgid "Zero floating ips exist." msgstr "No existe ninguna dirección IP flotante." -#: nova/exception.py:776 +#: nova/exception.py:780 #, python-format msgid "Interface %(interface)s not found." msgstr "No se ha encontrado la interfaz %(interface)s." 
-#: nova/exception.py:781 nova/api/openstack/compute/contrib/floating_ips.py:97 +#: nova/exception.py:785 nova/api/openstack/compute/contrib/floating_ips.py:98 msgid "Cannot disassociate auto assigned floating ip" msgstr "No se puede desasociar la IP flotante asignada automáticamente" -#: nova/exception.py:786 +#: nova/exception.py:790 #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "No se ha encontrado el par de claves %(name)s para el usuario %(user_id)s" -#: nova/exception.py:790 +#: nova/exception.py:794 #, python-format msgid "Service %(service_id)s could not be found." msgstr "No se ha podido encontrar el servicio %(service_id)s." -#: nova/exception.py:794 +#: nova/exception.py:798 #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "Servicio con host %(host)s binario %(binary)s existe." -#: nova/exception.py:798 +#: nova/exception.py:802 #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "Servicio con host %(host)s asunto %(topic)s existe." -#: nova/exception.py:802 +#: nova/exception.py:806 #, python-format msgid "Host %(host)s could not be found." msgstr "No se ha podido encontrar el host %(host)s." -#: nova/exception.py:806 +#: nova/exception.py:810 #, python-format msgid "Compute host %(host)s could not be found." msgstr "No se ha podido encontrar el host de Compute %(host)s." -#: nova/exception.py:810 +#: nova/exception.py:814 #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "No se ha podido encontrar el binario %(binary)s en el host %(host)s." -#: nova/exception.py:814 +#: nova/exception.py:818 #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "Caducidad de reserva no válida %(expire)s." 
-#: nova/exception.py:818 +#: nova/exception.py:822 #, python-format msgid "" "Change would make usage less than 0 for the following resources: " @@ -953,70 +958,75 @@ msgstr "" "El cambio produciría un uso inferior a 0 para los recursos siguientes: " "%(unders)s." -#: nova/exception.py:823 +#: nova/exception.py:827 +#, python-format +msgid "Wrong quota method %(method)s used on resource %(res)s" +msgstr "" + +#: nova/exception.py:831 msgid "Quota could not be found" msgstr "No se ha podido encontrar la cuota" -#: nova/exception.py:827 +#: nova/exception.py:835 #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "Cuota existente para el proyecto %(project_id)s, recurso %(resource)s" -#: nova/exception.py:832 +#: nova/exception.py:840 #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Recursos de cuota desconocidos %(unknown)s." -#: nova/exception.py:836 +#: nova/exception.py:844 #, python-format msgid "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" "No se ha encontrado la cuota para el usuario %(user_id)s en el proyecto " "%(project_id)s." -#: nova/exception.py:841 +#: nova/exception.py:849 #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "No se ha encontrado la cuota para el proyecto %(project_id)s." -#: nova/exception.py:845 +#: nova/exception.py:853 #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "No se ha encontrado la clase de cuota %(class_name)s." -#: nova/exception.py:849 +#: nova/exception.py:857 #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "No se ha encontrado el uso de cuota para el proyecto %(project_id)s." -#: nova/exception.py:853 +#: nova/exception.py:861 #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "No se ha encontrado la reserva de cuota %(uuid)s." 
-#: nova/exception.py:857 +#: nova/exception.py:865 #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Cuota superada para recursos: %(overs)s" -#: nova/exception.py:861 +#: nova/exception.py:869 #, python-format msgid "Security group %(security_group_id)s not found." msgstr "No se ha encontrado el grupo de seguridad %(security_group_id)s." -#: nova/exception.py:865 +#: nova/exception.py:873 #, python-format msgid "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "" "No se ha encontrado el grupo de seguridad %(security_group_id)s para el " "proyecto %(project_id)s." -#: nova/exception.py:870 +#: nova/exception.py:878 #, python-format msgid "Security group with rule %(rule_id)s not found." msgstr "No se ha encontrado el grupo de seguridad con la regla %(rule_id)s." -#: nova/exception.py:875 +#: nova/exception.py:883 #, python-format msgid "" "Security group %(security_group_name)s already exists for project " @@ -1025,7 +1035,7 @@ msgstr "" "El grupo de seguridad %(security_group_name)s ya existe para el proyecto " "%(project_id)s" -#: nova/exception.py:880 +#: nova/exception.py:888 #, python-format msgid "" "Security group %(security_group_id)s is already associated with the " @@ -1034,7 +1044,7 @@ msgstr "" "El grupo de seguridad %(security_group_id)s ya está asociado con la " "instancia %(instance_id)s" -#: nova/exception.py:885 +#: nova/exception.py:893 #, python-format msgid "" "Security group %(security_group_id)s is not associated with the instance " @@ -1043,14 +1053,14 @@ msgstr "" "El grupo de seguridad %(security_group_id)s no está asociado con la " "instancia %(instance_id)s" -#: nova/exception.py:890 +#: nova/exception.py:898 #, python-format msgid "Security group default rule (%rule_id)s not found." msgstr "" "La regla predeterminada (%rule_id)s del grupo de seguridad no se ha " "encontrado." 
-#: nova/exception.py:894 +#: nova/exception.py:902 msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." @@ -1058,33 +1068,33 @@ msgstr "" "La red requiere port_security_enabled y una subred asociada para aplicar " "grupos de seguridad." -#: nova/exception.py:900 +#: nova/exception.py:908 #, python-format msgid "Rule already exists in group: %(rule)s" msgstr "La regla ya existe en el grupo: %(rule)s" -#: nova/exception.py:904 +#: nova/exception.py:912 msgid "No Unique Match Found." msgstr "No se ha encontrado una sola coincidencia." -#: nova/exception.py:909 +#: nova/exception.py:917 #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "No se ha podido encontrar la migración %(migration_id)s." -#: nova/exception.py:913 +#: nova/exception.py:921 #, python-format msgid "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "" "No se ha encontrado la migración para la instancia %(instance_id)s con el" " estado %(status)s." -#: nova/exception.py:918 +#: nova/exception.py:926 #, python-format msgid "Console pool %(pool_id)s could not be found." msgstr "No se ha podido encontrar la agrupación de consolas %(pool_id)s. " -#: nova/exception.py:922 +#: nova/exception.py:930 #, python-format msgid "" "Console pool with host %(host)s, console_type %(console_type)s and " @@ -1093,7 +1103,7 @@ msgstr "" "El pool de consolas con host %(host)s, console_type %(console_type)s y " "compute_host %(compute_host)s ya existe." -#: nova/exception.py:928 +#: nova/exception.py:936 #, python-format msgid "" "Console pool of type %(console_type)s for compute host %(compute_host)s " @@ -1102,17 +1112,17 @@ msgstr "" "No se ha encontrado la agrupación de consolas de tipo %(console_type)s " "para el host de cálculo %(compute_host)s en el host de proxy %(host)s." -#: nova/exception.py:934 +#: nova/exception.py:942 #, python-format msgid "Console %(console_id)s could not be found." 
msgstr "No se ha podido encontrar la consola %(console_id)s." -#: nova/exception.py:938 +#: nova/exception.py:946 #, python-format msgid "Console for instance %(instance_uuid)s could not be found." msgstr "No se ha podido encontrar la consola para la instancia %(instance_uuid)s." -#: nova/exception.py:942 +#: nova/exception.py:950 #, python-format msgid "" "Console for instance %(instance_uuid)s in pool %(pool_id)s could not be " @@ -1121,106 +1131,106 @@ msgstr "" "No se ha podido encontrar la consola para la instancia %(instance_uuid)s " "en la agrupación %(pool_id)s." -#: nova/exception.py:947 +#: nova/exception.py:955 #, python-format msgid "Invalid console type %(console_type)s" msgstr "Tipo de consola %(console_type)s no válido " -#: nova/exception.py:951 +#: nova/exception.py:959 #, python-format msgid "Unavailable console type %(console_type)s." msgstr "El tipo de consola %(console_type)s no está disponible." -#: nova/exception.py:955 +#: nova/exception.py:963 #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "El puerto de rangos de consola %(min_port)d-%(max_port)d se ha agotado." -#: nova/exception.py:960 +#: nova/exception.py:968 #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "No se ha podido encontrar el tipo %(flavor_id)s." -#: nova/exception.py:964 +#: nova/exception.py:972 #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "No se puede encontrar el sabor con nombre %(flavor_name)s." -#: nova/exception.py:968 +#: nova/exception.py:976 #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" "No se ha encontrado el acceso de sabor para la combinación %(flavor_id)s " "/ %(project_id)s. " -#: nova/exception.py:973 +#: nova/exception.py:981 #, python-format msgid "" "Flavor %(id)d extra spec cannot be updated or created after %(retries)d " "retries." 
msgstr "" -#: nova/exception.py:978 +#: nova/exception.py:986 #, python-format msgid "Cell %(cell_name)s doesn't exist." msgstr "La célula %(cell_name)s no existe." -#: nova/exception.py:982 +#: nova/exception.py:990 #, python-format msgid "Cell with name %(name)s already exists." msgstr "Una celda con el nombre %(name)s ya existe." -#: nova/exception.py:986 +#: nova/exception.py:994 #, python-format msgid "Inconsistency in cell routing: %(reason)s" msgstr "Incoherencia en direccionamiento de célula: %(reason)s" -#: nova/exception.py:990 +#: nova/exception.py:998 #, python-format msgid "Service API method not found: %(detail)s" msgstr "No se ha encontrado el método de API de servicio: %(detail)s" -#: nova/exception.py:994 +#: nova/exception.py:1002 msgid "Timeout waiting for response from cell" msgstr "Se ha excedido el tiempo de espera de respuesta de la célula" -#: nova/exception.py:998 +#: nova/exception.py:1006 #, python-format msgid "Cell message has reached maximum hop count: %(hop_count)s" msgstr "" "El mensaje de célula ha alcanzado la cuenta de saltos máxima: " "%(hop_count)s" -#: nova/exception.py:1002 +#: nova/exception.py:1010 msgid "No cells available matching scheduling criteria." msgstr "" "No hay células disponibles que coincidan con los criterios de " "planificación." -#: nova/exception.py:1006 +#: nova/exception.py:1014 msgid "Cannot update cells configuration file." msgstr "No se puede actualizar el archivo de configuración de la celda." -#: nova/exception.py:1010 +#: nova/exception.py:1018 #, python-format msgid "Cell is not known for instance %(instance_uuid)s" msgstr "No se conoce la célula en la instancia %(instance_uuid)s" -#: nova/exception.py:1014 +#: nova/exception.py:1022 #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" "No se ha podido encontrar el filtro de host de planificador " "%(filter_name)s." 
-#: nova/exception.py:1018 +#: nova/exception.py:1026 #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" "el sabor %(flavor_id)s no tiene especificaciones extras con clave " "%(extra_specs_key)s" -#: nova/exception.py:1023 +#: nova/exception.py:1031 #, python-format msgid "" "Metric %(name)s could not be found on the compute host node " @@ -1229,67 +1239,67 @@ msgstr "" "La métrica %(name)s no se puede encontrar en el nodo de cómputo anfitrión" " %(host)s:%(node)s." -#: nova/exception.py:1028 +#: nova/exception.py:1036 #, python-format msgid "File %(file_path)s could not be found." msgstr "No se ha podido encontrar el archivo %(file_path)s." -#: nova/exception.py:1032 +#: nova/exception.py:1040 msgid "Zero files could be found." msgstr "No se ha podido encontrar ningún archivo." -#: nova/exception.py:1036 +#: nova/exception.py:1044 #, python-format msgid "Virtual switch associated with the network adapter %(adapter)s not found." msgstr "" "No se ha encontrado ningún conmutador virtual asociado con el adaptador " "de red %(adapter)s." -#: nova/exception.py:1041 +#: nova/exception.py:1049 #, python-format msgid "Network adapter %(adapter)s could not be found." msgstr "No se ha podido encontrar el adaptador de red %(adapter)s." -#: nova/exception.py:1045 +#: nova/exception.py:1053 #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "No se ha podido encontrar la clase %(class_name)s: %(exception)s" -#: nova/exception.py:1049 +#: nova/exception.py:1057 msgid "Action not allowed." msgstr "Acción no permitida. 
" -#: nova/exception.py:1053 +#: nova/exception.py:1061 msgid "Rotation is not allowed for snapshots" msgstr "No se permite la rotación para instantáneas" -#: nova/exception.py:1057 +#: nova/exception.py:1065 msgid "Rotation param is required for backup image_type" msgstr "" "El parámetro de rotación es necesario para el tipo de imagen de copia de " "seguridad " -#: nova/exception.py:1062 nova/tests/compute/test_keypairs.py:144 +#: nova/exception.py:1070 nova/tests/compute/test_keypairs.py:146 #, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "El par de claves '%(key_name)s' ya existe." -#: nova/exception.py:1066 +#: nova/exception.py:1074 #, python-format msgid "Instance %(name)s already exists." msgstr "La instancia %(name)s ya existe." -#: nova/exception.py:1070 +#: nova/exception.py:1078 #, python-format msgid "Flavor with name %(name)s already exists." msgstr "El sabor con nombre %(name)s ya existe." -#: nova/exception.py:1074 +#: nova/exception.py:1082 #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "El sabor con ID %(flavor_id)s ya existe." -#: nova/exception.py:1078 +#: nova/exception.py:1086 #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " @@ -1298,86 +1308,86 @@ msgstr "" "Versión de acceso ya existe para la combinación de la versión " "%(flavor_id)s y el proyecto %(project_id)s." 
-#: nova/exception.py:1083 +#: nova/exception.py:1091 #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s no está en un almacenamiento compartido: %(reason)s" -#: nova/exception.py:1087 +#: nova/exception.py:1095 #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s no está en un almacenamiento local: %(reason)s" -#: nova/exception.py:1091 +#: nova/exception.py:1099 #, python-format msgid "Storage error: %(reason)s" msgstr "" -#: nova/exception.py:1095 +#: nova/exception.py:1103 #, python-format msgid "Migration error: %(reason)s" msgstr "Error en migración: %(reason)s" -#: nova/exception.py:1099 +#: nova/exception.py:1107 #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "Error de pre-verificación de migraión: %(reason)s" -#: nova/exception.py:1103 +#: nova/exception.py:1111 #, python-format msgid "Malformed message body: %(reason)s" msgstr "Cuerpo de mensaje con formato incorrecto: %(reason)s" -#: nova/exception.py:1109 +#: nova/exception.py:1117 #, python-format msgid "Could not find config at %(path)s" msgstr "No se ha podido encontrar configuración en %(path)s" -#: nova/exception.py:1113 +#: nova/exception.py:1121 #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "No se ha podido cargar aplicación de pegar '%(name)s' desde %(path)s " -#: nova/exception.py:1117 +#: nova/exception.py:1125 msgid "When resizing, instances must change flavor!" msgstr "Al redimensionarse, las instancias deben cambiar de sabor." 
-#: nova/exception.py:1121 +#: nova/exception.py:1129 #, python-format msgid "Resize error: %(reason)s" msgstr "Error de redimensionamiento: %(reason)s" -#: nova/exception.py:1125 +#: nova/exception.py:1133 #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "El disco del servidor fue incapaz de re-escalarse debido a: %(reason)s" -#: nova/exception.py:1129 +#: nova/exception.py:1137 msgid "Flavor's memory is too small for requested image." msgstr "La memoria del sabor es demasiado pequeña para la imagen solicitada." -#: nova/exception.py:1133 +#: nova/exception.py:1141 msgid "Flavor's disk is too small for requested image." msgstr "El disco del sabor es demasiado pequeño para la imagen solicitada." -#: nova/exception.py:1137 +#: nova/exception.py:1145 #, python-format msgid "Insufficient free memory on compute node to start %(uuid)s." msgstr "" "No hay suficiente memoria libre en el nodo de cálculo para iniciar " "%(uuid)s." -#: nova/exception.py:1141 +#: nova/exception.py:1149 #, python-format msgid "No valid host was found. %(reason)s" msgstr "No se ha encontrado ningún host válido. %(reason)s" -#: nova/exception.py:1146 +#: nova/exception.py:1154 #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "Cuota excedida: código=%(code)s" -#: nova/exception.py:1153 +#: nova/exception.py:1161 #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used " @@ -1386,44 +1396,44 @@ msgstr "" "Se ha superado la cuota para %(overs)s: se ha solicitado %(req)s, pero ya" " se utiliza %(used)d de %(allowed)d %(resource)s." -#: nova/exception.py:1158 +#: nova/exception.py:1166 msgid "Maximum number of floating ips exceeded" msgstr "Se ha superado el número máximo de IP flotantes" -#: nova/exception.py:1162 +#: nova/exception.py:1170 msgid "Maximum number of fixed ips exceeded" msgstr "Se ha superado el número máximo de IP fijas." 
-#: nova/exception.py:1166 +#: nova/exception.py:1174 #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "El número máximo de elementos de metadatos supera %(allowed)d" -#: nova/exception.py:1170 +#: nova/exception.py:1178 msgid "Personality file limit exceeded" msgstr "Se ha superado el límite de archivo de personalidad" -#: nova/exception.py:1174 +#: nova/exception.py:1182 msgid "Personality file path too long" msgstr "Vía de acceso de archivo de personalidad demasiado larga" -#: nova/exception.py:1178 +#: nova/exception.py:1186 msgid "Personality file content too long" msgstr "Contenido del archivo de personalidad demasiado largo" -#: nova/exception.py:1182 nova/tests/compute/test_keypairs.py:155 +#: nova/exception.py:1190 nova/tests/compute/test_keypairs.py:157 msgid "Maximum number of key pairs exceeded" msgstr "Se ha superado el número máximo de pares de claves" -#: nova/exception.py:1187 +#: nova/exception.py:1195 msgid "Maximum number of security groups or rules exceeded" msgstr "Se ha superado el número máximo de grupos o reglas de seguridad" -#: nova/exception.py:1191 +#: nova/exception.py:1199 msgid "Maximum number of ports exceeded" msgstr "El número máximo de puertos ha sido excedido." -#: nova/exception.py:1195 +#: nova/exception.py:1203 #, python-format msgid "" "Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " @@ -1432,144 +1442,140 @@ msgstr "" "Agregado %(aggregate_id)s: la acción '%(action)s' ha producido un error: " "%(reason)s." -#: nova/exception.py:1200 +#: nova/exception.py:1208 #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "No se ha podido encontrar el agregado %(aggregate_id)s." -#: nova/exception.py:1204 +#: nova/exception.py:1212 #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "El agregado %(aggregate_name)s ya existe." 
-#: nova/exception.py:1208 +#: nova/exception.py:1216 #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "El agregado %(aggregate_id)s no tiene ningún host %(host)s." -#: nova/exception.py:1212 +#: nova/exception.py:1220 #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" "El agregado %(aggregate_id)s no tiene metadatos con la clave " "%(metadata_key)s." -#: nova/exception.py:1217 +#: nova/exception.py:1225 #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "El agregado %(aggregate_id)s ya tiene el host %(host)s." -#: nova/exception.py:1221 +#: nova/exception.py:1229 msgid "Unable to create flavor" msgstr "Incapaz de crear sabor" -#: nova/exception.py:1225 +#: nova/exception.py:1233 #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "" "No se ha podido establecer la contraseña de administrador en %(instance)s" " debido a %(reason)s" -#: nova/exception.py:1231 +#: nova/exception.py:1239 #, python-format msgid "Detected existing vlan with id %(vlan)d" msgstr "Se ha detectado una vlan existente con el ID %(vlan)d" -#: nova/exception.py:1235 +#: nova/exception.py:1243 msgid "There was a conflict when trying to complete your request." msgstr "Hubo un conflicto tratándo de completar su solicitud." -#: nova/exception.py:1241 +#: nova/exception.py:1249 #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "No se ha podido encontrar la instancia %(instance_id)s." -#: nova/exception.py:1245 +#: nova/exception.py:1253 #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "" "No se ha podido encontrar la memoria caché de información para la " "instancia %(instance_uuid)s." -#: nova/exception.py:1250 +#: nova/exception.py:1258 #, python-format msgid "Node %(node_id)s could not be found." msgstr "No se ha podido encontrar el nodo %(node_id)s." 
-#: nova/exception.py:1254 +#: nova/exception.py:1262 #, python-format msgid "Node with UUID %(node_uuid)s could not be found." msgstr "No se ha podido encontrar el nodo con el UUID %(node_uuid)s." -#: nova/exception.py:1258 +#: nova/exception.py:1266 #, python-format msgid "Marker %(marker)s could not be found." msgstr "No se ha podido encontrar el marcador %(marker)s." -#: nova/exception.py:1263 +#: nova/exception.py:1271 #, python-format msgid "Invalid id: %(val)s (expecting \"i-...\")." msgstr "ID no válido: %(val)s (se espera \"i-...\")." -#: nova/exception.py:1267 +#: nova/exception.py:1275 #, python-format msgid "Could not fetch image %(image_id)s" msgstr "No se ha podido captar la imagen %(image_id)s" -#: nova/exception.py:1271 +#: nova/exception.py:1279 #, python-format msgid "Could not upload image %(image_id)s" msgstr "No se ha podido cargar la imagen %(image_id)s" -#: nova/exception.py:1275 +#: nova/exception.py:1283 #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "La tarea %(task_name)s ya se está ejecutando en el host %(host)s" -#: nova/exception.py:1279 +#: nova/exception.py:1287 #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "La tarea %(task_name)s no se está ejecutando en el host %(host)s" -#: nova/exception.py:1283 +#: nova/exception.py:1291 #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "La instancia %(instance_uuid)s está bloqueada" -#: nova/exception.py:1287 +#: nova/exception.py:1295 #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "Valor inválido para la opción de configuración de controlador: %(option)s" -#: nova/exception.py:1291 +#: nova/exception.py:1299 #, python-format msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "" "No se ha podido montar la unidad de configuración vfat. %(operation)s ha " "fallado. 
Error: %(error)s" -#: nova/exception.py:1296 +#: nova/exception.py:1304 #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "" "Formato de unidad de configuración desconocido %(format)s. Seleccione uno" " de iso9660 o vfat." -#: nova/exception.py:1301 +#: nova/exception.py:1309 #, python-format -msgid "Failed to attach network adapter device to %(instance)s" +msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "" -"Se ha encontrado un error en la conexión del dispositivo de adaptador de " -"red a %(instance)s." -#: nova/exception.py:1305 +#: nova/exception.py:1314 #, python-format -msgid "Failed to detach network adapter device from %(instance)s" +msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "" -"Se ha encontrado un error en la desconexión del dispositivo de adaptador " -"de red a %(instance)s." -#: nova/exception.py:1309 +#: nova/exception.py:1319 #, python-format msgid "" "User data too large. User data must be no larger than %(maxsize)s bytes " @@ -1579,11 +1585,11 @@ msgstr "" "más de %(maxsize)s bytes una vez se ha codificado base64. Sus datos " "tienen %(length)d bytes." -#: nova/exception.py:1315 +#: nova/exception.py:1325 msgid "User data needs to be valid base 64." msgstr "Los datos de usuario deben ser de base 64 válidos." -#: nova/exception.py:1319 +#: nova/exception.py:1329 #, python-format msgid "" "Unexpected task state: expecting %(expected)s but the actual state is " @@ -1592,7 +1598,7 @@ msgstr "" "Estado de tarea inesperado: se esperaba %(expected)s pero el estado es " "%(actual)s" -#: nova/exception.py:1328 +#: nova/exception.py:1338 #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not " @@ -1601,12 +1607,12 @@ msgstr "" "La acción para request_id %(request_id)s en la instancia " "%(instance_uuid)s no se ha encontrado." 
-#: nova/exception.py:1333 +#: nova/exception.py:1343 #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "No se ha encontrado el suceso %(event)s para el id de acción %(action_id)s" -#: nova/exception.py:1337 +#: nova/exception.py:1347 #, python-format msgid "" "Unexpected VM state: expecting %(expected)s but the actual state is " @@ -1615,21 +1621,21 @@ msgstr "" "Estado de VM inesperado: se esperaba %(expected)s pero el estado actual " "es %(actual)s" -#: nova/exception.py:1342 +#: nova/exception.py:1352 #, python-format msgid "The CA file for %(project)s could not be found" msgstr "No se ha podido encontrar el archivo CA para %(project)s " -#: nova/exception.py:1346 +#: nova/exception.py:1356 #, python-format msgid "The CRL file for %(project)s could not be found" msgstr "No se ha podido encontrar el archivo CRL para %(project)s" -#: nova/exception.py:1350 +#: nova/exception.py:1360 msgid "Instance recreate is not supported." msgstr "La recreación de la instancia no está soportada." -#: nova/exception.py:1354 +#: nova/exception.py:1364 #, python-format msgid "" "The service from servicegroup driver %(driver)s is temporarily " @@ -1638,21 +1644,21 @@ msgstr "" "El servicio del controlador servicegroup %(driver)s está temporalmente no" " disponible." -#: nova/exception.py:1359 +#: nova/exception.py:1369 #, python-format msgid "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" "%(binary)s ha intentado un acceso de bases de datos directo que no está " "permitido por la política." 
-#: nova/exception.py:1364 +#: nova/exception.py:1374 #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" "El tipo de virtualización '%(virt)s' no está soportado por este " "controlador de cálculo" -#: nova/exception.py:1369 +#: nova/exception.py:1379 #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt " @@ -1661,123 +1667,123 @@ msgstr "" "El hardware solicitado '%(model)s' no está soportado por el controlador " "de virtualización '%(virt)s'" -#: nova/exception.py:1374 +#: nova/exception.py:1384 #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "Datos Base-64 inválidos para el archivo %(path)s" -#: nova/exception.py:1378 +#: nova/exception.py:1388 #, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "Construcción de instancia %(instance_uuid)s abortada: %(reason)s" -#: nova/exception.py:1382 +#: nova/exception.py:1392 #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "Construcción de instancia %(instance_uuid)s reprogramada: %(reason)s" -#: nova/exception.py:1387 +#: nova/exception.py:1397 #, python-format msgid "Shadow table with name %(name)s already exists." msgstr "Una Tabla Shadow con nombre %(name)s ya existe." 
-#: nova/exception.py:1392 +#: nova/exception.py:1402 #, python-format msgid "Instance rollback performed due to: %s" msgstr "Reversión de instancia ejecutada debido a: %s" -#: nova/exception.py:1398 +#: nova/exception.py:1408 #, python-format msgid "Unsupported object type %(objtype)s" msgstr "Tipo de objeto no soportado %(objtype)s" -#: nova/exception.py:1402 +#: nova/exception.py:1412 #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "No se puede ejecutar %(method)s en un objecto huérfano %(objtype)s" -#: nova/exception.py:1406 +#: nova/exception.py:1416 #, python-format msgid "Version %(objver)s of %(objname)s is not supported" msgstr "Versión %(objver)s de %(objname)s no está soportada" -#: nova/exception.py:1410 +#: nova/exception.py:1420 #, python-format msgid "Cannot modify readonly field %(field)s" msgstr "" -#: nova/exception.py:1414 +#: nova/exception.py:1424 #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "La acción objeto %(action)s falló debido a: %(reason)s" -#: nova/exception.py:1418 +#: nova/exception.py:1428 #, python-format msgid "Field %(field)s of %(objname)s is not an instance of Field" msgstr "El campo %(field)s de %(objname)s no es una instancia de campo." -#: nova/exception.py:1422 +#: nova/exception.py:1432 #, python-format msgid "Core API extensions are missing: %(missing_apis)s" msgstr "Faltan las extensiones Core API : %(missing_apis)s" -#: nova/exception.py:1426 +#: nova/exception.py:1436 #, python-format msgid "Error during following call to agent: %(method)s" msgstr "Error durante la siguiente llamada al agente: %(method)s" -#: nova/exception.py:1430 +#: nova/exception.py:1440 #, python-format msgid "Unable to contact guest agent. The following call timed out: %(method)s" msgstr "" "Unposible contactar al agente invitado. 
La siguiente llamada agotó su " "tiempo de espera: %(method)s" -#: nova/exception.py:1435 +#: nova/exception.py:1445 #, python-format msgid "Agent does not support the call: %(method)s" msgstr "El agente no soporta la llamada %(method)s" -#: nova/exception.py:1439 +#: nova/exception.py:1449 #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "No se ha podido encontrar el grupo de instancias %(group_uuid)s." -#: nova/exception.py:1443 +#: nova/exception.py:1453 #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "El grupo de instancias %(group_uuid)s ya existe." -#: nova/exception.py:1447 +#: nova/exception.py:1457 #, python-format msgid "Instance group %(group_uuid)s has no metadata with key %(metadata_key)s." msgstr "" "El grupo de instancias %(group_uuid)s no tiene metadatos con clave " "%(metadata_key)s" -#: nova/exception.py:1452 +#: nova/exception.py:1462 #, python-format msgid "Instance group %(group_uuid)s has no member with id %(instance_id)s." msgstr "" "El grupo de instancias %(group_uuid)s no tiene miembro con identificador " "%(instance_id)s." -#: nova/exception.py:1457 +#: nova/exception.py:1467 #, python-format msgid "Instance group %(group_uuid)s has no policy %(policy)s." msgstr "El grupo de instancias %(group_uuid)s no tiene política %(policy)s" -#: nova/exception.py:1461 +#: nova/exception.py:1471 #, python-format msgid "Number of retries to plugin (%(num_retries)d) exceeded." msgstr "Se ha excedido el número de reintentos para el plugin (%(num_retries)d)." -#: nova/exception.py:1465 +#: nova/exception.py:1475 #, python-format msgid "There was an error with the download module %(module)s. %(reason)s" msgstr "Hubo un error con el módulo de descarga %(module)s. %(reason)s" -#: nova/exception.py:1470 +#: nova/exception.py:1480 #, python-format msgid "" "The metadata for this location will not work with this module %(module)s." 
@@ -1786,37 +1792,50 @@ msgstr "" "Los metadatos para esta ubicación no funcionarán con este módulo " "%(module)s. %(reason)s." -#: nova/exception.py:1475 +#: nova/exception.py:1485 #, python-format msgid "The method %(method_name)s is not implemented." msgstr "El método %(method_name)s no está implementado." -#: nova/exception.py:1479 +#: nova/exception.py:1489 #, python-format msgid "The module %(module)s is misconfigured: %(reason)s." msgstr "El módulo %(module)s está mal configurado: %(reason)s" -#: nova/exception.py:1483 +#: nova/exception.py:1493 #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "Error al crear monitor de recursos: %(monitor)s" -#: nova/exception.py:1487 +#: nova/exception.py:1497 #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "La dirección PCI %(address)s tiene un formato incorrecto." -#: nova/exception.py:1491 +#: nova/exception.py:1501 +#, python-format +msgid "" +"Invalid PCI Whitelist: The PCI address %(address)s has an invalid " +"%(field)s." +msgstr "" + +#: nova/exception.py:1506 +msgid "" +"Invalid PCI Whitelist: The PCI whitelist can specify devname or address, " +"but not both" +msgstr "" + +#: nova/exception.py:1512 #, python-format msgid "PCI device %(id)s not found" msgstr "Dispositivo PCI %(id)s no encontrado" -#: nova/exception.py:1495 +#: nova/exception.py:1516 #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." msgstr "Dispositivo PCI %(node_id)s:%(address)s no encontrado." 
-#: nova/exception.py:1499 +#: nova/exception.py:1520 #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " @@ -1825,7 +1844,7 @@ msgstr "" "el dispositivo PCI %(compute_node_id)s:%(address)s está %(status)s en " "lugar de %(hopestatus)s" -#: nova/exception.py:1505 +#: nova/exception.py:1526 #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead " @@ -1834,12 +1853,12 @@ msgstr "" "El dueño del dispositivo PCI %(compute_node_id)s:%(address)s es %(owner)s" " en lugar de %(hopeowner)s" -#: nova/exception.py:1511 +#: nova/exception.py:1532 #, python-format msgid "PCI device request (%requests)s failed" msgstr "Solicitud de dispositivo PCI (%request)s fallida" -#: nova/exception.py:1516 +#: nova/exception.py:1537 #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty " @@ -1848,32 +1867,32 @@ msgstr "" "Intento de consumir dispositivo PCI %(compute_node_id)s:%(address)s de " "pool vacío" -#: nova/exception.py:1522 +#: nova/exception.py:1543 #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "Definición de alias PCI inválido: %(reason)s" -#: nova/exception.py:1526 +#: nova/exception.py:1547 #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "Alias PCI %(alias)s no definido" -#: nova/exception.py:1531 +#: nova/exception.py:1552 #, python-format msgid "Not enough parameters: %(reason)s" msgstr "No hay suficientes parámetros: %(reason)s" -#: nova/exception.py:1536 +#: nova/exception.py:1557 #, python-format msgid "Invalid PCI devices Whitelist config %(reason)s" msgstr "Configuración de lista permisiva de dispositivos PCI inválida %(reason)s" -#: nova/exception.py:1540 +#: nova/exception.py:1561 #, python-format msgid "Cannot change %(node_id)s to %(new_node_id)s" msgstr "No se puede cambiar %(node_id)s hacia %(new_node_id)s" -#: nova/exception.py:1550 +#: nova/exception.py:1571 #, python-format msgid "" 
"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: " @@ -1882,39 +1901,39 @@ msgstr "" "Fallo al preparar el dispositivo PCI %(id)s para la instancia " "%(instance_uuid)s: %(reason)s" -#: nova/exception.py:1555 +#: nova/exception.py:1576 #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "Fallo al desasociar el dispositivo PCI %(dev)s: %(reason)s" -#: nova/exception.py:1559 +#: nova/exception.py:1580 #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "El hipervisor %(type)s no soporta dispositivos PCI" -#: nova/exception.py:1563 +#: nova/exception.py:1584 #, python-format msgid "Key manager error: %(reason)s" msgstr "error de administrador de claves: %(reason)s" -#: nova/exception.py:1567 +#: nova/exception.py:1588 #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "Fallo al remover el(los) volumen(es): (%(reason)s)" -#: nova/exception.py:1571 +#: nova/exception.py:1592 #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "Modelo de vídeo proporcionado (%(model)s) no está sopotado." -#: nova/exception.py:1575 +#: nova/exception.py:1596 #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "" "La ruta del dispositivo RNG proporcionada: (%(path)s) no está presente en" " el anfitrión." -#: nova/exception.py:1580 +#: nova/exception.py:1601 #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the " @@ -1923,19 +1942,19 @@ msgstr "" "La cantidad solicitada de memoria de vídeo %(req_vram)d es mayor que la " "máxima permitida por el sabor %(max_vram)d." -#: nova/exception.py:1585 +#: nova/exception.py:1606 #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "La acción watchdog proporcionada (%(action)s) no está soportada." 
-#: nova/exception.py:1589 +#: nova/exception.py:1610 msgid "" "Live migration of instances with config drives is not supported in " "libvirt unless libvirt instance path and drive data is shared across " "compute nodes." msgstr "" -#: nova/exception.py:1595 +#: nova/exception.py:1616 #, python-format msgid "" "Host %(server)s is running an old version of Nova, live migrations " @@ -1943,32 +1962,37 @@ msgid "" "and try again." msgstr "" -#: nova/exception.py:1601 +#: nova/exception.py:1622 #, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "Error durante la extracción de la instancia %(instance_id)s: %(reason)s" -#: nova/exception.py:1605 +#: nova/exception.py:1626 #, python-format msgid "" "Image vCPU limits %(sockets)d:%(cores)d:%(threads)d exceeds permitted " "%(maxsockets)d:%(maxcores)d:%(maxthreads)d" msgstr "" -#: nova/exception.py:1610 +#: nova/exception.py:1631 #, python-format msgid "" "Image vCPU topology %(sockets)d:%(cores)d:%(threads)d exceeds permitted " "%(maxsockets)d:%(maxcores)d:%(maxthreads)d" msgstr "" -#: nova/exception.py:1615 +#: nova/exception.py:1636 #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to" " satisfy for vcpus count %(vcpus)d" msgstr "" +#: nova/exception.py:1641 +#, python-format +msgid "Architecture name '%(arch)s' is not recognised" +msgstr "" + #: nova/filters.py:84 #, python-format msgid "Filter %s returned 0 hosts" @@ -1982,12 +2006,12 @@ msgstr "No se ha podido enviar notificación de actualización de estado" msgid "Failed to get nw_info" msgstr "No se ha podido obtener nw_info" -#: nova/quota.py:1326 +#: nova/quota.py:1332 #, python-format msgid "Failed to commit reservations %s" msgstr "Ha fallado la entrega de reservas %s|" -#: nova/quota.py:1349 +#: nova/quota.py:1355 #, python-format msgid "Failed to roll back reservations %s" msgstr "Fallo al revertir las reservas %s" @@ -2069,37 +2093,41 @@ msgstr "Serie de servidor no válida: 
%s" msgid "Could not remove tmpdir: %s" msgstr "No se ha podido eliminar directorio temporal: %s" -#: nova/utils.py:963 +#: nova/utils.py:964 +msgid "The input is not a string or unicode" +msgstr "" + +#: nova/utils.py:966 #, python-format msgid "%s is not a string or unicode" msgstr "%s no es una serie o unicode" -#: nova/utils.py:967 +#: nova/utils.py:973 #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "%(name)s requiere de, al menos, %(min_length)s caracteres." -#: nova/utils.py:972 +#: nova/utils.py:978 #, python-format msgid "%(name)s has more than %(max_length)s characters." msgstr "%(name)s tiene más de %(max_length)s caracteres." -#: nova/utils.py:982 +#: nova/utils.py:988 #, python-format msgid "%(value_name)s must be an integer" msgstr "%(value_name)s debe ser un entero" -#: nova/utils.py:988 +#: nova/utils.py:994 #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "%(value_name)s debe ser >= %(min_value)d" -#: nova/utils.py:994 +#: nova/utils.py:1000 #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "%(value_name)s debe ser <= %(max_value)d" -#: nova/utils.py:1028 +#: nova/utils.py:1034 #, python-format msgid "Hypervisor version %s is invalid." 
msgstr "" @@ -2119,22 +2147,22 @@ msgstr "No se puede asociar a %(host)s:%(port)s" msgid "%(name)s listening on %(host)s:%(port)s" msgstr "%(name)s está escuchando en %(host)s:%(port)s" -#: nova/wsgi.py:159 nova/openstack/common/sslutils.py:50 +#: nova/wsgi.py:159 nova/openstack/common/sslutils.py:47 #, python-format msgid "Unable to find cert_file : %s" msgstr "No se puede encontrar cert_file: %s" -#: nova/wsgi.py:163 nova/openstack/common/sslutils.py:53 +#: nova/wsgi.py:163 nova/openstack/common/sslutils.py:50 #, python-format msgid "Unable to find ca_file : %s" msgstr "No se puede encontrar ca_file: %s" -#: nova/wsgi.py:167 nova/openstack/common/sslutils.py:56 +#: nova/wsgi.py:167 nova/openstack/common/sslutils.py:53 #, python-format msgid "Unable to find key_file : %s" msgstr "No se puede encontrar key_file: %s" -#: nova/wsgi.py:171 nova/openstack/common/sslutils.py:59 +#: nova/wsgi.py:171 nova/openstack/common/sslutils.py:56 msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" @@ -2159,232 +2187,194 @@ msgstr "El servidor WSGI se ha detenido." msgid "You must implement __call__" msgstr "Debe implementar __call__" -#: nova/api/auth.py:72 -msgid "ratelimit_v3 is removed from v3 api." -msgstr "ratelimit_v3 se ha removido de la api v3." - -#: nova/api/auth.py:135 +#: nova/api/auth.py:136 msgid "Invalid service catalog json." msgstr "JSON de catálogo de servicios no válido." -#: nova/api/auth.py:159 -msgid "Sourcing roles from deprecated X-Role HTTP header" -msgstr "Proporcionando roles de cabecera HTTP de rol X en desuso" - #: nova/api/sizelimit.py:53 nova/api/sizelimit.py:62 nova/api/sizelimit.py:76 #: nova/api/metadata/password.py:62 msgid "Request is too large." msgstr "La solicitud es demasiado larga." 
-#: nova/api/ec2/__init__.py:88 +#: nova/api/ec2/__init__.py:89 #, python-format msgid "FaultWrapper: %s" msgstr "FaultWrapper: %s" -#: nova/api/ec2/__init__.py:159 +#: nova/api/ec2/__init__.py:160 msgid "Too many failed authentications." msgstr "Demasiados intentos de autenticacion fallidos." -#: nova/api/ec2/__init__.py:168 -#, python-format -msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." -msgstr "" -"La clave de acceso %(access_key)s ha tenido %(failures)d autenticaciones " -"anómalas y estará bloqueada durante %(lock_mins)d minutos." - -#: nova/api/ec2/__init__.py:187 +#: nova/api/ec2/__init__.py:188 msgid "Signature not provided" msgstr "Firma no proporcionada" -#: nova/api/ec2/__init__.py:192 +#: nova/api/ec2/__init__.py:193 msgid "Access key not provided" msgstr "Clave de acceso no proporcionada" -#: nova/api/ec2/__init__.py:228 nova/api/ec2/__init__.py:244 +#: nova/api/ec2/__init__.py:229 nova/api/ec2/__init__.py:245 msgid "Failure communicating with keystone" msgstr "Anomalía al comunicarse con keystone" -#: nova/api/ec2/__init__.py:304 +#: nova/api/ec2/__init__.py:305 msgid "Timestamp failed validation." msgstr "Ha fallado la validación de indicación de fecha y hora." 
-#: nova/api/ec2/__init__.py:402 +#: nova/api/ec2/__init__.py:403 #, python-format msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" msgstr "" "Solicitud no autorizada para el controlador=%(controller)s y la " "acción=%(action)s" -#: nova/api/ec2/__init__.py:492 -#, python-format -msgid "Unexpected %(ex_name)s raised: %(ex_str)s" -msgstr "Encontrado %(ex_name)s inesperado : %(ex_str)s" - -#: nova/api/ec2/__init__.py:495 -#, python-format -msgid "%(ex_name)s raised: %(ex_str)s" -msgstr "%(ex_name)s encontrado: %(ex_str)s" - -#: nova/api/ec2/__init__.py:519 -#, python-format -msgid "Environment: %s" -msgstr "Entorno: %s" - -#: nova/api/ec2/__init__.py:521 +#: nova/api/ec2/__init__.py:522 msgid "Unknown error occurred." msgstr "Ha ocurrido un error desconocido." -#: nova/api/ec2/cloud.py:391 +#: nova/api/ec2/cloud.py:392 #, python-format msgid "Create snapshot of volume %s" msgstr "Crear instantánea del volumen %s" -#: nova/api/ec2/cloud.py:416 +#: nova/api/ec2/cloud.py:417 #, python-format msgid "Could not find key pair(s): %s" msgstr "No se ha podido encontrar par(es) de claves: %s " -#: nova/api/ec2/cloud.py:432 +#: nova/api/ec2/cloud.py:433 #, python-format msgid "Create key pair %s" msgstr "Creando par de claves %s" -#: nova/api/ec2/cloud.py:444 +#: nova/api/ec2/cloud.py:445 #, python-format msgid "Import key %s" msgstr "Importar la clave %s" -#: nova/api/ec2/cloud.py:457 +#: nova/api/ec2/cloud.py:458 #, python-format msgid "Delete key pair %s" msgstr "Borrar para de claves %s" -#: nova/api/ec2/cloud.py:599 nova/api/ec2/cloud.py:729 +#: nova/api/ec2/cloud.py:600 nova/api/ec2/cloud.py:730 msgid "need group_name or group_id" msgstr "se necesita group_name o group_id" -#: nova/api/ec2/cloud.py:604 +#: nova/api/ec2/cloud.py:605 msgid "can't build a valid rule" msgstr "No se ha podido crear una regla válida" -#: nova/api/ec2/cloud.py:612 +#: nova/api/ec2/cloud.py:613 #, python-format msgid "Invalid IP protocol %(protocol)s" msgstr 
"Protocolo IP no válido %(protocol)s" -#: nova/api/ec2/cloud.py:646 nova/api/ec2/cloud.py:682 +#: nova/api/ec2/cloud.py:647 nova/api/ec2/cloud.py:683 msgid "No rule for the specified parameters." msgstr "No hay regla para los parámetros especificados." -#: nova/api/ec2/cloud.py:760 +#: nova/api/ec2/cloud.py:761 #, python-format msgid "Get console output for instance %s" msgstr "Obtener salida de la consola para la instancia %s" -#: nova/api/ec2/cloud.py:832 +#: nova/api/ec2/cloud.py:833 #, python-format msgid "Create volume from snapshot %s" msgstr "Crear volumen desde la instantánea %s" -#: nova/api/ec2/cloud.py:836 nova/api/openstack/compute/contrib/volumes.py:243 +#: nova/api/ec2/cloud.py:837 nova/api/openstack/compute/contrib/volumes.py:243 #, python-format msgid "Create volume of %s GB" msgstr "Crear volumen de %s GB" -#: nova/api/ec2/cloud.py:876 +#: nova/api/ec2/cloud.py:877 #, python-format msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" "Conectar el volumen %(volume_id)s a la instancia %(instance_id)s en " "%(device)s" -#: nova/api/ec2/cloud.py:906 nova/api/openstack/compute/contrib/volumes.py:506 +#: nova/api/ec2/cloud.py:907 nova/api/openstack/compute/contrib/volumes.py:506 #, python-format msgid "Detach volume %s" msgstr "Desasociar volumen %s" -#: nova/api/ec2/cloud.py:1238 +#: nova/api/ec2/cloud.py:1261 msgid "Allocate address" msgstr "Asignar dirección" -#: nova/api/ec2/cloud.py:1243 +#: nova/api/ec2/cloud.py:1266 #, python-format msgid "Release address %s" msgstr "Liberar dirección %s" -#: nova/api/ec2/cloud.py:1248 +#: nova/api/ec2/cloud.py:1271 #, python-format msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "Asociar dirección %(public_ip)s a instancia %(instance_id)s" -#: nova/api/ec2/cloud.py:1258 +#: nova/api/ec2/cloud.py:1281 msgid "Unable to associate IP Address, no fixed_ips." msgstr "No se puede asociar la dirección IP, sin fixed_ips." 
-#: nova/api/ec2/cloud.py:1266 -#: nova/api/openstack/compute/contrib/floating_ips.py:251 -#, python-format -msgid "multiple fixed_ips exist, using the first: %s" -msgstr "existen múltiples fixed_ips, utilizando la primera: %s" - -#: nova/api/ec2/cloud.py:1279 +#: nova/api/ec2/cloud.py:1302 #, python-format msgid "Disassociate address %s" msgstr "Desasociar dirección %s" -#: nova/api/ec2/cloud.py:1296 nova/api/openstack/compute/servers.py:918 +#: nova/api/ec2/cloud.py:1319 nova/api/openstack/compute/servers.py:920 #: nova/api/openstack/compute/plugins/v3/multiple_create.py:64 msgid "min_count must be <= max_count" msgstr "min_count debe ser <= max_count " -#: nova/api/ec2/cloud.py:1328 +#: nova/api/ec2/cloud.py:1351 msgid "Image must be available" msgstr "La imagen debe estar disponible " -#: nova/api/ec2/cloud.py:1424 +#: nova/api/ec2/cloud.py:1451 #, python-format msgid "Reboot instance %r" msgstr "Reiniciar instancia %r" -#: nova/api/ec2/cloud.py:1537 +#: nova/api/ec2/cloud.py:1566 #, python-format msgid "De-registering image %s" msgstr "Des-registrando la imagen %s" -#: nova/api/ec2/cloud.py:1553 +#: nova/api/ec2/cloud.py:1582 msgid "imageLocation is required" msgstr "Se necesita imageLocation" -#: nova/api/ec2/cloud.py:1573 +#: nova/api/ec2/cloud.py:1602 #, python-format msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "Imagen registrada %(image_location)s con el id %(image_id)s" -#: nova/api/ec2/cloud.py:1634 +#: nova/api/ec2/cloud.py:1663 msgid "user or group not specified" msgstr "usuario o grupo no especificado" -#: nova/api/ec2/cloud.py:1637 +#: nova/api/ec2/cloud.py:1666 msgid "only group \"all\" is supported" msgstr "sólo el grupo \"all\" está soportado" -#: nova/api/ec2/cloud.py:1640 +#: nova/api/ec2/cloud.py:1669 msgid "operation_type must be add or remove" msgstr "operation_type debe ser añadir o eliminar" -#: nova/api/ec2/cloud.py:1642 +#: nova/api/ec2/cloud.py:1671 #, python-format msgid "Updating image %s publicity" msgstr 
"Actualizando imagen %s públicamente" -#: nova/api/ec2/cloud.py:1655 +#: nova/api/ec2/cloud.py:1684 #, python-format msgid "Not allowed to modify attributes for image %s" msgstr "No está permitido modificar los atributos para la imagen %s" -#: nova/api/ec2/cloud.py:1685 +#: nova/api/ec2/cloud.py:1714 #, python-format msgid "" "Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not " @@ -2393,48 +2383,48 @@ msgstr "" "Valor no válido '%(ec2_instance_id)s' para el ID de instancia. La " "instancia no tiene ningún volumen conectado en la raíz (%(root)s)." -#: nova/api/ec2/cloud.py:1718 +#: nova/api/ec2/cloud.py:1747 #, python-format msgid "" "Couldn't stop instance %(instance)s within 1 hour. Current vm_state: " "%(vm_state)s, current task_state: %(task_state)s" msgstr "" -#: nova/api/ec2/cloud.py:1742 +#: nova/api/ec2/cloud.py:1771 #, python-format msgid "image of %(instance)s at %(now)s" msgstr "imagen de %(instance)s en %(now)s" -#: nova/api/ec2/cloud.py:1767 nova/api/ec2/cloud.py:1817 +#: nova/api/ec2/cloud.py:1796 nova/api/ec2/cloud.py:1846 msgid "resource_id and tag are required" msgstr "resource_id y tag son necesarios" -#: nova/api/ec2/cloud.py:1771 nova/api/ec2/cloud.py:1821 +#: nova/api/ec2/cloud.py:1800 nova/api/ec2/cloud.py:1850 msgid "Expecting a list of resources" msgstr "Esperando una lista de recursos" -#: nova/api/ec2/cloud.py:1776 nova/api/ec2/cloud.py:1826 -#: nova/api/ec2/cloud.py:1884 +#: nova/api/ec2/cloud.py:1805 nova/api/ec2/cloud.py:1855 +#: nova/api/ec2/cloud.py:1913 msgid "Only instances implemented" msgstr "Sólo están implementadas instancias" -#: nova/api/ec2/cloud.py:1780 nova/api/ec2/cloud.py:1830 +#: nova/api/ec2/cloud.py:1809 nova/api/ec2/cloud.py:1859 msgid "Expecting a list of tagSets" msgstr "Esperando una lista de tagSets" -#: nova/api/ec2/cloud.py:1786 nova/api/ec2/cloud.py:1839 +#: nova/api/ec2/cloud.py:1815 nova/api/ec2/cloud.py:1868 msgid "Expecting tagSet to be key/value pairs" msgstr "Esperando que tagSet sea un 
par clave/valor" -#: nova/api/ec2/cloud.py:1793 +#: nova/api/ec2/cloud.py:1822 msgid "Expecting both key and value to be set" msgstr "Esperando establecimiento tanto de clave como valor" -#: nova/api/ec2/cloud.py:1844 +#: nova/api/ec2/cloud.py:1873 msgid "Expecting key to be set" msgstr "Esperando el establecimiento de la clave" -#: nova/api/ec2/cloud.py:1918 +#: nova/api/ec2/cloud.py:1947 msgid "Invalid CIDR" msgstr "CIDR no válido" @@ -2453,247 +2443,145 @@ msgstr "" msgid "Timestamp is invalid." msgstr "La indicación de fecha y hora no es válida." -#: nova/api/metadata/handler.py:112 -msgid "" -"X-Instance-ID present in request headers. The " -"'service_neutron_metadata_proxy' option must be enabled to process this " -"header." -msgstr "" -"X-Instance-ID presente en encabezados de soicitud. La opción " -"'service_neutron_metadata_proy' debe ser habilitada para procesar este " -"encabezado." - -#: nova/api/metadata/handler.py:141 nova/api/metadata/handler.py:148 +#: nova/api/metadata/handler.py:148 #, python-format msgid "Failed to get metadata for ip: %s" msgstr "Fallo al generar metadatos para la ip %s" -#: nova/api/metadata/handler.py:143 nova/api/metadata/handler.py:199 +#: nova/api/metadata/handler.py:150 nova/api/metadata/handler.py:207 msgid "An unknown error has occurred. Please try your request again." msgstr "Ha sucedido un error desconocido. Por favor repite el intento de nuevo." -#: nova/api/metadata/handler.py:161 +#: nova/api/metadata/handler.py:169 msgid "X-Instance-ID header is missing from request." msgstr "Falta la cabecera de ID de instancia X en la solicitud." -#: nova/api/metadata/handler.py:163 +#: nova/api/metadata/handler.py:171 msgid "X-Tenant-ID header is missing from request." msgstr "el encabezado X-Tenant-ID falta en la solicitud." -#: nova/api/metadata/handler.py:165 +#: nova/api/metadata/handler.py:173 msgid "Multiple X-Instance-ID headers found within request." 
msgstr "Se han encontrado varias cabeceas de ID de instancia X en la solicitud." -#: nova/api/metadata/handler.py:167 +#: nova/api/metadata/handler.py:175 msgid "Multiple X-Tenant-ID headers found within request." msgstr "Se han encontrado múltiples encabezados X-Tenant-ID en la solicitud." -#: nova/api/metadata/handler.py:181 -#, python-format -msgid "" -"X-Instance-ID-Signature: %(signature)s does not match the expected value:" -" %(expected_signature)s for id: %(instance_id)s. Request From: " -"%(remote_address)s" -msgstr "" -"Firma_ID_instancia_X: %(signature)s no coincide con el valor esperado: " -"%(expected_signature)s para el ID: %(instance_id)s. Solicitud desde: " -"%(remote_address)s " - -#: nova/api/metadata/handler.py:190 +#: nova/api/metadata/handler.py:198 msgid "Invalid proxy request signature." msgstr "Firma de solicitud de proxy no válida." -#: nova/api/metadata/handler.py:197 nova/api/metadata/handler.py:204 +#: nova/api/metadata/handler.py:205 #, python-format msgid "Failed to get metadata for instance id: %s" msgstr "No se han podido obtener metadatos para el id de instancia: %s" -#: nova/api/metadata/handler.py:208 -#, python-format -msgid "" -"Tenant_id %(tenant_id)s does not match tenant_id of instance " -"%(instance_id)s." -msgstr "" -"Tenant_id %(tenant_id)s no coincide con tenant_id de la instancia " -"%(instance_id)s." 
- -#: nova/api/metadata/vendordata_json.py:47 -msgid "file does not exist" -msgstr "No existe el fichero" - -#: nova/api/metadata/vendordata_json.py:49 -msgid "Unexpected IOError when reading" -msgstr "Error de E/S no esperado al leer" - -#: nova/api/metadata/vendordata_json.py:52 -msgid "failed to load json" -msgstr "Ha fallado la carga de json" - -#: nova/api/openstack/__init__.py:89 +#: nova/api/openstack/__init__.py:92 #, python-format msgid "Caught error: %s" msgstr "Capturado error: %s" -#: nova/api/openstack/__init__.py:98 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "Se ha devuelto %(url)s con HTTP %(status)d" - -#: nova/api/openstack/__init__.py:186 +#: nova/api/openstack/__init__.py:189 msgid "Must specify an ExtensionManager class" msgstr "Debe especificar una clase ExtensionManager" -#: nova/api/openstack/__init__.py:232 nova/api/openstack/__init__.py:406 -#, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" -msgstr "" -"Ampliación %(ext_name)s: no se puede ampliar el recurso %(collection)s: " -"no existe dicho recurso." 
- -#: nova/api/openstack/__init__.py:279 -#: nova/api/openstack/compute/plugins/v3/servers.py:99 -#, python-format -msgid "Not loading %s because it is in the blacklist" -msgstr "No se ha cargado %s porque está en la lista negra" - -#: nova/api/openstack/__init__.py:284 -#: nova/api/openstack/compute/plugins/v3/servers.py:104 -#, python-format -msgid "Not loading %s because it is not in the whitelist" -msgstr "No se ha cargado %s porque no está en la lista blanca" - -#: nova/api/openstack/__init__.py:291 -msgid "V3 API has been disabled by configuration" -msgstr "" - -#: nova/api/openstack/__init__.py:304 -#, python-format -msgid "Extensions in both blacklist and whitelist: %s" -msgstr "Extensiones en lista restrictiva y lista permisiva: %s" - -#: nova/api/openstack/__init__.py:328 -#, python-format -msgid "Missing core API extensions: %s" -msgstr "Extensiones core API omitidas: %s" - -#: nova/api/openstack/common.py:132 -#, python-format -msgid "" -"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. " -"Bad upgrade or db corrupted?" -msgstr "" -"el estado es UNKNOWN de vm_state=%(vm_state)s task_state=%(task_state)s. " -"¿Actualización errónea o base de datos dañada?" 
- -#: nova/api/openstack/common.py:182 +#: nova/api/openstack/common.py:185 #, python-format msgid "%s param must be an integer" msgstr "El parámetro %s debe ser un entero" -#: nova/api/openstack/common.py:185 +#: nova/api/openstack/common.py:188 #, python-format msgid "%s param must be positive" msgstr "El parámetro %s debe ser positivo" -#: nova/api/openstack/common.py:210 +#: nova/api/openstack/common.py:213 msgid "offset param must be an integer" msgstr "el parámetro de desplazamiento debe ser un entero" -#: nova/api/openstack/common.py:216 +#: nova/api/openstack/common.py:219 msgid "limit param must be an integer" msgstr "el parámetro de límite debe ser un entero" -#: nova/api/openstack/common.py:220 +#: nova/api/openstack/common.py:223 msgid "limit param must be positive" msgstr "el parámetro de límite debe ser positivo" -#: nova/api/openstack/common.py:224 +#: nova/api/openstack/common.py:227 msgid "offset param must be positive" msgstr "el parámetro de desplazamiento debe ser positivo" -#: nova/api/openstack/common.py:276 +#: nova/api/openstack/common.py:280 #, python-format msgid "href %s does not contain version" msgstr "href %s no contiene la versión" -#: nova/api/openstack/common.py:291 +#: nova/api/openstack/common.py:293 msgid "Image metadata limit exceeded" msgstr "Se ha superado el límite de metadatos de imágenes" -#: nova/api/openstack/common.py:299 +#: nova/api/openstack/common.py:301 msgid "Image metadata key cannot be blank" msgstr "La clave de metadatos de imagen no puede estar en blanco" -#: nova/api/openstack/common.py:302 +#: nova/api/openstack/common.py:304 msgid "Image metadata key too long" msgstr "La clave de metadatos de imagen es demasiado larga" -#: nova/api/openstack/common.py:305 +#: nova/api/openstack/common.py:307 msgid "Invalid image metadata" msgstr "Metadatos de imagen no válidos " -#: nova/api/openstack/common.py:368 +#: nova/api/openstack/common.py:370 #, python-format msgid "Cannot '%(action)s' while instance is in %(attr)s 
%(state)s" msgstr "No se puede '%(action)s' mientras la instancia está en %(attr)s %(state)s" -#: nova/api/openstack/common.py:371 +#: nova/api/openstack/common.py:373 #, python-format msgid "Cannot '%s' an instance which has never been active" msgstr "No se puede '%s' una instancia que nunca ha estado activa" -#: nova/api/openstack/common.py:374 +#: nova/api/openstack/common.py:376 #, python-format msgid "Instance is in an invalid state for '%s'" msgstr "La instancia se encuentra en un estado inválido para '%s'" -#: nova/api/openstack/common.py:454 -msgid "Rejecting snapshot request, snapshots currently disabled" -msgstr "" -"Rechazando solicitud de instantánea, instantáneas inhabilitadas " -"actualmente" - -#: nova/api/openstack/common.py:456 +#: nova/api/openstack/common.py:458 msgid "Instance snapshots are not permitted at this time." msgstr "Las instantáneas de instancia no están permitidas en este momento." -#: nova/api/openstack/common.py:577 +#: nova/api/openstack/common.py:579 msgid "Cells is not enabled." msgstr "Las celdas no están habilitadas." 
-#: nova/api/openstack/extensions.py:197 +#: nova/api/openstack/extensions.py:198 #, python-format msgid "Loaded extension: %s" msgstr "Ampliación cargada: %s" -#: nova/api/openstack/extensions.py:243 +#: nova/api/openstack/extensions.py:244 #: nova/api/openstack/compute/plugins/__init__.py:51 #, python-format msgid "Exception loading extension: %s" msgstr "Excepción al cargar ampliación: %s" -#: nova/api/openstack/extensions.py:278 -#, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" -msgstr "No se ha podido cargar la ampliación %(ext_factory)s: %(exc)s" - -#: nova/api/openstack/extensions.py:349 +#: nova/api/openstack/extensions.py:350 #, python-format msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "No se ha podido cargar la ampliación %(classpath)s: %(exc)s" -#: nova/api/openstack/extensions.py:372 +#: nova/api/openstack/extensions.py:373 #, python-format msgid "Failed to load extension %(ext_name)s:%(exc)s" msgstr "Fallo al cargar extensión %(ext_name)s:%(exc)s" -#: nova/api/openstack/extensions.py:494 +#: nova/api/openstack/extensions.py:495 msgid "Unexpected exception in API method" msgstr "Excepción inesperada en método API." -#: nova/api/openstack/extensions.py:495 +#: nova/api/openstack/extensions.py:496 #, python-format msgid "" "Unexpected API Error. 
Please report this at " @@ -2706,56 +2594,41 @@ msgstr "" "posible.\n" "%s" -#: nova/api/openstack/wsgi.py:228 nova/api/openstack/wsgi.py:633 +#: nova/api/openstack/wsgi.py:230 nova/api/openstack/wsgi.py:635 msgid "cannot understand JSON" msgstr "no se puede entender JSON" -#: nova/api/openstack/wsgi.py:638 +#: nova/api/openstack/wsgi.py:640 msgid "too many body keys" msgstr "demasiadas claves de cuerpo" -#: nova/api/openstack/wsgi.py:682 -#, python-format -msgid "Exception handling resource: %s" -msgstr "Excepción al manejar recurso: %s" - -#: nova/api/openstack/wsgi.py:686 -#, python-format -msgid "Fault thrown: %s" -msgstr "Error emitido: %s" - -#: nova/api/openstack/wsgi.py:689 -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "Excepción de HTTP emitida: %s" - -#: nova/api/openstack/wsgi.py:919 +#: nova/api/openstack/wsgi.py:921 #, python-format msgid "There is no such action: %s" msgstr "No existe esta acción: %s" -#: nova/api/openstack/wsgi.py:922 nova/api/openstack/wsgi.py:949 +#: nova/api/openstack/wsgi.py:924 nova/api/openstack/wsgi.py:951 #: nova/api/openstack/compute/server_metadata.py:57 #: nova/api/openstack/compute/server_metadata.py:75 #: nova/api/openstack/compute/server_metadata.py:100 #: nova/api/openstack/compute/server_metadata.py:126 -#: nova/api/openstack/compute/contrib/evacuate.py:45 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:58 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:73 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:95 +#: nova/api/openstack/compute/contrib/evacuate.py:47 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:60 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:75 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:97 msgid "Malformed request body" msgstr "Cuerpo de solicitud formado incorrectamente" -#: nova/api/openstack/wsgi.py:926 +#: nova/api/openstack/wsgi.py:928 #, python-format msgid "Action: '%(action)s', body: %(body)s" 
msgstr "Acción: '%(action)s', cuerpo: %(body)s" -#: nova/api/openstack/wsgi.py:946 +#: nova/api/openstack/wsgi.py:948 msgid "Unsupported Content-Type" msgstr "Tipo de contenido no soportado" -#: nova/api/openstack/wsgi.py:958 +#: nova/api/openstack/wsgi.py:960 #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " @@ -2788,7 +2661,7 @@ msgid "Initializing extension manager." msgstr "Inicializando gestor de ampliación." #: nova/api/openstack/compute/flavors.py:107 -#: nova/api/openstack/compute/plugins/v3/flavors.py:70 +#: nova/api/openstack/compute/plugins/v3/flavors.py:72 #, python-format msgid "Invalid is_public filter [%s]" msgstr "Filtro is_public no válido [%s]" @@ -2804,57 +2677,57 @@ msgid "Invalid minDisk filter [%s]" msgstr "Filtro minDisk no válido [%s]" #: nova/api/openstack/compute/flavors.py:146 -#: nova/api/openstack/compute/servers.py:603 -#: nova/api/openstack/compute/plugins/v3/flavors.py:110 -#: nova/api/openstack/compute/plugins/v3/servers.py:280 +#: nova/api/openstack/compute/servers.py:606 +#: nova/api/openstack/compute/plugins/v3/flavors.py:112 +#: nova/api/openstack/compute/plugins/v3/servers.py:303 #, python-format msgid "marker [%s] not found" msgstr "no se ha encontrado el marcador [%s]" -#: nova/api/openstack/compute/image_metadata.py:35 -#: nova/api/openstack/compute/images.py:141 -#: nova/api/openstack/compute/images.py:157 +#: nova/api/openstack/compute/image_metadata.py:37 +#: nova/api/openstack/compute/images.py:135 +#: nova/api/openstack/compute/images.py:151 msgid "Image not found." msgstr "Imagen no encontrada." 
-#: nova/api/openstack/compute/image_metadata.py:78 +#: nova/api/openstack/compute/image_metadata.py:81 msgid "Incorrect request body format" msgstr "Formato de cuerpo de solicitud incorrecto" -#: nova/api/openstack/compute/image_metadata.py:82 +#: nova/api/openstack/compute/image_metadata.py:85 #: nova/api/openstack/compute/server_metadata.py:79 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:108 #: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:72 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:77 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:79 msgid "Request body and URI mismatch" msgstr "Discrepancia de URI y cuerpo de solicitud" -#: nova/api/openstack/compute/image_metadata.py:85 +#: nova/api/openstack/compute/image_metadata.py:88 #: nova/api/openstack/compute/server_metadata.py:83 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:111 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:81 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:83 msgid "Request body contains too many items" msgstr "El cuerpo de solicitud contiene demasiados elementos" -#: nova/api/openstack/compute/image_metadata.py:117 +#: nova/api/openstack/compute/image_metadata.py:122 msgid "Invalid metadata key" msgstr "Clave de metadatos no válida" -#: nova/api/openstack/compute/images.py:162 +#: nova/api/openstack/compute/images.py:156 msgid "You are not allowed to delete the image." msgstr "No le está permitido suprimir la imagen." 
#: nova/api/openstack/compute/ips.py:67 -#: nova/api/openstack/compute/plugins/v3/ips.py:39 +#: nova/api/openstack/compute/plugins/v3/ips.py:41 msgid "Instance does not exist" msgstr "La instancia no existe " #: nova/api/openstack/compute/ips.py:90 -#: nova/api/openstack/compute/plugins/v3/ips.py:60 +#: nova/api/openstack/compute/plugins/v3/ips.py:62 msgid "Instance is not a member of specified network" msgstr "La instancia no es miembro de la red especificada" -#: nova/api/openstack/compute/limits.py:161 +#: nova/api/openstack/compute/limits.py:162 #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " @@ -2863,141 +2736,126 @@ msgstr "" "Solo se pueden realizar %(value)s solicitud(es) de %(verb)s para %(uri)s " "cada %(unit_string)s." -#: nova/api/openstack/compute/limits.py:287 +#: nova/api/openstack/compute/limits.py:288 msgid "This request was rate-limited." msgstr "Esta solicitud estaba limitada por tipo." #: nova/api/openstack/compute/server_metadata.py:37 #: nova/api/openstack/compute/server_metadata.py:122 #: nova/api/openstack/compute/server_metadata.py:177 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:41 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:43 msgid "Server does not exist" msgstr "El servidor no existe" #: nova/api/openstack/compute/server_metadata.py:157 #: nova/api/openstack/compute/server_metadata.py:168 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:144 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:156 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:146 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:158 msgid "Metadata item was not found" msgstr "No se ha encontrado el elemento metadatos" -#: nova/api/openstack/compute/servers.py:81 -msgid "" -"XML support has been deprecated and may be removed as early as the Juno " -"release." 
-msgstr "" -"El soporte XML está obsoleto y podrá ser removito tan pronto como la " -"liberación de Juno." - -#: nova/api/openstack/compute/servers.py:551 -#: nova/api/openstack/compute/contrib/cells.py:423 -#: nova/api/openstack/compute/plugins/v3/cells.py:331 +#: nova/api/openstack/compute/servers.py:554 +#: nova/api/openstack/compute/contrib/cells.py:427 msgid "Invalid changes-since value" msgstr "Valor de changes-since no válido" -#: nova/api/openstack/compute/servers.py:570 -#: nova/api/openstack/compute/plugins/v3/servers.py:234 +#: nova/api/openstack/compute/servers.py:573 +#: nova/api/openstack/compute/plugins/v3/servers.py:257 msgid "Only administrators may list deleted instances" msgstr "Sólo los administradores pueden listar instancias suprimidas " -#: nova/api/openstack/compute/servers.py:606 -#: nova/api/openstack/compute/plugins/v3/servers.py:283 -#, python-format -msgid "Flavor '%s' could not be found " -msgstr "El sabor '%s' no se ha podido encontrar " - -#: nova/api/openstack/compute/servers.py:625 -#: nova/api/openstack/compute/servers.py:772 -#: nova/api/openstack/compute/servers.py:1081 +#: nova/api/openstack/compute/servers.py:627 +#: nova/api/openstack/compute/servers.py:774 +#: nova/api/openstack/compute/servers.py:1078 #: nova/api/openstack/compute/servers.py:1203 #: nova/api/openstack/compute/servers.py:1388 -#: nova/api/openstack/compute/plugins/v3/servers.py:617 -#: nova/api/openstack/compute/plugins/v3/servers.py:729 -#: nova/api/openstack/compute/plugins/v3/servers.py:848 +#: nova/api/openstack/compute/plugins/v3/servers.py:650 +#: nova/api/openstack/compute/plugins/v3/servers.py:768 +#: nova/api/openstack/compute/plugins/v3/servers.py:889 msgid "Instance could not be found" msgstr "No se ha podido encontrar la instancia" -#: nova/api/openstack/compute/servers.py:656 +#: nova/api/openstack/compute/servers.py:658 #, python-format msgid "Bad personality format: missing %s" msgstr "Formato de personalidad incorrecto : faltan %s " -#: 
nova/api/openstack/compute/servers.py:659 +#: nova/api/openstack/compute/servers.py:661 msgid "Bad personality format" msgstr "Formato de personalidad incorrecto " -#: nova/api/openstack/compute/servers.py:662 +#: nova/api/openstack/compute/servers.py:664 #, python-format msgid "Personality content for %s cannot be decoded" msgstr "No se puede decodificar el contenido de personalidad para %s" -#: nova/api/openstack/compute/servers.py:677 +#: nova/api/openstack/compute/servers.py:679 msgid "Unknown argument : port" msgstr "Argumento desconocido: puerto" -#: nova/api/openstack/compute/servers.py:680 -#: nova/api/openstack/compute/plugins/v3/servers.py:338 +#: nova/api/openstack/compute/servers.py:682 +#: nova/api/openstack/compute/plugins/v3/servers.py:361 #, python-format msgid "Bad port format: port uuid is not in proper format (%s)" msgstr "" "Formato de puerto incorrecto: uuid de puerto no tiene el formato correcto" " (%s) " -#: nova/api/openstack/compute/servers.py:690 -#: nova/api/openstack/compute/plugins/v3/servers.py:354 +#: nova/api/openstack/compute/servers.py:692 +#: nova/api/openstack/compute/plugins/v3/servers.py:377 #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "" "Formato incorrecto de redes: el uuid de red no está en el formato " "correcto (%s) " -#: nova/api/openstack/compute/servers.py:701 -#: nova/api/openstack/compute/plugins/v3/servers.py:327 +#: nova/api/openstack/compute/servers.py:703 +#: nova/api/openstack/compute/plugins/v3/servers.py:350 #, python-format msgid "Invalid fixed IP address (%s)" msgstr "Dirección IP fija no válida (%s) " -#: nova/api/openstack/compute/servers.py:714 -#: nova/api/openstack/compute/plugins/v3/servers.py:369 +#: nova/api/openstack/compute/servers.py:716 +#: nova/api/openstack/compute/plugins/v3/servers.py:392 #, python-format msgid "Duplicate networks (%s) are not allowed" msgstr "Las redes duplicadas (%s) no están permitidas" -#: 
nova/api/openstack/compute/servers.py:720 -#: nova/api/openstack/compute/plugins/v3/servers.py:375 +#: nova/api/openstack/compute/servers.py:722 +#: nova/api/openstack/compute/plugins/v3/servers.py:398 #, python-format msgid "Bad network format: missing %s" msgstr "Formato de red erróneo: falta %s" -#: nova/api/openstack/compute/servers.py:723 -#: nova/api/openstack/compute/servers.py:824 -#: nova/api/openstack/compute/plugins/v3/servers.py:378 +#: nova/api/openstack/compute/servers.py:725 +#: nova/api/openstack/compute/servers.py:826 +#: nova/api/openstack/compute/plugins/v3/servers.py:401 msgid "Bad networks format" msgstr "Formato de redes erróneo" -#: nova/api/openstack/compute/servers.py:749 +#: nova/api/openstack/compute/servers.py:751 msgid "Userdata content cannot be decoded" msgstr "No se puede decodificar el contenido de datos de usuario" -#: nova/api/openstack/compute/servers.py:754 +#: nova/api/openstack/compute/servers.py:756 msgid "accessIPv4 is not proper IPv4 format" msgstr "accessIPv4 no está en formato IPv4 correcto" -#: nova/api/openstack/compute/servers.py:759 +#: nova/api/openstack/compute/servers.py:761 msgid "accessIPv6 is not proper IPv6 format" msgstr "accessIPv6 no está en formato IPv6 correcto" -#: nova/api/openstack/compute/servers.py:788 -#: nova/api/openstack/compute/plugins/v3/servers.py:419 +#: nova/api/openstack/compute/servers.py:790 +#: nova/api/openstack/compute/plugins/v3/servers.py:443 msgid "Server name is not defined" msgstr "El nombre de servidor no está definido " -#: nova/api/openstack/compute/servers.py:840 -#: nova/api/openstack/compute/servers.py:968 +#: nova/api/openstack/compute/servers.py:842 +#: nova/api/openstack/compute/servers.py:970 msgid "Invalid flavorRef provided." msgstr "Se ha proporcionado flavorRef no válido. " -#: nova/api/openstack/compute/servers.py:880 +#: nova/api/openstack/compute/servers.py:882 msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." 
@@ -3005,80 +2863,80 @@ msgstr "" "El uso de sintáxis diferentes de block_device_mapping en la misma " "petición no está permitido." -#: nova/api/openstack/compute/servers.py:965 -#: nova/api/openstack/compute/plugins/v3/servers.py:495 +#: nova/api/openstack/compute/servers.py:967 +#: nova/api/openstack/compute/plugins/v3/servers.py:519 msgid "Can not find requested image" msgstr "No se puede encontrar la imagen solicitada " -#: nova/api/openstack/compute/servers.py:971 -#: nova/api/openstack/compute/plugins/v3/servers.py:501 +#: nova/api/openstack/compute/servers.py:973 +#: nova/api/openstack/compute/plugins/v3/servers.py:525 msgid "Invalid key_name provided." msgstr "Se ha proporcionado un nombre de clave no válido." -#: nova/api/openstack/compute/servers.py:974 -#: nova/api/openstack/compute/plugins/v3/servers.py:504 +#: nova/api/openstack/compute/servers.py:976 +#: nova/api/openstack/compute/plugins/v3/servers.py:528 msgid "Invalid config_drive provided." msgstr "La config_drive proporcionada es inválida." -#: nova/api/openstack/compute/servers.py:1066 +#: nova/api/openstack/compute/servers.py:1063 msgid "HostId cannot be updated." msgstr "El ID de host no se puede actualizar. " -#: nova/api/openstack/compute/servers.py:1070 +#: nova/api/openstack/compute/servers.py:1067 msgid "Personality cannot be updated." msgstr "No se puede actualizar la personalidad." -#: nova/api/openstack/compute/servers.py:1096 -#: nova/api/openstack/compute/servers.py:1115 -#: nova/api/openstack/compute/plugins/v3/servers.py:628 -#: nova/api/openstack/compute/plugins/v3/servers.py:644 +#: nova/api/openstack/compute/servers.py:1093 +#: nova/api/openstack/compute/servers.py:1112 +#: nova/api/openstack/compute/plugins/v3/servers.py:662 +#: nova/api/openstack/compute/plugins/v3/servers.py:679 msgid "Instance has not been resized." msgstr "La instancia no se ha redimensionado." 
-#: nova/api/openstack/compute/servers.py:1118 -#: nova/api/openstack/compute/plugins/v3/servers.py:647 +#: nova/api/openstack/compute/servers.py:1115 +#: nova/api/openstack/compute/plugins/v3/servers.py:682 msgid "Flavor used by the instance could not be found." msgstr "No se ha podido encontrar el sabor utilizado por la instancia." -#: nova/api/openstack/compute/servers.py:1134 -#: nova/api/openstack/compute/plugins/v3/servers.py:661 +#: nova/api/openstack/compute/servers.py:1131 +#: nova/api/openstack/compute/plugins/v3/servers.py:697 msgid "Argument 'type' for reboot must be a string" msgstr "El argumento 'type' para reinicio debe ser una cadena" -#: nova/api/openstack/compute/servers.py:1140 -#: nova/api/openstack/compute/plugins/v3/servers.py:667 +#: nova/api/openstack/compute/servers.py:1137 +#: nova/api/openstack/compute/plugins/v3/servers.py:703 msgid "Argument 'type' for reboot is not HARD or SOFT" msgstr "El argumento 'type' para el rearranque no es HARD o SOFT" -#: nova/api/openstack/compute/servers.py:1144 -#: nova/api/openstack/compute/plugins/v3/servers.py:671 +#: nova/api/openstack/compute/servers.py:1141 +#: nova/api/openstack/compute/plugins/v3/servers.py:707 msgid "Missing argument 'type' for reboot" msgstr "Falta el argumento 'type' para el rearranque" -#: nova/api/openstack/compute/servers.py:1171 -#: nova/api/openstack/compute/plugins/v3/servers.py:699 +#: nova/api/openstack/compute/servers.py:1168 +#: nova/api/openstack/compute/plugins/v3/servers.py:735 msgid "Unable to locate requested flavor." msgstr "No se puede ubicar el tipo solicitado." -#: nova/api/openstack/compute/servers.py:1174 -#: nova/api/openstack/compute/plugins/v3/servers.py:702 +#: nova/api/openstack/compute/servers.py:1171 +#: nova/api/openstack/compute/plugins/v3/servers.py:738 msgid "Resize requires a flavor change." msgstr "Redimensionar necesita un cambio de modelo. 
" -#: nova/api/openstack/compute/servers.py:1182 -#: nova/api/openstack/compute/plugins/v3/servers.py:710 +#: nova/api/openstack/compute/servers.py:1181 +#: nova/api/openstack/compute/plugins/v3/servers.py:748 msgid "You are not authorized to access the image the instance was started with." msgstr "" "No está autorizado a acceder a la imagen con la que se ha lanzado la " "instancia." -#: nova/api/openstack/compute/servers.py:1186 -#: nova/api/openstack/compute/plugins/v3/servers.py:714 +#: nova/api/openstack/compute/servers.py:1185 +#: nova/api/openstack/compute/plugins/v3/servers.py:752 msgid "Image that the instance was started with could not be found." msgstr "No se ha podido encontrar la imagen con la que se lanzó la instancia." -#: nova/api/openstack/compute/servers.py:1190 -#: nova/api/openstack/compute/plugins/v3/servers.py:718 +#: nova/api/openstack/compute/servers.py:1189 +#: nova/api/openstack/compute/plugins/v3/servers.py:756 msgid "Invalid instance image." msgstr "Imagen de instancia no válida." @@ -3123,118 +2981,63 @@ msgid "Could not parse imageRef from request." msgstr "No se ha podido analizar imageRef de la solicitud. 
" #: nova/api/openstack/compute/servers.py:1394 -#: nova/api/openstack/compute/plugins/v3/servers.py:854 +#: nova/api/openstack/compute/plugins/v3/servers.py:895 msgid "Cannot find image for rebuild" msgstr "No se puede encontrar la imagen para reconstrucción " -#: nova/api/openstack/compute/servers.py:1427 +#: nova/api/openstack/compute/servers.py:1428 msgid "createImage entity requires name attribute" msgstr "La entidad createImage necesita el atributo de nombre" -#: nova/api/openstack/compute/servers.py:1436 -#: nova/api/openstack/compute/contrib/admin_actions.py:288 -#: nova/api/openstack/compute/plugins/v3/servers.py:894 +#: nova/api/openstack/compute/servers.py:1437 +#: nova/api/openstack/compute/contrib/admin_actions.py:283 +#: nova/api/openstack/compute/plugins/v3/servers.py:936 msgid "Invalid metadata" msgstr "Metadatos no válidos" -#: nova/api/openstack/compute/servers.py:1494 +#: nova/api/openstack/compute/servers.py:1495 msgid "Invalid adminPass" msgstr "adminPass no válido " -#: nova/api/openstack/compute/contrib/admin_actions.py:63 -#: nova/api/openstack/compute/contrib/admin_actions.py:88 -#: nova/api/openstack/compute/contrib/admin_actions.py:113 -#: nova/api/openstack/compute/contrib/admin_actions.py:135 -#: nova/api/openstack/compute/contrib/admin_actions.py:178 -#: nova/api/openstack/compute/contrib/admin_actions.py:197 -#: nova/api/openstack/compute/contrib/admin_actions.py:216 -#: nova/api/openstack/compute/contrib/admin_actions.py:235 -#: nova/api/openstack/compute/contrib/admin_actions.py:393 -#: nova/api/openstack/compute/contrib/multinic.py:43 +#: nova/api/openstack/compute/contrib/admin_actions.py:64 +#: nova/api/openstack/compute/contrib/admin_actions.py:86 +#: nova/api/openstack/compute/contrib/admin_actions.py:108 +#: nova/api/openstack/compute/contrib/admin_actions.py:130 +#: nova/api/openstack/compute/contrib/admin_actions.py:173 +#: nova/api/openstack/compute/contrib/admin_actions.py:192 +#: 
nova/api/openstack/compute/contrib/admin_actions.py:211 +#: nova/api/openstack/compute/contrib/admin_actions.py:230 +#: nova/api/openstack/compute/contrib/admin_actions.py:388 +#: nova/api/openstack/compute/contrib/multinic.py:44 #: nova/api/openstack/compute/contrib/rescue.py:45 #: nova/api/openstack/compute/contrib/shelve.py:43 msgid "Server not found" msgstr "Servidor no encontrado" -#: nova/api/openstack/compute/contrib/admin_actions.py:66 -#: nova/api/openstack/compute/plugins/v3/pause_server.py:59 -#: nova/api/openstack/compute/plugins/v3/pause_server.py:81 -msgid "Virt driver does not implement pause function." -msgstr "El controlador Virt no implementa la función de pausa." - -#: nova/api/openstack/compute/contrib/admin_actions.py:70 -#, python-format -msgid "Compute.api::pause %s" -msgstr "Compute.api::pause %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:91 -msgid "Virt driver does not implement unpause function." -msgstr "El controlador Virt no implementa una función de unpause." 
- -#: nova/api/openstack/compute/contrib/admin_actions.py:95 -#, python-format -msgid "Compute.api::unpause %s" -msgstr "Compute.api::unpause %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:117 -#, python-format -msgid "compute.api::suspend %s" -msgstr "compute.api::suspend %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:139 -#, python-format -msgid "compute.api::resume %s" -msgstr "compute.api::resume %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:165 -#, python-format -msgid "Error in migrate %s" -msgstr "Error al migrar %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:184 -#, python-format -msgid "Compute.api::reset_network %s" -msgstr "Compute.api::reset_network %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:203 -#, python-format -msgid "Compute.api::inject_network_info %s" -msgstr "Compute.api::inject_network_info %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:220 -#, python-format -msgid "Compute.api::lock %s" -msgstr "Compute.api::lock %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:239 -#, python-format -msgid "Compute.api::unlock %s" -msgstr "Compute.api::unlock %s" - -#: nova/api/openstack/compute/contrib/admin_actions.py:265 +#: nova/api/openstack/compute/contrib/admin_actions.py:260 #, python-format msgid "createBackup entity requires %s attribute" msgstr "La entidad createBackup necesita el atributo %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:269 +#: nova/api/openstack/compute/contrib/admin_actions.py:264 msgid "Malformed createBackup entity" msgstr "Entidad createBackup formada incorrectamente" -#: nova/api/openstack/compute/contrib/admin_actions.py:275 +#: nova/api/openstack/compute/contrib/admin_actions.py:270 msgid "createBackup attribute 'rotation' must be an integer" msgstr "La 'rotación' del atributo createBackup debe ser un entero" -#: nova/api/openstack/compute/contrib/admin_actions.py:278 +#: 
nova/api/openstack/compute/contrib/admin_actions.py:273 msgid "createBackup attribute 'rotation' must be greater than or equal to zero" msgstr "El atributo de createBackup 'rotation' debe ser mayor que o igual a cero" -#: nova/api/openstack/compute/contrib/admin_actions.py:294 +#: nova/api/openstack/compute/contrib/admin_actions.py:289 #: nova/api/openstack/compute/contrib/console_output.py:46 #: nova/api/openstack/compute/contrib/server_start_stop.py:40 msgid "Instance not found" msgstr "No se ha encontrado la instancia " -#: nova/api/openstack/compute/contrib/admin_actions.py:325 +#: nova/api/openstack/compute/contrib/admin_actions.py:320 msgid "" "host, block_migration and disk_over_commit must be specified for live " "migration." @@ -3242,59 +3045,69 @@ msgstr "" "host, block_migration y disk_over_commit deben especificarse para " "migración en vivo." -#: nova/api/openstack/compute/contrib/admin_actions.py:362 +#: nova/api/openstack/compute/contrib/admin_actions.py:357 #, python-format msgid "Live migration of instance %s to another host failed" msgstr "Ha fallado la migración en vivo de la instancia %s a otro host" -#: nova/api/openstack/compute/contrib/admin_actions.py:365 +#: nova/api/openstack/compute/contrib/admin_actions.py:360 #, python-format msgid "Live migration of instance %(id)s to host %(host)s failed" msgstr "La migración en directo de la instancia %(id)s al host %(host)s ha fallado" -#: nova/api/openstack/compute/contrib/admin_actions.py:383 -#: nova/api/openstack/compute/plugins/v3/admin_actions.py:83 +#: nova/api/openstack/compute/contrib/admin_actions.py:378 #, python-format msgid "Desired state must be specified. Valid states are: %s" msgstr "Se debe especificar el estado deseado. 
Los estados válidos son: %s" -#: nova/api/openstack/compute/contrib/admin_actions.py:397 +#: nova/api/openstack/compute/contrib/agents.py:100 +#: nova/api/openstack/compute/contrib/agents.py:118 +#: nova/api/openstack/compute/contrib/agents.py:156 +#: nova/api/openstack/compute/contrib/cloudpipe_update.py:55 #, python-format -msgid "Compute.api::resetState %s" -msgstr "Compute.api::resetState %s" +msgid "Invalid request body: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/aggregates.py:39 +msgid "Only host parameter can be specified" +msgstr "" + +#: nova/api/openstack/compute/contrib/aggregates.py:42 +msgid "Host parameter must be specified" +msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:161 +#: nova/api/openstack/compute/contrib/aggregates.py:168 #, python-format msgid "Aggregates does not have %s action" msgstr "Los agregados no tienen la acción %s " -#: nova/api/openstack/compute/contrib/aggregates.py:165 +#: nova/api/openstack/compute/contrib/aggregates.py:172 #: nova/api/openstack/compute/contrib/flavormanage.py:55 #: nova/api/openstack/compute/contrib/keypairs.py:86 #: nova/api/openstack/compute/plugins/v3/aggregates.py:169 msgid "Invalid request body" msgstr "Cuerpo de solicitud no válido" -#: nova/api/openstack/compute/contrib/aggregates.py:175 -#: nova/api/openstack/compute/contrib/aggregates.py:180 +#: nova/api/openstack/compute/contrib/aggregates.py:182 +#: nova/api/openstack/compute/contrib/aggregates.py:187 #, python-format msgid "Cannot add host %(host)s in aggregate %(id)s" msgstr "No se puede añadir el host %(host)s en el agregado %(id)s" -#: nova/api/openstack/compute/contrib/aggregates.py:194 -#: nova/api/openstack/compute/contrib/aggregates.py:198 +#: nova/api/openstack/compute/contrib/aggregates.py:201 +#: nova/api/openstack/compute/contrib/aggregates.py:205 #: nova/api/openstack/compute/plugins/v3/aggregates.py:153 #: nova/api/openstack/compute/plugins/v3/aggregates.py:157 #, python-format msgid "Cannot remove host 
%(host)s in aggregate %(id)s" msgstr "No se puede eliminar el host %(host)s en el agregado %(id)s" -#: nova/api/openstack/compute/contrib/aggregates.py:217 +#: nova/api/openstack/compute/contrib/aggregates.py:224 #: nova/api/openstack/compute/plugins/v3/aggregates.py:177 msgid "The value of metadata must be a dict" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:229 +#: nova/api/openstack/compute/contrib/aggregates.py:237 #, python-format msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" msgstr "No se pueden establecer metadatos %(metadata)s en el agregado %(id)s" @@ -3310,28 +3123,32 @@ msgstr "Se ha creado instantánea asistida del volúmen %s" msgid "Delete snapshot with id: %s" msgstr "Suprimir instantánea con el ID: %s" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:104 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:105 msgid "Attach interface" msgstr "Conectar interfaz" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:119 -#: nova/api/openstack/compute/contrib/attach_interfaces.py:154 -#: nova/api/openstack/compute/contrib/attach_interfaces.py:177 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:169 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:120 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:158 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:184 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:174 +#: nova/network/security_group/neutron_driver.py:510 +#: nova/network/security_group/neutron_driver.py:514 +#: nova/network/security_group/neutron_driver.py:518 +#: nova/network/security_group/neutron_driver.py:522 +#: nova/network/security_group/neutron_driver.py:526 msgid "Network driver does not support this function." msgstr "El controlador de red no soporta esta función." 
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:123 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:124 msgid "Failed to attach interface" msgstr "Se ha encontrado un error al conectar la interfaz." -#: nova/api/openstack/compute/contrib/attach_interfaces.py:130 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:131 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:134 msgid "Attachments update is not supported" msgstr "La actualización de dispositivos conectados no está soportada" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:142 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:142 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:146 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:144 #, python-format msgid "Detach interface %s" msgstr "Desconectar interfaz %s" @@ -3345,40 +3162,33 @@ msgstr "Se debe especificar la dirección en la forma xx:xx:xx:xx:xx:xx:xx" msgid "Must specify id or address" msgstr "Debe especificar id o dirección" -#: nova/api/openstack/compute/contrib/cells.py:252 +#: nova/api/openstack/compute/contrib/cells.py:250 #, python-format msgid "Cell %(id)s not found." msgstr "No se ha encontrado Cell %(id)s." -#: nova/api/openstack/compute/contrib/cells.py:285 -#: nova/api/openstack/compute/plugins/v3/cells.py:192 +#: nova/api/openstack/compute/contrib/cells.py:286 msgid "Cell name cannot be empty" msgstr "El nombre de célula no puede estar vacío" #: nova/api/openstack/compute/contrib/cells.py:289 -#: nova/api/openstack/compute/plugins/v3/cells.py:196 msgid "Cell name cannot contain '!' or '.'" msgstr "El nombre de célula no puede contener '!' 
o '.'" -#: nova/api/openstack/compute/contrib/cells.py:296 -#: nova/api/openstack/compute/plugins/v3/cells.py:203 +#: nova/api/openstack/compute/contrib/cells.py:295 msgid "Cell type must be 'parent' or 'child'" msgstr "El tipo de célula debe ser 'padre' o 'hijo'" -#: nova/api/openstack/compute/contrib/cells.py:352 -#: nova/api/openstack/compute/contrib/cells.py:376 -#: nova/api/openstack/compute/plugins/v3/cells.py:259 -#: nova/api/openstack/compute/plugins/v3/cells.py:282 +#: nova/api/openstack/compute/contrib/cells.py:353 +#: nova/api/openstack/compute/contrib/cells.py:378 msgid "No cell information in request" msgstr "No hay información de célula en la solicitud" #: nova/api/openstack/compute/contrib/cells.py:357 -#: nova/api/openstack/compute/plugins/v3/cells.py:264 msgid "No cell name in request" msgstr "No hay ningún nombre de célula en la solicitud" -#: nova/api/openstack/compute/contrib/cells.py:411 -#: nova/api/openstack/compute/plugins/v3/cells.py:319 +#: nova/api/openstack/compute/contrib/cells.py:415 msgid "Only 'updated_since', 'project_id' and 'deleted' are understood." msgstr "Solamente 'updated_since', 'project_id' y 'deleted' son entendidos." @@ -3455,23 +3265,27 @@ msgstr "Incapaz de obtener consola rdp, funcionalidad no implementada" msgid "%s must be either 'MANUAL' or 'AUTO'." msgstr "%s debe ser 'MANUAL' o 'AUTO'." -#: nova/api/openstack/compute/contrib/evacuate.py:53 -msgid "host and onSharedStorage must be specified." -msgstr "Se deben especificar host y onSharedStorage" +#: nova/api/openstack/compute/contrib/evacuate.py:54 +msgid "host must be specified." +msgstr "" #: nova/api/openstack/compute/contrib/evacuate.py:61 +msgid "onSharedStorage must be specified." 
+msgstr "" + +#: nova/api/openstack/compute/contrib/evacuate.py:69 #: nova/api/openstack/compute/plugins/v3/evacuate.py:67 msgid "admin password can't be changed on existing disk" msgstr "No se puede cambiar la contraseña de administrador en el disco existente" -#: nova/api/openstack/compute/contrib/evacuate.py:71 -#: nova/api/openstack/compute/plugins/v3/evacuate.py:77 +#: nova/api/openstack/compute/contrib/evacuate.py:80 +#: nova/api/openstack/compute/plugins/v3/evacuate.py:78 #, python-format msgid "Compute host %s not found." msgstr "No se ha encontrado Compute host %s." -#: nova/api/openstack/compute/contrib/evacuate.py:77 -#: nova/api/openstack/compute/plugins/v3/evacuate.py:83 +#: nova/api/openstack/compute/contrib/evacuate.py:86 +#: nova/api/openstack/compute/plugins/v3/evacuate.py:84 msgid "The target host can't be the same one." msgstr "" @@ -3523,88 +3337,88 @@ msgstr "" msgid "DNS entries not found." msgstr "No se han encontrado entradas DNS." -#: nova/api/openstack/compute/contrib/floating_ips.py:129 -#: nova/api/openstack/compute/contrib/floating_ips.py:183 +#: nova/api/openstack/compute/contrib/floating_ips.py:130 +#: nova/api/openstack/compute/contrib/floating_ips.py:186 #, python-format msgid "Floating ip not found for id %s" msgstr "No se ha encontrado la IP flotante para el id %s." -#: nova/api/openstack/compute/contrib/floating_ips.py:162 +#: nova/api/openstack/compute/contrib/floating_ips.py:163 #, python-format msgid "No more floating ips in pool %s." msgstr "No hay más IP flotantes en la agrupación %s." -#: nova/api/openstack/compute/contrib/floating_ips.py:164 +#: nova/api/openstack/compute/contrib/floating_ips.py:165 msgid "No more floating ips available." msgstr "No hay más IP flotantes disponibles." -#: nova/api/openstack/compute/contrib/floating_ips.py:168 +#: nova/api/openstack/compute/contrib/floating_ips.py:169 #, python-format msgid "IP allocation over quota in pool %s." 
msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:170 +#: nova/api/openstack/compute/contrib/floating_ips.py:171 msgid "IP allocation over quota." msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:220 -#: nova/api/openstack/compute/contrib/floating_ips.py:285 -#: nova/api/openstack/compute/contrib/security_groups.py:482 +#: nova/api/openstack/compute/contrib/floating_ips.py:223 +#: nova/api/openstack/compute/contrib/floating_ips.py:288 +#: nova/api/openstack/compute/contrib/security_groups.py:488 msgid "Missing parameter dict" msgstr "Falta el parámetro dict " -#: nova/api/openstack/compute/contrib/floating_ips.py:223 -#: nova/api/openstack/compute/contrib/floating_ips.py:288 +#: nova/api/openstack/compute/contrib/floating_ips.py:226 +#: nova/api/openstack/compute/contrib/floating_ips.py:291 msgid "Address not specified" msgstr "Dirección no especificada " -#: nova/api/openstack/compute/contrib/floating_ips.py:229 +#: nova/api/openstack/compute/contrib/floating_ips.py:232 msgid "No nw_info cache associated with instance" msgstr "No hay memoria caché nw_info asociada con la instancia " -#: nova/api/openstack/compute/contrib/floating_ips.py:234 +#: nova/api/openstack/compute/contrib/floating_ips.py:237 msgid "No fixed ips associated to instance" msgstr "No hay IP fijas asociadas a la instancia " -#: nova/api/openstack/compute/contrib/floating_ips.py:245 +#: nova/api/openstack/compute/contrib/floating_ips.py:248 msgid "Specified fixed address not assigned to instance" msgstr "Dirección fija especificada no asignada a la instancia" -#: nova/api/openstack/compute/contrib/floating_ips.py:259 +#: nova/api/openstack/compute/contrib/floating_ips.py:262 msgid "floating ip is already associated" msgstr "La IP flotante ya está asociada" -#: nova/api/openstack/compute/contrib/floating_ips.py:262 +#: nova/api/openstack/compute/contrib/floating_ips.py:265 msgid "l3driver call to add floating ip failed" msgstr "La llamada l3driver para añadir IP 
flotante ha fallado" -#: nova/api/openstack/compute/contrib/floating_ips.py:265 -#: nova/api/openstack/compute/contrib/floating_ips.py:296 +#: nova/api/openstack/compute/contrib/floating_ips.py:268 +#: nova/api/openstack/compute/contrib/floating_ips.py:299 msgid "floating ip not found" msgstr "No se ha encontrado IP flotante" -#: nova/api/openstack/compute/contrib/floating_ips.py:270 +#: nova/api/openstack/compute/contrib/floating_ips.py:273 msgid "Error. Unable to associate floating ip" msgstr "Error. No se puede asociar IP flotante" -#: nova/api/openstack/compute/contrib/floating_ips.py:311 +#: nova/api/openstack/compute/contrib/floating_ips.py:314 msgid "Floating ip is not associated" msgstr "La ip flotante no está asociada " -#: nova/api/openstack/compute/contrib/floating_ips.py:315 +#: nova/api/openstack/compute/contrib/floating_ips.py:318 #, python-format msgid "Floating ip %(address)s is not associated with instance %(id)s." msgstr "" "La dirección IP flotante %(address)s no está asociada con la instancia " "%(id)s." -#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:118 +#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:116 #: nova/api/openstack/compute/contrib/services.py:173 #: nova/api/openstack/compute/plugins/v3/services.py:124 msgid "Unknown action" msgstr "Acción desconocida" -#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:146 +#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:144 #: nova/cmd/manage.py:417 #, python-format msgid "/%s should be specified as single address(es) not in cidr format" @@ -3616,79 +3430,79 @@ msgstr "" msgid "fping utility is not found." msgstr "No se encuentra el programa de utilidad fping." 
-#: nova/api/openstack/compute/contrib/hosts.py:183 +#: nova/api/openstack/compute/contrib/hosts.py:185 #, python-format msgid "Invalid update setting: '%s'" msgstr "Valor de actualización no válido: '%s' " -#: nova/api/openstack/compute/contrib/hosts.py:186 +#: nova/api/openstack/compute/contrib/hosts.py:188 #, python-format msgid "Invalid status: '%s'" msgstr "Estado no válido: '%s' " -#: nova/api/openstack/compute/contrib/hosts.py:188 +#: nova/api/openstack/compute/contrib/hosts.py:190 #, python-format msgid "Invalid mode: '%s'" msgstr "Modalidad no válida: '%s' " -#: nova/api/openstack/compute/contrib/hosts.py:190 +#: nova/api/openstack/compute/contrib/hosts.py:192 msgid "'status' or 'maintenance_mode' needed for host update" msgstr "Se necesita 'status' o 'maintenance_mode' para actualización de host" -#: nova/api/openstack/compute/contrib/hosts.py:206 -#: nova/api/openstack/compute/plugins/v3/hosts.py:134 +#: nova/api/openstack/compute/contrib/hosts.py:208 +#: nova/api/openstack/compute/plugins/v3/hosts.py:135 #, python-format msgid "Putting host %(host_name)s in maintenance mode %(mode)s." msgstr "Poniendo el host %(host_name)s en modalidad de mantenimiento %(mode)s." -#: nova/api/openstack/compute/contrib/hosts.py:212 -#: nova/api/openstack/compute/plugins/v3/hosts.py:140 +#: nova/api/openstack/compute/contrib/hosts.py:214 +#: nova/api/openstack/compute/plugins/v3/hosts.py:141 msgid "Virt driver does not implement host maintenance mode." msgstr "El controlador virt no implementa la modalidad de mantenimiento de host." -#: nova/api/openstack/compute/contrib/hosts.py:227 -#: nova/api/openstack/compute/plugins/v3/hosts.py:156 +#: nova/api/openstack/compute/contrib/hosts.py:229 +#: nova/api/openstack/compute/plugins/v3/hosts.py:157 #, python-format msgid "Enabling host %s." msgstr "Habilitando el host %s." 
-#: nova/api/openstack/compute/contrib/hosts.py:229 -#: nova/api/openstack/compute/plugins/v3/hosts.py:158 +#: nova/api/openstack/compute/contrib/hosts.py:231 +#: nova/api/openstack/compute/plugins/v3/hosts.py:159 #, python-format msgid "Disabling host %s." msgstr "Inhabilitando el host %s." -#: nova/api/openstack/compute/contrib/hosts.py:234 -#: nova/api/openstack/compute/plugins/v3/hosts.py:163 +#: nova/api/openstack/compute/contrib/hosts.py:236 +#: nova/api/openstack/compute/plugins/v3/hosts.py:164 msgid "Virt driver does not implement host disabled status." msgstr "El controlador virt no implementa el estado inhabilitado de host." -#: nova/api/openstack/compute/contrib/hosts.py:250 -#: nova/api/openstack/compute/plugins/v3/hosts.py:181 +#: nova/api/openstack/compute/contrib/hosts.py:252 +#: nova/api/openstack/compute/plugins/v3/hosts.py:182 msgid "Virt driver does not implement host power management." msgstr "El controlador virt no implementa la gestión de alimentación de host." -#: nova/api/openstack/compute/contrib/hosts.py:336 -#: nova/api/openstack/compute/plugins/v3/hosts.py:274 +#: nova/api/openstack/compute/contrib/hosts.py:338 +#: nova/api/openstack/compute/plugins/v3/hosts.py:275 msgid "Describe-resource is admin only functionality" msgstr "El recurso de descripción es funcionalidad sólo de administrador" -#: nova/api/openstack/compute/contrib/hypervisors.py:193 -#: nova/api/openstack/compute/contrib/hypervisors.py:205 -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:93 -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:105 -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:140 +#: nova/api/openstack/compute/contrib/hypervisors.py:208 +#: nova/api/openstack/compute/contrib/hypervisors.py:220 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:100 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:112 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:147 #, python-format msgid "Hypervisor with ID '%s' could not be 
found." msgstr "El hipervisor con el ID '%s' no se ha podido encontrar. " -#: nova/api/openstack/compute/contrib/hypervisors.py:213 -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:113 +#: nova/api/openstack/compute/contrib/hypervisors.py:228 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:120 msgid "Virt driver does not implement uptime function." msgstr "El controlador virt no implementa la función uptime." -#: nova/api/openstack/compute/contrib/hypervisors.py:229 -#: nova/api/openstack/compute/contrib/hypervisors.py:239 +#: nova/api/openstack/compute/contrib/hypervisors.py:244 +#: nova/api/openstack/compute/contrib/hypervisors.py:254 #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "No es ha podido encontrar ningún hipervisor que coincida con '%s'. " @@ -3703,27 +3517,22 @@ msgstr "Indicación de fecha y hora no válida para la fecha %s" msgid "Quota exceeded, too many key pairs." msgstr "Cuota superada, demasiados pares de claves." -#: nova/api/openstack/compute/contrib/multinic.py:54 +#: nova/api/openstack/compute/contrib/multinic.py:55 msgid "Missing 'networkId' argument for addFixedIp" msgstr "Falta el argumento 'networkId' para addFixedIp" -#: nova/api/openstack/compute/contrib/multinic.py:70 +#: nova/api/openstack/compute/contrib/multinic.py:75 msgid "Missing 'address' argument for removeFixedIp" msgstr "Falta el argumento 'address' para removeFixedIp " -#: nova/api/openstack/compute/contrib/multinic.py:80 -#, python-format -msgid "Unable to find address %r" -msgstr "No se puede encontrar la dirección %r" - #: nova/api/openstack/compute/contrib/networks_associate.py:40 #: nova/api/openstack/compute/contrib/networks_associate.py:56 #: nova/api/openstack/compute/contrib/networks_associate.py:74 -#: nova/api/openstack/compute/contrib/os_networks.py:78 -#: nova/api/openstack/compute/contrib/os_networks.py:93 -#: nova/api/openstack/compute/contrib/os_networks.py:106 -#: 
nova/api/openstack/compute/contrib/os_tenant_networks.py:110 -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:137 +#: nova/api/openstack/compute/contrib/os_networks.py:79 +#: nova/api/openstack/compute/contrib/os_networks.py:94 +#: nova/api/openstack/compute/contrib/os_networks.py:107 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:112 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:144 msgid "Network not found" msgstr "No se ha encontrado la red" @@ -3745,68 +3554,54 @@ msgstr "" "La asociación de anfitrión no está implementada por la API de red " "configurada" -#: nova/api/openstack/compute/contrib/os_networks.py:81 +#: nova/api/openstack/compute/contrib/os_networks.py:82 msgid "Disassociate network is not implemented by the configured Network API" msgstr "La desasociación de red no está implementada por la API de red configurada" -#: nova/api/openstack/compute/contrib/os_networks.py:100 -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125 -#, python-format -msgid "Deleting network with id %s" -msgstr "Suprimiendo red con el id %s" - -#: nova/api/openstack/compute/contrib/os_networks.py:118 +#: nova/api/openstack/compute/contrib/os_networks.py:119 msgid "Missing network in body" msgstr "Falta red en el cuerpo" -#: nova/api/openstack/compute/contrib/os_networks.py:122 +#: nova/api/openstack/compute/contrib/os_networks.py:123 msgid "Network label is required" msgstr "Se necesita etiqueta de red" -#: nova/api/openstack/compute/contrib/os_networks.py:126 +#: nova/api/openstack/compute/contrib/os_networks.py:127 msgid "Network cidr or cidr_v6 is required" msgstr "Se necesita la red cidr o cidr_v6" -#: nova/api/openstack/compute/contrib/os_networks.py:152 +#: nova/api/openstack/compute/contrib/os_networks.py:153 msgid "VLAN support must be enabled" msgstr "El soporte de VLAN debe estar habilitado." 
-#: nova/api/openstack/compute/contrib/os_networks.py:155 +#: nova/api/openstack/compute/contrib/os_networks.py:156 #, python-format msgid "Cannot associate network %(network)s with project %(project)s: %(message)s" msgstr "" "No se puede asociar la red %(network)s con el proyecto %(project)s: " "%(message)s" -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:83 -msgid "Failed to get default networks" -msgstr "Fallo al obtener las redes predeterminadas" - -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:122 -msgid "Failed to update usages deallocating network." -msgstr "No se han podido actualizar los usos desasignando la red." - -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:157 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:168 msgid "No CIDR requested" msgstr "No se ha solicitado ningún CIDR" -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:163 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:174 msgid "Requested network does not contain enough (2+) usable hosts" msgstr "La red solicitada no contiene suficientes hosts utilizables (2+) " -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:167 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:178 msgid "CIDR is malformed." msgstr "CIDR está formado incorrectamente." -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:170 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:181 msgid "Address could not be converted." msgstr "La dirección no se ha podido convertir." -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:178 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:189 msgid "Quota exceeded, too many networks." msgstr "Se ha superado la cuota, demasiadas redes." 
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:191 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:202 msgid "Create networks failed" msgstr "Ha fallado la creación de redes" @@ -3867,7 +3662,7 @@ msgid "Malformed scheduler_hints attribute" msgstr "Atributo scheduler_hints formado incorrectamente" #: nova/api/openstack/compute/contrib/security_group_default_rules.py:127 -#: nova/api/openstack/compute/contrib/security_groups.py:387 +#: nova/api/openstack/compute/contrib/security_groups.py:394 msgid "Not enough parameters to build a valid rule." msgstr "No hay suficientes parámetros para crear una regla válida." @@ -3879,81 +3674,80 @@ msgstr "Esta regla predeterminada ya existe." msgid "security group default rule not found" msgstr "regla predeterminada de grupo de seguridad no encontrada" -#: nova/api/openstack/compute/contrib/security_groups.py:395 +#: nova/api/openstack/compute/contrib/security_groups.py:402 #, python-format msgid "Bad prefix for network in cidr %s" msgstr "Prefijo erróneo para red en cidr %s" -#: nova/api/openstack/compute/contrib/security_groups.py:485 +#: nova/api/openstack/compute/contrib/security_groups.py:491 msgid "Security group not specified" msgstr "Grupo de seguridad no especificado" -#: nova/api/openstack/compute/contrib/security_groups.py:489 +#: nova/api/openstack/compute/contrib/security_groups.py:495 msgid "Security group name cannot be empty" msgstr "El nombre de grupo de seguridad no puede estar vacío" -#: nova/api/openstack/compute/contrib/server_external_events.py:92 +#: nova/api/openstack/compute/contrib/server_external_events.py:93 #: nova/api/openstack/compute/plugins/v3/server_external_events.py:65 #, python-format msgid "event entity requires key %(key)s" msgstr "La entidad de evento requiere clave %(key)s" -#: nova/api/openstack/compute/contrib/server_external_events.py:96 +#: nova/api/openstack/compute/contrib/server_external_events.py:97 #: 
nova/api/openstack/compute/plugins/v3/server_external_events.py:69 #, python-format msgid "event entity contains unsupported items: %s" msgstr "La entidad de evento contiene objetos no soportados: %s" -#: nova/api/openstack/compute/contrib/server_external_events.py:102 +#: nova/api/openstack/compute/contrib/server_external_events.py:103 #: nova/api/openstack/compute/plugins/v3/server_external_events.py:75 #, python-format msgid "Invalid event status `%s'" msgstr "Estado de evento inválido: `%s'" -#: nova/api/openstack/compute/contrib/server_external_events.py:121 -#: nova/api/openstack/compute/plugins/v3/server_external_events.py:94 +#: nova/api/openstack/compute/contrib/server_external_events.py:126 #, python-format -msgid "Create event %(name)s:%(tag)s for instance %(instance_uuid)s" -msgstr "Cear evento %(name)s:%(tag)s para la instancia %(instance_uuid)s" +msgid "Creating event %(name)s:%(tag)s for instance %(instance_uuid)s" +msgstr "" -#: nova/api/openstack/compute/contrib/server_external_events.py:130 +#: nova/api/openstack/compute/contrib/server_external_events.py:148 #: nova/api/openstack/compute/plugins/v3/server_external_events.py:103 msgid "No instances found for any event" msgstr "No se han encontrado instancias en cualquier evento" -#: nova/api/openstack/compute/contrib/server_groups.py:161 +#: nova/api/openstack/compute/contrib/server_groups.py:163 msgid "Conflicting policies configured!" msgstr "Políticas conflictivas configuradas!" -#: nova/api/openstack/compute/contrib/server_groups.py:166 +#: nova/api/openstack/compute/contrib/server_groups.py:168 #, python-format msgid "Invalid policies: %s" msgstr "Políticas inválidas: %s" -#: nova/api/openstack/compute/contrib/server_groups.py:171 +#: nova/api/openstack/compute/contrib/server_groups.py:173 msgid "Duplicate policies configured!" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:176 +#: nova/api/openstack/compute/contrib/server_groups.py:178 msgid "the body is invalid." 
msgstr "El cuerpo es inválido." -#: nova/api/openstack/compute/contrib/server_groups.py:185 +#: nova/api/openstack/compute/contrib/server_groups.py:187 #, python-format msgid "'%s' is either missing or empty." msgstr "'%s' no se encuentra o está vacío." -#: nova/api/openstack/compute/contrib/server_groups.py:191 +#: nova/api/openstack/compute/contrib/server_groups.py:193 #, python-format msgid "Invalid format for name: '%s'" msgstr "Formato inválido para el nombre: '%s'" -#: nova/api/openstack/compute/contrib/server_groups.py:199 +#: nova/api/openstack/compute/contrib/server_groups.py:201 #, python-format msgid "'%s' is not a list" msgstr "'%s' no es una lista" -#: nova/api/openstack/compute/contrib/server_groups.py:203 +#: nova/api/openstack/compute/contrib/server_groups.py:205 #, python-format msgid "unsupported fields: %s" msgstr "Campos no soportados: %s" @@ -3982,11 +3776,11 @@ msgstr "Atributo no válido en la solicitud" msgid "Missing disabled reason field" msgstr "Campo disabled reason omitido." -#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:230 +#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:231 msgid "Datetime is in invalid format" msgstr "" -#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:249 +#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:250 msgid "Invalid start time. The start time cannot occur after the end time." msgstr "" "Hora de inicio no válida. 
La hora de inicio no pude tener lugar después " @@ -4067,12 +3861,12 @@ msgstr "access_ip_v6 no tiene el formato IPv6 apropiado" msgid "Invalid request format for metadata" msgstr "Formato de solicitud inválido para metadatos" -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:106 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:109 #, python-format msgid "Attach interface to %s" msgstr "Asociar interfaz a %s" -#: nova/api/openstack/compute/plugins/v3/cells.py:187 +#: nova/api/openstack/compute/plugins/v3/cells.py:189 #, python-format msgid "Cell %s doesn't exist." msgstr "No existe Cell %s." @@ -4098,36 +3892,55 @@ msgstr "" "El volumen %(volume_id)s no se encuentra asociado a la instancia " "%(server_id)s" -#: nova/api/openstack/compute/plugins/v3/flavors.py:94 +#: nova/api/openstack/compute/plugins/v3/flavors.py:96 #, python-format msgid "Invalid min_ram filter [%s]" msgstr "Filtro min_ram [%s] no válido" -#: nova/api/openstack/compute/plugins/v3/flavors.py:101 +#: nova/api/openstack/compute/plugins/v3/flavors.py:103 #, python-format msgid "Invalid min_disk filter [%s]" msgstr "Filtro min_disk inválido [%s]" -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:125 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:132 msgid "Need parameter 'query' to specify which hypervisor to filter on" msgstr "" "Se necesita el parámetro 'query' para especificar en qué hipervisor se " "aplicará el filtro" +#: nova/api/openstack/compute/plugins/v3/pause_server.py:59 +#: nova/api/openstack/compute/plugins/v3/pause_server.py:81 +msgid "Virt driver does not implement pause function." +msgstr "El controlador Virt no implementa la función de pausa." 
+ #: nova/api/openstack/compute/plugins/v3/server_actions.py:76 #, python-format msgid "Action %s not found" msgstr "Acción %s no encontrada" -#: nova/api/openstack/compute/plugins/v3/servers.py:212 +#: nova/api/openstack/compute/plugins/v3/server_diagnostics.py:46 +msgid "Unable to get diagnostics, functionality not implemented" +msgstr "" + +#: nova/api/openstack/compute/plugins/v3/server_external_events.py:94 +#, python-format +msgid "Create event %(name)s:%(tag)s for instance %(instance_uuid)s" +msgstr "Cear evento %(name)s:%(tag)s para la instancia %(instance_uuid)s" + +#: nova/api/openstack/compute/plugins/v3/servers.py:235 msgid "Invalid changes_since value" msgstr "Valor changes_since inválido" -#: nova/api/openstack/compute/plugins/v3/servers.py:335 +#: nova/api/openstack/compute/plugins/v3/servers.py:306 +#, python-format +msgid "Flavor '%s' could not be found " +msgstr "El sabor '%s' no se ha podido encontrar " + +#: nova/api/openstack/compute/plugins/v3/servers.py:358 msgid "Unknown argument: port" msgstr "Argumento desconocido: puerto" -#: nova/api/openstack/compute/plugins/v3/servers.py:343 +#: nova/api/openstack/compute/plugins/v3/servers.py:366 #, python-format msgid "" "Specified Fixed IP '%(addr)s' cannot be used with port '%(port)s': port " @@ -4137,51 +3950,50 @@ msgstr "" "puerto '%(port)s': el puerto ya cuenta con una dirección IP física " "asignada." -#: nova/api/openstack/compute/plugins/v3/servers.py:412 -#: nova/api/openstack/compute/plugins/v3/servers.py:587 -msgid "The request body is invalid" -msgstr "El cuerpo solicitado es inválido" - -#: nova/api/openstack/compute/plugins/v3/servers.py:470 -#: nova/api/openstack/compute/plugins/v3/servers.py:498 +#: nova/api/openstack/compute/plugins/v3/servers.py:494 +#: nova/api/openstack/compute/plugins/v3/servers.py:522 msgid "Invalid flavor_ref provided." msgstr "Se ha proporcionado un flavor_ref inválido." 
-#: nova/api/openstack/compute/plugins/v3/servers.py:598 +#: nova/api/openstack/compute/plugins/v3/servers.py:620 +msgid "The request body is invalid" +msgstr "El cuerpo solicitado es inválido" + +#: nova/api/openstack/compute/plugins/v3/servers.py:631 msgid "host_id cannot be updated." msgstr "No se puede actualizar host_id." -#: nova/api/openstack/compute/plugins/v3/servers.py:743 +#: nova/api/openstack/compute/plugins/v3/servers.py:782 msgid "Invalid image_ref provided." msgstr "La image_ref proporcionada es inválida." -#: nova/api/openstack/compute/plugins/v3/servers.py:762 +#: nova/api/openstack/compute/plugins/v3/servers.py:801 msgid "Missing image_ref attribute" msgstr "Atributo image_ref ausente" -#: nova/api/openstack/compute/plugins/v3/servers.py:769 +#: nova/api/openstack/compute/plugins/v3/servers.py:808 msgid "Missing flavor_ref attribute" msgstr "Atributo flavor_ref ausente." -#: nova/api/openstack/compute/plugins/v3/servers.py:782 +#: nova/api/openstack/compute/plugins/v3/servers.py:822 msgid "Resize request has invalid 'flavor_ref' attribute." msgstr "" "La solicitud de modifiación de tamaño tiene el atributo 'flavor_ref' " "inválido." -#: nova/api/openstack/compute/plugins/v3/servers.py:785 +#: nova/api/openstack/compute/plugins/v3/servers.py:825 msgid "Resize requests require 'flavor_ref' attribute." msgstr "La solicitud de modificación de tamaño requiere el atributo 'flavor_ref'." -#: nova/api/openstack/compute/plugins/v3/servers.py:801 +#: nova/api/openstack/compute/plugins/v3/servers.py:842 msgid "Could not parse image_ref from request." msgstr "No se puede validar image_ref en la solicitud." -#: nova/api/openstack/compute/plugins/v3/servers.py:885 +#: nova/api/openstack/compute/plugins/v3/servers.py:927 msgid "create_image entity requires name attribute" msgstr "La entidad create_image requiere el atributo nombre." 
-#: nova/api/openstack/compute/plugins/v3/servers.py:947 +#: nova/api/openstack/compute/plugins/v3/servers.py:989 msgid "Invalid admin_password" msgstr "admin_password inválido" @@ -4189,11 +4001,7 @@ msgstr "admin_password inválido" msgid "Disabled reason contains invalid characters or is too long" msgstr "Disabled reason contiene caracteres inválidos o es demasiado larga." -#: nova/api/openstack/compute/views/servers.py:197 -msgid "Instance has had its instance_type removed from the DB" -msgstr "En la instancia se ha eliminado el tipo de instancia de la base de datos" - -#: nova/api/validation/validators.py:62 +#: nova/api/validation/validators.py:73 #, python-format msgid "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" @@ -4212,59 +4020,59 @@ msgstr "" "Nova. Esto puede cambiar en el futuro, pero los desplegadores actuales " "deben estar concientes que el uso en producción ahora puede ser riesgoso." -#: nova/cells/messaging.py:205 +#: nova/cells/messaging.py:204 #, python-format msgid "Error processing message locally: %(exc)s" msgstr "Error al procesar el mensaje localmente: %(exc)s" -#: nova/cells/messaging.py:366 nova/cells/messaging.py:374 +#: nova/cells/messaging.py:365 nova/cells/messaging.py:373 #, python-format msgid "destination is %(target_cell)s but routing_path is %(routing_path)s" msgstr "" "el destino es %(target_cell)s pero la vía de acceso de direccionamiento " "es %(routing_path)s" -#: nova/cells/messaging.py:386 +#: nova/cells/messaging.py:385 #, python-format msgid "Unknown %(cell_type)s when routing to %(target_cell)s" msgstr "%(cell_type)s desconocido al direccionar a %(target_cell)s" -#: nova/cells/messaging.py:410 +#: nova/cells/messaging.py:409 #, python-format msgid "Error locating next hop for message: %(exc)s" msgstr "Error al localizar el siguiente salto para el mensaje: %(exc)s" -#: nova/cells/messaging.py:437 +#: nova/cells/messaging.py:436 #, python-format msgid "Failed to send message to cell: 
%(next_hop)s: %(exc)s" msgstr "No se ha podido enviar el mensaje a la célula: %(next_hop)s: %(exc)s" -#: nova/cells/messaging.py:516 +#: nova/cells/messaging.py:515 #, python-format msgid "Error locating next hops for message: %(exc)s" msgstr "Error al localizar los saltos siguientes para el mensaje: %(exc)s" -#: nova/cells/messaging.py:536 +#: nova/cells/messaging.py:535 #, python-format msgid "Error sending message to next hops: %(exc)s" msgstr "Error al enviar el mensaje a los saltos siguientes: %(exc)s" -#: nova/cells/messaging.py:554 +#: nova/cells/messaging.py:553 #, python-format msgid "Error waiting for responses from neighbor cells: %(exc)s" msgstr "Error al esperar respuestas de células vecinas: %(exc)s" -#: nova/cells/messaging.py:665 +#: nova/cells/messaging.py:664 #, python-format msgid "Unknown method '%(method)s' in compute API" msgstr "Método desconocido '%(method)s' en API de cálculo" -#: nova/cells/messaging.py:1103 +#: nova/cells/messaging.py:1106 #, python-format msgid "Got message to create instance fault: %(instance_fault)s" msgstr "Se ha obtenido mensaje para crear error de instancia: %(instance_fault)s" -#: nova/cells/messaging.py:1126 +#: nova/cells/messaging.py:1129 #, python-format msgid "" "Forcing a sync of instances, project_id=%(projid_str)s, " @@ -4273,21 +4081,21 @@ msgstr "" "Forzando una sincronización de instancias, project_id=%(projid_str)s, " "updated_since=%(since_str)s" -#: nova/cells/messaging.py:1205 +#: nova/cells/messaging.py:1208 #, python-format msgid "No match when trying to update BDM: %(bdm)s" msgstr "No se encontró resultado al intentar actualizar BDM: %(bdm)s" -#: nova/cells/messaging.py:1680 +#: nova/cells/messaging.py:1683 #, python-format msgid "No cell_name for %(method)s() from API" msgstr "No hay cell_name para %(method)s() desde la API" -#: nova/cells/messaging.py:1697 +#: nova/cells/messaging.py:1700 msgid "No cell_name for instance update from API" msgstr "No hay cell_name para actualización de instancia 
desde la API" -#: nova/cells/messaging.py:1860 +#: nova/cells/messaging.py:1863 #, python-format msgid "Returning exception %s to caller" msgstr "Devolviendo excepción %s al interlocutor" @@ -4300,16 +4108,16 @@ msgstr "Fallo al notificar las celdas de actualización/creación de BDM." msgid "Failed to notify cells of BDM destroy." msgstr "Fallo al notiifcar las celdas de destrucción de BDM" -#: nova/cells/scheduler.py:192 +#: nova/cells/scheduler.py:191 #, python-format msgid "Couldn't communicate with cell '%s'" msgstr "No se puede comunicar con la celda '%s'" -#: nova/cells/scheduler.py:196 +#: nova/cells/scheduler.py:195 msgid "Couldn't communicate with any cells" msgstr "No se puede establecer comunicación con alguna celda" -#: nova/cells/scheduler.py:234 +#: nova/cells/scheduler.py:233 #, python-format msgid "" "No cells available when scheduling. Will retry in %(sleep_time)s " @@ -4318,17 +4126,22 @@ msgstr "" "No hay celdas disponibles al planificar. Se reintentará dentro de " "%(sleep_time)s segundo(s)" -#: nova/cells/scheduler.py:240 +#: nova/cells/scheduler.py:239 #, python-format msgid "Error scheduling instances %(instance_uuids)s" msgstr "Error al planificar instancias %(instance_uuids)s" -#: nova/cells/state.py:352 +#: nova/cells/state.py:182 +#, python-format +msgid "DB error: %s" +msgstr "Error de base de datos: %s" + +#: nova/cells/state.py:363 #, python-format msgid "Unknown cell '%(cell_name)s' when trying to update capabilities" msgstr "Célula '%(cell_name)s' desconocida al intentar actualizar prestaciones" -#: nova/cells/state.py:367 +#: nova/cells/state.py:378 #, python-format msgid "Unknown cell '%(cell_name)s' when trying to update capacities" msgstr "Célula '%(cell_name)s' desconocida al intentar actualizar capacidades" @@ -4465,19 +4278,19 @@ msgstr "Ha fallado la instrucción, por favor compruebe el log para más informa msgid "No db access allowed in nova-compute: %s" msgstr "No se permite acceso a la base de datos en nova-compute: %s" -#: 
nova/cmd/dhcpbridge.py:109 +#: nova/cmd/dhcpbridge.py:108 #, python-format msgid "No db access allowed in nova-dhcpbridge: %s" msgstr "No se permite acceso a la base de datos en nova-dhcpbridge: %s" -#: nova/cmd/dhcpbridge.py:132 +#: nova/cmd/dhcpbridge.py:131 #, python-format msgid "Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'" msgstr "" "Se han llamado '%(action)s' para la mac '%(mac)s' con dirección IP " "'%(ip)s'" -#: nova/cmd/dhcpbridge.py:142 +#: nova/cmd/dhcpbridge.py:141 msgid "Environment variable 'NETWORK_ID' must be set." msgstr "La variable de entorno 'NETWORK_ID' debe ser establecida." @@ -4812,16 +4625,16 @@ msgstr "No hay entradas de nova en el registro de sistema!" msgid "No db access allowed in nova-network: %s" msgstr "No se permite acceso a base de datos en nova-network: %s" -#: nova/compute/api.py:353 +#: nova/compute/api.py:355 msgid "Cannot run any more instances of this type." msgstr "No se pueden ejecutar más instancias de este tipo. " -#: nova/compute/api.py:360 +#: nova/compute/api.py:362 #, python-format msgid "Can only run %s more instances of this type." msgstr "Sólo se pueden ejecutar %s instancias más de este tipo. " -#: nova/compute/api.py:372 +#: nova/compute/api.py:374 #, python-format msgid "" "%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)d " @@ -4830,7 +4643,7 @@ msgstr "" "Cuota %(overs)s excedida para %(pid)s, intentando ejecutar %(min_count)d " "intsancias. %(msg)s" -#: nova/compute/api.py:376 +#: nova/compute/api.py:378 #, python-format msgid "" "%(overs)s quota exceeded for %(pid)s, tried to run between %(min_count)d " @@ -4839,58 +4652,27 @@ msgstr "" "Cuota %(overs)s excedida para %(pid)s, intentando ejecutar entre " "%(min_count)d y %(max_count)d instancias. %(msg)s" -#: nova/compute/api.py:397 +#: nova/compute/api.py:399 msgid "Metadata type should be dict." msgstr "El tipo de metadato debería ser dict." 
-#: nova/compute/api.py:403 -#, python-format -msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" -msgstr "" -"Se ha superado la cuota para %(pid)s, se ha intentado definir " -"%(num_metadata)s propiedades de metadatos" - -#: nova/compute/api.py:415 -#, python-format -msgid "Metadata property key '%s' is not a string." -msgstr "La clave de propiedad de los metadatos '%s' no es una cadena." - -#: nova/compute/api.py:418 -#, python-format -msgid "Metadata property value '%(v)s' for key '%(k)s' is not a string." -msgstr "" -"El valor del atributo de metadatos '%(v)s' para la clave '%(k)s' no es " -"una cadena." - -#: nova/compute/api.py:422 -msgid "Metadata property key blank" -msgstr "Clave de propiedad de metadatos en blanco" - -#: nova/compute/api.py:425 +#: nova/compute/api.py:421 msgid "Metadata property key greater than 255 characters" msgstr "Clave de propiedad metadatos de más de 255 caracteres " -#: nova/compute/api.py:428 +#: nova/compute/api.py:424 msgid "Metadata property value greater than 255 characters" msgstr "Valor de propiedad de metadatos de más de 255 caracteres " -#: nova/compute/api.py:565 -msgid "Failed to set instance name using multi_instance_display_name_template." -msgstr "" -"Se ha encontrado un error en la definición del nombre de instancia " -"mediante multi_instance_display_name_template." - -#: nova/compute/api.py:667 +#: nova/compute/api.py:663 msgid "Cannot attach one or more volumes to multiple instances" msgstr "No se pueden conectar uno o más volúmenes a varias instancias" -#: nova/compute/api.py:709 +#: nova/compute/api.py:705 msgid "The requested availability zone is not available" msgstr "La zona de disponibilidad solicitada no está disponible" -#: nova/compute/api.py:1110 +#: nova/compute/api.py:1107 msgid "" "Images with destination_type 'volume' need to have a non-zero size " "specified" @@ -4898,13 +4680,13 @@ msgstr "" "Las imágenes con destination_type 'colume? 
necesitan tener un tamaño " "especificado diferente a cero" -#: nova/compute/api.py:1141 +#: nova/compute/api.py:1138 msgid "More than one swap drive requested." msgstr "Más de un controlador de intercambio ha sido solicitado." -#: nova/compute/api.py:1290 -#: nova/tests/api/openstack/compute/test_servers.py:3145 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2484 +#: nova/compute/api.py:1277 +#: nova/tests/api/openstack/compute/test_servers.py:3199 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2488 msgid "" "Unable to launch multiple instances with a single configured port ID. " "Please launch your instance one by one with different ports." @@ -4913,41 +4695,33 @@ msgstr "" "puerto configurado. Por favor lanza tu instancia una por una con puertos " "diferentes." -#: nova/compute/api.py:1311 +#: nova/compute/api.py:1298 msgid "max_count cannot be greater than 1 if an fixed_ip is specified." msgstr "" -#: nova/compute/api.py:1415 +#: nova/compute/api.py:1404 msgid "instance termination disabled" msgstr "terminación de instancia inhabilitada" -#: nova/compute/api.py:1430 +#: nova/compute/api.py:1418 #, python-format msgid "Working on deleting snapshot %s from shelved instance..." msgstr "" "Trabajando en la remoción de la instantánea %s de la instancia " "almacenada..." -#: nova/compute/api.py:1437 +#: nova/compute/api.py:1425 #, python-format msgid "Failed to delete snapshot from shelved instance (%s)." msgstr "Fallo al remover la instantánea de la instancia almacenada (%s)." -#: nova/compute/api.py:1441 -msgid "" -"Something wrong happened when trying to delete snapshot from shelved " -"instance." -msgstr "" -"Algo malo ha pasado al intentar eliminar la instantánea de la imagen " -"almacenada." 
- -#: nova/compute/api.py:1506 +#: nova/compute/api.py:1486 msgid "Instance is already in deleting state, ignoring this request" msgstr "" "La instancia ya se encuentra en estado de remoción, ignorando esta " "solicitud" -#: nova/compute/api.py:1553 +#: nova/compute/api.py:1521 #, python-format msgid "" "Found an unconfirmed migration during delete, id: %(id)s, status: " @@ -4956,109 +4730,104 @@ msgstr "" "Se ha encontrado una migración no confirmada durante la remoción, " "identificador: %(id)s, estado: %(status)s" -#: nova/compute/api.py:1563 +#: nova/compute/api.py:1531 msgid "Instance may have been confirmed during delete" msgstr "la instanacia debe haber sido confirmada durante la remoción" -#: nova/compute/api.py:1580 +#: nova/compute/api.py:1548 #, python-format msgid "Migration %s may have been confirmed during delete" msgstr "La migración %s debe haber sido conifrmada durante la remoción" -#: nova/compute/api.py:1615 +#: nova/compute/api.py:1583 #, python-format msgid "Flavor %d not found" msgstr "El sabor %d no ha sido encontrado" -#: nova/compute/api.py:1633 +#: nova/compute/api.py:1603 #, python-format msgid "instance's host %s is down, deleting from database" msgstr "el host de la instancia %s está inactivos, se suprime de la base de datos" -#: nova/compute/api.py:1660 +#: nova/compute/api.py:1630 #, python-format msgid "Ignoring volume cleanup failure due to %s" msgstr "Ignorando la anomalía de limpieza de volumen debido a %s " -#: nova/compute/api.py:2061 +#: nova/compute/api.py:2030 #, python-format msgid "snapshot for %s" msgstr "instantánea para %s " -#: nova/compute/api.py:2399 +#: nova/compute/api.py:2368 msgid "Resize to zero disk flavor is not allowed." msgstr "" -#: nova/compute/api.py:2438 +#: nova/compute/api.py:2407 #, python-format msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance." msgstr "" "%(overs)s cuota excedida para %(pid)s, se ha intentado redimensionar la " "instancia. 
" -#: nova/compute/api.py:2613 +#: nova/compute/api.py:2582 msgid "Cannot rescue a volume-backed instance" msgstr "No se puede rescatar una instancia de volume-backed" -#: nova/compute/api.py:2840 +#: nova/compute/api.py:2809 msgid "Volume must be attached in order to detach." msgstr "El volumen debe estar conectado para desconectarse." -#: nova/compute/api.py:2860 +#: nova/compute/api.py:2829 msgid "Old volume is attached to a different instance." msgstr "Volumen antigüo está ligado a una instancia diferente." -#: nova/compute/api.py:2863 +#: nova/compute/api.py:2832 msgid "New volume must be detached in order to swap." msgstr "" "El nuevo volumen debe ser desasociado para poder activar la memoria de " "intercambio." -#: nova/compute/api.py:2866 +#: nova/compute/api.py:2835 msgid "New volume must be the same size or larger." msgstr "El nuevo volumen debe ser del mismo o de mayor tamaño." -#: nova/compute/api.py:3067 +#: nova/compute/api.py:3042 #, python-format msgid "Instance compute service state on %s expected to be down, but it was up." msgstr "" "El estado de la instancia del servicio de cómputo en %s debería ser " "inactivo, pero se encontraba activo." 
-#: nova/compute/api.py:3369 +#: nova/compute/api.py:3347 msgid "Host aggregate is not empty" msgstr "El agregado de anfitrión no está vacío" -#: nova/compute/api.py:3402 +#: nova/compute/api.py:3380 #, python-format msgid "More than 1 AZ for host %s" msgstr "" -#: nova/compute/api.py:3437 +#: nova/compute/api.py:3415 #, python-format msgid "Host already in availability zone %s" msgstr "Anfitrión actualmente en zona de disponibilidad %s" -#: nova/compute/api.py:3525 nova/tests/compute/test_keypairs.py:135 +#: nova/compute/api.py:3503 nova/tests/compute/test_keypairs.py:137 msgid "Keypair name contains unsafe characters" msgstr "El nombre de par de claves contiene caracteres no seguros" -#: nova/compute/api.py:3529 nova/tests/compute/test_keypairs.py:127 -#: nova/tests/compute/test_keypairs.py:131 -msgid "Keypair name must be between 1 and 255 characters long" -msgstr "El nombre de par de claves debe tener entre 1 y 255 caracteres de longitud" +#: nova/compute/api.py:3509 nova/tests/compute/test_keypairs.py:127 +#: nova/tests/compute/test_keypairs.py:132 +msgid "Keypair name must be string and between 1 and 255 characters long" +msgstr "" -#: nova/compute/api.py:3617 +#: nova/compute/api.py:3597 #, python-format msgid "Security group %s is not a string or unicode" msgstr "El grupo de seguridad %s no es una serie o Unicode " -#: nova/compute/api.py:3620 -#, python-format -msgid "Security group %s cannot be empty." -msgstr "El grupo de seguridad %s no puede estar vacío." - -#: nova/compute/api.py:3628 +#: nova/compute/api.py:3607 #, python-format msgid "" "Value (%(value)s) for parameter Group%(property)s is invalid. Content " @@ -5067,58 +4836,49 @@ msgstr "" "El valor (%(value)s) para el parámetro Group%(property)s es inválido. El " "contenido se limita a '%(allowed)s'." -#: nova/compute/api.py:3634 -#, python-format -msgid "Security group %s should not be greater than 255 characters." -msgstr "El grupo de seguridad %s no debe tener más de 255 caracteres. 
" - -#: nova/compute/api.py:3652 +#: nova/compute/api.py:3627 msgid "Quota exceeded, too many security groups." msgstr "Cuota superada, demasiados grupos de seguridad. " -#: nova/compute/api.py:3655 +#: nova/compute/api.py:3630 #, python-format msgid "Create Security Group %s" msgstr "Crear Grupo de Seguridad %s" -#: nova/compute/api.py:3667 +#: nova/compute/api.py:3642 #, python-format msgid "Security group %s already exists" msgstr "El grupo de seguridad %s ya existe" -#: nova/compute/api.py:3680 +#: nova/compute/api.py:3655 #, python-format msgid "Unable to update system group '%s'" msgstr "Incapaz de actualizar el grupo de sistema '%s'" -#: nova/compute/api.py:3742 +#: nova/compute/api.py:3717 #, python-format msgid "Unable to delete system group '%s'" msgstr "No se ha podido suprimir el grupo de sistemas '%s'" -#: nova/compute/api.py:3747 +#: nova/compute/api.py:3722 msgid "Security group is still in use" msgstr "El grupo de seguridad aún se está utilizando" -#: nova/compute/api.py:3757 -msgid "Failed to update usages deallocating security group" -msgstr "No se han podido actualizar los usos desasignando el grupo de seguridad " - -#: nova/compute/api.py:3760 +#: nova/compute/api.py:3735 #, python-format msgid "Delete security group %s" msgstr "Borrar grupo de seguridad %s" -#: nova/compute/api.py:3836 nova/compute/api.py:3919 +#: nova/compute/api.py:3811 nova/compute/api.py:3894 #, python-format msgid "Rule (%s) not found" msgstr "No se ha encontrado la regla (%s)" -#: nova/compute/api.py:3852 +#: nova/compute/api.py:3827 msgid "Quota exceeded, too many security group rules." 
msgstr "Cuota superada, demasiadas reglas de grupo de seguridad " -#: nova/compute/api.py:3855 +#: nova/compute/api.py:3830 #, python-format msgid "" "Security group %(name)s added %(protocol)s ingress " @@ -5127,7 +4887,7 @@ msgstr "" "Grupo de seguridad %(name)s ha agregado %(protocol)s al ingreso " "(%(from_port)s:%(to_port)s)" -#: nova/compute/api.py:3870 +#: nova/compute/api.py:3845 #, python-format msgid "" "Security group %(name)s removed %(protocol)s ingress " @@ -5136,60 +4896,52 @@ msgstr "" "El grupo de seguridad %(name)s ha removido %(protocol)s del ingreso " "(%(from_port)s:%(to_port)s)" -#: nova/compute/api.py:3926 +#: nova/compute/api.py:3901 msgid "Security group id should be integer" msgstr "El id de grupo de seguridad debe ser un entero" -#: nova/compute/claims.py:135 +#: nova/compute/claims.py:126 #, python-format -msgid "" -"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs " -"%(vcpus)d" +msgid "Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB" msgstr "" -"Intentando reclamación: memoria %(memory_mb)d MB, disco %(disk_gb)d GB, " -"VCPU %(vcpus)d" -#: nova/compute/claims.py:150 +#: nova/compute/claims.py:140 msgid "Claim successful" msgstr "Reclamación satisfactoria" -#: nova/compute/claims.py:153 +#: nova/compute/claims.py:143 msgid "memory" msgstr "memoria" -#: nova/compute/claims.py:162 +#: nova/compute/claims.py:152 msgid "disk" msgstr "Disco" -#: nova/compute/claims.py:177 nova/compute/claims.py:249 +#: nova/compute/claims.py:167 nova/compute/claims.py:230 msgid "Claim pci failed." msgstr "Reclamación pci fallida." 
-#: nova/compute/claims.py:180 -msgid "CPUs" -msgstr "CPUs" - -#: nova/compute/claims.py:192 +#: nova/compute/claims.py:177 #, python-format msgid "Total %(type)s: %(total)d %(unit)s, used: %(used).02f %(unit)s" msgstr "%(type)s totales: %(total)d %(unit)s utilizados: %(used).02f %(unit)s" -#: nova/compute/claims.py:199 +#: nova/compute/claims.py:184 #, python-format msgid "%(type)s limit not specified, defaulting to unlimited" msgstr "Límite de %(type)s no especificado, predeterminando a ilimitado" -#: nova/compute/claims.py:206 +#: nova/compute/claims.py:191 #, python-format msgid "%(type)s limit: %(limit).02f %(unit)s, free: %(free).02f %(unit)s" msgstr "Límite de %(type)s: %(limit).02f %(unit)s, libre: %(free).02f %(unit)s" -#: nova/compute/claims.py:212 +#: nova/compute/claims.py:197 #, python-format msgid "Free %(type)s %(free).02f %(unit)s < requested %(requested)d %(unit)s" msgstr "Libres %(type)s %(free).02f %(unit)s < solicitados %(requested)d %(unit)s" -#: nova/compute/flavors.py:109 +#: nova/compute/flavors.py:110 msgid "" "Flavor names can only contain alphanumeric characters, periods, dashes, " "underscores and spaces." @@ -5197,13 +4949,13 @@ msgstr "" "Los nombres de los sabores solamente puede contener caracteres " "alfanumericos, puntos, guión, guión bajo y espacios." -#: nova/compute/flavors.py:119 +#: nova/compute/flavors.py:120 msgid "id cannot contain leading and/or trailing whitespace(s)" msgstr "" "El identificador no puede contener espacio(s) vacío(s) en su inicio o " "final" -#: nova/compute/flavors.py:129 +#: nova/compute/flavors.py:130 msgid "" "Flavor id can only contain letters from A-Z (both cases), periods, " "dashes, underscores and spaces." @@ -5211,26 +4963,16 @@ msgstr "" "El identificador de sabor solo puede contener letras de la A-Z " "(mayúsculas y minúsculas), puntos, guión, guión bajo y espacios." 
-#: nova/compute/flavors.py:150 +#: nova/compute/flavors.py:151 #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "El argumento 'rxtx_factor' debe ser un flotante entre 0 y %g" -#: nova/compute/flavors.py:161 +#: nova/compute/flavors.py:162 msgid "is_public must be a boolean" msgstr "is_public debe ser un booleano" -#: nova/compute/flavors.py:166 -#, python-format -msgid "DB error: %s" -msgstr "Error de base de datos: %s" - -#: nova/compute/flavors.py:177 -#, python-format -msgid "Instance type %s not found for deletion" -msgstr "No se ha encontrado el tipo de instancia %s para suprimirse" - -#: nova/compute/flavors.py:327 +#: nova/compute/flavors.py:328 msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." @@ -5238,26 +4980,21 @@ msgstr "" "Los nombres de las claves solo pueden contener caracteres alfanuméricos, " "punto, guión, guión bajo, dos puntos y espacios." -#: nova/compute/manager.py:283 +#: nova/compute/manager.py:284 #, python-format msgid "Task possibly preempted: %s" msgstr "Tarea posiblemente preapropiada: %s" -#: nova/compute/manager.py:365 nova/compute/manager.py:2885 -#, python-format -msgid "Error while trying to clean up image %s" -msgstr "Error al intentar limpiar imagen %s" - -#: nova/compute/manager.py:506 +#: nova/compute/manager.py:508 msgid "Instance event failed" msgstr "El evento de instancia ha fallado" -#: nova/compute/manager.py:605 +#: nova/compute/manager.py:608 #, python-format msgid "%s is not a valid node managed by this compute host." msgstr "%s no es un nodo válido administrado por este anfitrión de cómputo." -#: nova/compute/manager.py:704 +#: nova/compute/manager.py:714 #, python-format msgid "" "Deleting instance as its host (%(instance_host)s) is not equal to our " @@ -5266,11 +5003,11 @@ msgstr "" "Suprimiendo instancia porque el host (%(instance_host)s) no es igual a " "nuestro host (%(our_host)s)." 
-#: nova/compute/manager.py:719 +#: nova/compute/manager.py:729 msgid "Instance has been marked deleted already, removing it from the hypervisor." msgstr "La instancia ya ha sido marcada como eliminada, removiendo del hipervisor." -#: nova/compute/manager.py:739 +#: nova/compute/manager.py:749 msgid "" "Hypervisor driver does not support instance shared storage check, " "assuming it's not on shared storage" @@ -5278,15 +5015,7 @@ msgstr "" "El hipervisor no soporta la validación de almacenamiento compartido entre" " instancias, asumiendo que no se encuentra en almacenamiento compartido." -#: nova/compute/manager.py:745 -msgid "Failed to check if instance shared" -msgstr "Fallo al verificar si la instancia se encuentra compartida" - -#: nova/compute/manager.py:811 nova/compute/manager.py:862 -msgid "Failed to complete a deletion" -msgstr "Fallo durante la compleción una remoción" - -#: nova/compute/manager.py:844 +#: nova/compute/manager.py:854 msgid "" "Service started deleting the instance during the previous run, but did " "not finish. Restarting the deletion now." @@ -5294,7 +5023,7 @@ msgstr "" "El servicio ha iniciado la remoción de la instancia durante la ejecución " "previa, pero no ha finalizado. Reiniciando la remoción ahora." 
-#: nova/compute/manager.py:885 +#: nova/compute/manager.py:895 #, python-format msgid "" "Instance in transitional state (%(task_state)s) at start-up and power " @@ -5303,105 +5032,81 @@ msgstr "" "Instancia en estado transicional (%(task_state)s) al arranque y estado de" " energía es (%(power_state)s), limpiando el estado de la tarea" -#: nova/compute/manager.py:903 -msgid "Failed to stop instance" -msgstr "Fallo al detener instancia" - -#: nova/compute/manager.py:915 -msgid "Failed to start instance" -msgstr "Fallo al iniciar instancia" - -#: nova/compute/manager.py:940 -msgid "Failed to revert crashed migration" -msgstr "Se ha encontrado un error en al revertir la migración colgada" - -#: nova/compute/manager.py:943 +#: nova/compute/manager.py:953 msgid "Instance found in migrating state during startup. Resetting task_state" msgstr "" "Se ha encontrado una instancia en estado de migración durante el inicio. " "Restableciendo task_state" -#: nova/compute/manager.py:960 +#: nova/compute/manager.py:970 msgid "Rebooting instance after nova-compute restart." msgstr "Rearrancando instancia después de reiniciar nova-compute. 
" -#: nova/compute/manager.py:970 +#: nova/compute/manager.py:980 msgid "Hypervisor driver does not support resume guests" msgstr "El controlador de hipervisor no soporta reanudar invitados " -#: nova/compute/manager.py:975 +#: nova/compute/manager.py:985 msgid "Failed to resume instance" msgstr "No se ha podido reanudar la instancia" -#: nova/compute/manager.py:984 +#: nova/compute/manager.py:994 msgid "Hypervisor driver does not support firewall rules" msgstr "El controlador de hipervisor no soporta reglas de cortafuegos " -#: nova/compute/manager.py:1009 +#: nova/compute/manager.py:1019 #, python-format msgid "VM %(state)s (Lifecycle Event)" msgstr "" -#: nova/compute/manager.py:1025 +#: nova/compute/manager.py:1035 #, python-format msgid "Unexpected power state %d" msgstr "Estado de alimentación inesperado %d" -#: nova/compute/manager.py:1130 +#: nova/compute/manager.py:1140 msgid "Hypervisor driver does not support security groups." msgstr "El controlador del hipervisor no soporta grupos de seguridad." -#: nova/compute/manager.py:1168 +#: nova/compute/manager.py:1178 #, python-format msgid "Volume id: %s finished being created but was not set as 'available'" msgstr "" "El volumen con id: %s ha finalizado su creación pero no ha sido marcado " "como 'disponible'" -#: nova/compute/manager.py:1225 nova/compute/manager.py:1982 +#: nova/compute/manager.py:1235 nova/compute/manager.py:2057 msgid "Success" msgstr "Éxito" -#: nova/compute/manager.py:1249 +#: nova/compute/manager.py:1259 msgid "Instance disappeared before we could start it" msgstr "La instancia ha desaparecido antes de poder iniciarla" -#: nova/compute/manager.py:1276 +#: nova/compute/manager.py:1286 msgid "Anti-affinity instance group policy was violated." msgstr "la política de grupo de anti-afinidad fue violada." 
-#: nova/compute/manager.py:1353 -msgid "Failed to dealloc network for deleted instance" -msgstr "No se ha podido desasignar la red para la instancia suprimida" - -#: nova/compute/manager.py:1358 +#: nova/compute/manager.py:1369 msgid "Instance disappeared during build" msgstr "La instancia despareció durante su construcción" -#: nova/compute/manager.py:1374 -msgid "Failed to dealloc network for failed instance" -msgstr "Fallo al desasociar red para la instancia fallida" - -#: nova/compute/manager.py:1401 +#: nova/compute/manager.py:1412 #, python-format msgid "Error: %s" msgstr "Error: %s" -#: nova/compute/manager.py:1447 nova/compute/manager.py:3509 -msgid "Error trying to reschedule" -msgstr "Error al intentar volver a programar " - -#: nova/compute/manager.py:1503 +#: nova/compute/manager.py:1514 msgid "Instance build timed out. Set to error state." msgstr "" "La compilación de instancia ha excedido el tiempo de espera. Se ha estado" " en estado erróneo. " -#: nova/compute/manager.py:1513 nova/compute/manager.py:1873 +#: nova/compute/manager.py:1524 nova/compute/manager.py:1888 msgid "Starting instance..." msgstr "Iniciando instancia..." -#: nova/compute/manager.py:1531 +#: nova/compute/manager.py:1542 #, python-format msgid "" "Treating negative config value (%(retries)s) for " @@ -5410,135 +5115,77 @@ msgstr "" "Tratando el valor negativo de configuración (%(retries)s) para " "'network_allocate_retries' como 0." 
-#: nova/compute/manager.py:1556 -#, python-format -msgid "Instance failed network setup after %(attempts)d attempt(s)" -msgstr "" -"La configuración de red de la instancia falló después de %(attempts)d " -"intento(s)" - -#: nova/compute/manager.py:1560 +#: nova/compute/manager.py:1571 #, python-format msgid "Instance failed network setup (attempt %(attempt)d of %(attempts)d)" msgstr "" "Fallo de configuración de red de la instancia (intento %(attempt)d de " "%(attempts)d)" -#: nova/compute/manager.py:1741 -msgid "Instance failed block device setup" -msgstr "Ha fallado la configuración de dispositivo de bloque en la instancia" - -#: nova/compute/manager.py:1761 nova/compute/manager.py:2098 -#: nova/compute/manager.py:4041 -msgid "Instance failed to spawn" -msgstr "La instancia no se ha podido generar" - -#: nova/compute/manager.py:1941 -msgid "Unexpected build failure, not rescheduling build." -msgstr "Fallo de compilación inesperado, no se reprogramará la compilación." - -#: nova/compute/manager.py:2006 +#: nova/compute/manager.py:2020 #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" -#: nova/compute/manager.py:2012 nova/compute/manager.py:2060 -msgid "Failed to allocate network(s)" -msgstr "Fallo al asociar red(es)" - -#: nova/compute/manager.py:2016 nova/compute/manager.py:2062 +#: nova/compute/manager.py:2030 nova/compute/manager.py:2080 msgid "Failed to allocate the network(s), not rescheduling." msgstr "Fallo al asociar la(s) red(es), no se reprogramará." -#: nova/compute/manager.py:2086 -msgid "Failure prepping block device" -msgstr "Fallo al preparar el dispositivo de bloques" - -#: nova/compute/manager.py:2088 +#: nova/compute/manager.py:2106 msgid "Failure prepping block device." msgstr "Fallo al preparar el dispositivo de bloque." 
-#: nova/compute/manager.py:2111 +#: nova/compute/manager.py:2127 msgid "Could not clean up failed build, not rescheduling" msgstr "No se puede limpiar la compilación fallida, no se reprogramará." -#: nova/compute/manager.py:2121 -msgid "Failed to deallocate networks" -msgstr "Fallo al desasociar redes" - -#: nova/compute/manager.py:2142 -msgid "Failed to cleanup volumes for failed build, not rescheduling" -msgstr "" -"Fallo al limpiar los volúmenes para la compilación fallida, no se " -"reprogramará" - -#: nova/compute/manager.py:2181 +#: nova/compute/manager.py:2185 msgid "Failed to deallocate network for instance." msgstr "Se ha encontrado un error al desasignar la red para la instancia" -#: nova/compute/manager.py:2202 +#: nova/compute/manager.py:2206 #, python-format msgid "%(action_str)s instance" msgstr "%(action_str)s instancia" -#: nova/compute/manager.py:2246 -#, python-format -msgid "Ignoring DiskNotFound: %s" -msgstr "Ignorando DiskNotFound: %s" - -#: nova/compute/manager.py:2249 -#, python-format -msgid "Ignoring VolumeNotFound: %s" -msgstr "Ignorando VolumeNotFound: %s" - -#: nova/compute/manager.py:2353 +#: nova/compute/manager.py:2361 msgid "Instance disappeared during terminate" msgstr "La instancia ha desaparecido durante la terminación" -#: nova/compute/manager.py:2359 nova/compute/manager.py:3689 -#: nova/compute/manager.py:5769 -msgid "Setting instance vm_state to ERROR" -msgstr "Estableciendo el vm_state de la instancia a ERROR" - -#: nova/compute/manager.py:2539 +#: nova/compute/manager.py:2547 msgid "Rebuilding instance" msgstr "Volver a crear instancia" -#: nova/compute/manager.py:2552 +#: nova/compute/manager.py:2560 msgid "Invalid state of instance files on shared storage" msgstr "Estado no válido de archivos de instancia en almacenamiento compartido" -#: nova/compute/manager.py:2556 +#: nova/compute/manager.py:2564 msgid "disk on shared storage, recreating using existing disk" msgstr "" "disco en almacenamiento compartido, volviendo a 
crear utilizando disco " "existente" -#: nova/compute/manager.py:2560 +#: nova/compute/manager.py:2568 #, python-format msgid "disk not on shared storage, rebuilding from: '%s'" msgstr "El disco on está en almacenamiento compartido, reconstruyendo desde: '%s'" -#: nova/compute/manager.py:2571 nova/compute/manager.py:4884 -#, python-format -msgid "Failed to get compute_info for %s" -msgstr "Fallo al obtener compute_info para %s" - -#: nova/compute/manager.py:2647 +#: nova/compute/manager.py:2655 #, python-format msgid "bringing vm to original state: '%s'" msgstr "poniendo vm en estado original: '%s'" -#: nova/compute/manager.py:2678 +#: nova/compute/manager.py:2686 #, python-format msgid "Detaching from volume api: %s" msgstr "Desconectando de la API del volumen: %s" -#: nova/compute/manager.py:2705 +#: nova/compute/manager.py:2713 msgid "Rebooting instance" msgstr "Rearrancando instancia" -#: nova/compute/manager.py:2722 +#: nova/compute/manager.py:2730 #, python-format msgid "" "trying to reboot a non-running instance: (state: %(state)s expected: " @@ -5547,24 +5194,24 @@ msgstr "" "intentando rearrancar una instancia que no se está ejecutando: (estado: " "%(state)s se esperaba: %(running)s)" -#: nova/compute/manager.py:2758 +#: nova/compute/manager.py:2766 msgid "Reboot failed but instance is running" msgstr "Ha fallado el reinicio pero la instancia se mantiene en ejecución" -#: nova/compute/manager.py:2766 +#: nova/compute/manager.py:2774 #, python-format msgid "Cannot reboot instance: %s" msgstr "No se puede reiniciar instancia: %s" -#: nova/compute/manager.py:2778 +#: nova/compute/manager.py:2786 msgid "Instance disappeared during reboot" msgstr "La instancia ha desaparecido durante el rearranque" -#: nova/compute/manager.py:2846 +#: nova/compute/manager.py:2854 msgid "instance snapshotting" msgstr "creación de instantánea de instancia" -#: nova/compute/manager.py:2852 +#: nova/compute/manager.py:2860 #, python-format msgid "" "trying to snapshot a non-running 
instance: (state: %(state)s expected: " @@ -5573,37 +5220,37 @@ msgstr "" "intentando hacer una instantánea de una instancia que no se está " "ejecutando: (estado: %(state)s se esperaba: %(running)s)" -#: nova/compute/manager.py:2890 +#: nova/compute/manager.py:2893 +#, python-format +msgid "Error while trying to clean up image %s" +msgstr "Error al intentar limpiar imagen %s" + +#: nova/compute/manager.py:2898 msgid "Image not found during snapshot" msgstr "No se ha encontrado la imagen durante la instantánea" -#: nova/compute/manager.py:2972 +#: nova/compute/manager.py:2980 #, python-format msgid "Failed to set admin password. Instance %s is not running" msgstr "" "No se ha podido establecer contraseña de administrador. La instancia %s " "no está ejecutando" -#: nova/compute/manager.py:2979 +#: nova/compute/manager.py:2987 msgid "Root password set" msgstr "Contraseña raíz establecida" -#: nova/compute/manager.py:2984 +#: nova/compute/manager.py:2992 msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "" "esta instancia de invitado o controlador no implementa set_admin_password" " ." 
-#: nova/compute/manager.py:2997 -#, python-format -msgid "set_admin_password failed: %s" -msgstr "set_admin_password ha fallado: %s" - -#: nova/compute/manager.py:3003 +#: nova/compute/manager.py:3011 msgid "error setting admin password" msgstr "error al establecer contraseña de administrador" -#: nova/compute/manager.py:3019 +#: nova/compute/manager.py:3027 #, python-format msgid "" "trying to inject a file into a non-running (state: %(current_state)s " @@ -5612,12 +5259,12 @@ msgstr "" "intentando inyectar un archivo hacia un inactivo (estado: " "%(current_state)s esperado: %(expected_state)s)" -#: nova/compute/manager.py:3024 +#: nova/compute/manager.py:3032 #, python-format msgid "injecting file to %s" msgstr "inyectando archivo a %s" -#: nova/compute/manager.py:3042 +#: nova/compute/manager.py:3050 msgid "" "Unable to find a different image to use for rescue VM, using instance's " "current image" @@ -5625,34 +5272,30 @@ msgstr "" "No se ha podido encontrar una imagen diferente para utilizarla para VM de" " rescate, se utiliza la imagen actual de la instancia" -#: nova/compute/manager.py:3061 +#: nova/compute/manager.py:3069 msgid "Rescuing" msgstr "Rescatando" -#: nova/compute/manager.py:3082 -msgid "Error trying to Rescue Instance" -msgstr "Error al intentar Rescatar Instancia" - -#: nova/compute/manager.py:3086 +#: nova/compute/manager.py:3094 #, python-format msgid "Driver Error: %s" msgstr "Error de dispositivo: %s" -#: nova/compute/manager.py:3109 +#: nova/compute/manager.py:3117 msgid "Unrescuing" msgstr "Cancelando rescate" -#: nova/compute/manager.py:3180 +#: nova/compute/manager.py:3188 #, python-format msgid "Migration %s is not found during confirmation" msgstr "La migración %s no ha sido encontrada durante la confirmación" -#: nova/compute/manager.py:3185 +#: nova/compute/manager.py:3193 #, python-format msgid "Migration %s is already confirmed" msgstr "La migración %s ya ha sido confirmada" -#: nova/compute/manager.py:3189 +#: 
nova/compute/manager.py:3197 #, python-format msgid "" "Unexpected confirmation status '%(status)s' of migration %(id)s, exit " @@ -5661,118 +5304,86 @@ msgstr "" "Estado de confirmación inesperado '%(status)s' de la migración %(id)s, " "salir del proceso de confirmación" -#: nova/compute/manager.py:3203 +#: nova/compute/manager.py:3211 msgid "Instance is not found during confirmation" msgstr "La instancia no ha sido encontrada durante la confirmación" -#: nova/compute/manager.py:3384 +#: nova/compute/manager.py:3392 #, python-format msgid "Updating instance to original state: '%s'" msgstr "Actualizando el estado original de instancia hacia: '%s'" -#: nova/compute/manager.py:3407 +#: nova/compute/manager.py:3415 msgid "Instance has no source host" msgstr "La instancia no tiene ningún host de origen" -#: nova/compute/manager.py:3413 +#: nova/compute/manager.py:3421 msgid "destination same as source!" msgstr "destino igual que origen" -#: nova/compute/manager.py:3431 +#: nova/compute/manager.py:3439 msgid "Migrating" msgstr "Migrando" -#: nova/compute/manager.py:3695 -#, python-format -msgid "Failed to rollback quota for failed finish_resize: %s" -msgstr "Fallo al revertir las cuotas para un finish_resize fallido: %s" - -#: nova/compute/manager.py:3755 +#: nova/compute/manager.py:3771 msgid "Pausing" msgstr "Poniéndose en pausa" -#: nova/compute/manager.py:3772 +#: nova/compute/manager.py:3788 msgid "Unpausing" msgstr "Cancelando la pausa" -#: nova/compute/manager.py:3813 nova/compute/manager.py:3830 +#: nova/compute/manager.py:3829 nova/compute/manager.py:3846 msgid "Retrieving diagnostics" msgstr "Recuperando diagnósticos" -#: nova/compute/manager.py:3866 +#: nova/compute/manager.py:3882 msgid "Resuming" msgstr "Reanudando" -#: nova/compute/manager.py:4084 +#: nova/compute/manager.py:4102 msgid "Get console output" msgstr "Obtener salida de consola " -#: nova/compute/manager.py:4283 +#: nova/compute/manager.py:4301 #, python-format msgid "Attaching volume 
%(volume_id)s to %(mountpoint)s" msgstr "Conectando el volumen %(volume_id)s a %(mountpoint)s" -#: nova/compute/manager.py:4292 -#, python-format -msgid "Failed to attach %(volume_id)s at %(mountpoint)s" -msgstr "Fallo al asociar %(volume_id)s en %(mountpoint)s" - -#: nova/compute/manager.py:4308 +#: nova/compute/manager.py:4326 #, python-format msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" msgstr "Desconectar el volumen %(volume_id)s del punto de montaje %(mp)s" -#: nova/compute/manager.py:4319 +#: nova/compute/manager.py:4337 msgid "Detaching volume from unknown instance" msgstr "Desconectando volumen de instancia desconocida " -#: nova/compute/manager.py:4331 -#, python-format -msgid "Failed to detach volume %(volume_id)s from %(mp)s" -msgstr "No se ha podido desconectar el volumen %(volume_id)s de %(mp)s" - -#: nova/compute/manager.py:4404 -#, python-format -msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" -msgstr "Fallo para intercambiar volúmen %(old_volume_id)s por %(new_volume_id)s" - -#: nova/compute/manager.py:4411 -#, python-format -msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" -msgstr "" -"Fallo al conectar hacia al volúmen %(volume_id)s con el volumen en " -"%(mountpoint)s" - -#: nova/compute/manager.py:4504 +#: nova/compute/manager.py:4525 #, python-format msgid "allocate_port_for_instance returned %(ports)s ports" msgstr "allocate_port_for_instance ha regresado %(ports)s puertos" -#: nova/compute/manager.py:4524 +#: nova/compute/manager.py:4549 #, python-format msgid "Port %s is not attached" msgstr "El puerto %s no se encuentra asignado" -#: nova/compute/manager.py:4536 nova/tests/compute/test_compute.py:10612 +#: nova/compute/manager.py:4561 nova/tests/compute/test_compute.py:10659 #, python-format msgid "Host %s not found" msgstr "No se ha encontrado el host %s" -#: nova/compute/manager.py:4690 -#, python-format -msgid "Pre live migration failed at %s" -msgstr "Previo a migración 
en vivo falló en %s" - -#: nova/compute/manager.py:4753 +#: nova/compute/manager.py:4779 msgid "_post_live_migration() is started.." msgstr "Se ha iniciado _post_live_migration()." -#: nova/compute/manager.py:4825 +#: nova/compute/manager.py:4855 #, python-format msgid "Migrating instance to %s finished successfully." msgstr "La migración de la instancia hacia %s ha finalizado exitosamente." -#: nova/compute/manager.py:4827 +#: nova/compute/manager.py:4857 msgid "" "You may see the error \"libvirt: QEMU error: Domain not found: no domain " "with matching name.\" This error can be safely ignored." @@ -5781,15 +5392,15 @@ msgstr "" "encontrado: ningún dominio con un nombre coincidente.\" Este error se " "puede ignorar sin ningún riesgo." -#: nova/compute/manager.py:4852 +#: nova/compute/manager.py:4882 msgid "Post operation of migration started" msgstr "Se ha iniciado la operación posterior de migración" -#: nova/compute/manager.py:5057 +#: nova/compute/manager.py:5087 msgid "An error occurred while refreshing the network cache." msgstr "Ha ocurrido un error al actualizar el cache de red." 
-#: nova/compute/manager.py:5110 +#: nova/compute/manager.py:5140 #, python-format msgid "" "Found %(migration_count)d unconfirmed migrations older than " @@ -5798,12 +5409,12 @@ msgstr "" "Se han encontrado %(migration_count)d migraciones sin confirmar de más de" " %(confirm_window)d segundos" -#: nova/compute/manager.py:5115 +#: nova/compute/manager.py:5145 #, python-format msgid "Setting migration %(migration_id)s to error: %(reason)s" msgstr "Estableciendo la %(migration_id)s en error: %(reason)s" -#: nova/compute/manager.py:5124 +#: nova/compute/manager.py:5154 #, python-format msgid "" "Automatically confirming migration %(migration_id)s for instance " @@ -5812,32 +5423,28 @@ msgstr "" "Confirmando automáticamente la migración %(migration_id)s para la " "instancia %(instance_uuid)s" -#: nova/compute/manager.py:5134 +#: nova/compute/manager.py:5164 #, python-format msgid "Instance %s not found" msgstr "No se ha encontrado la instancia %s" -#: nova/compute/manager.py:5139 +#: nova/compute/manager.py:5169 msgid "In ERROR state" msgstr "En estado de ERROR " -#: nova/compute/manager.py:5146 +#: nova/compute/manager.py:5176 #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "En los estados %(vm_state)s/%(task_state)s, no REDIMENSIONADO/Ninguno" -#: nova/compute/manager.py:5157 +#: nova/compute/manager.py:5187 #, python-format msgid "Error auto-confirming resize: %s. Will retry later." msgstr "" "Error auto confirmando modificación de tamaño: %s. Se intentará " "posteriormente." -#: nova/compute/manager.py:5186 -msgid "Periodic task failed to offload instance." -msgstr "Tarea periódica falló al descargar instancia." - -#: nova/compute/manager.py:5206 +#: nova/compute/manager.py:5236 #, python-format msgid "" "Running instance usage audit for host %(host)s from %(begin_time)s to " @@ -5846,20 +5453,15 @@ msgstr "" "Ejecutando auditoría de uso de instancia para %(host)s desde " "%(begin_time)s hasta %(end_time)s. 
%(number_instances)s instancias." -#: nova/compute/manager.py:5226 -#, python-format -msgid "Failed to generate usage audit for instance on host %s" -msgstr "No se ha podido generar auditoría de uso para la instancia en el host %s " - -#: nova/compute/manager.py:5255 +#: nova/compute/manager.py:5285 msgid "Updating bandwidth usage cache" msgstr "Actualizando memoria caché de uso de ancho de banda" -#: nova/compute/manager.py:5277 +#: nova/compute/manager.py:5307 msgid "Bandwidth usage not supported by hypervisor." msgstr "Uso de ancho de banda no soportado por el hipervisor." -#: nova/compute/manager.py:5400 +#: nova/compute/manager.py:5430 #, python-format msgid "" "Found %(num_db_instances)s in the database and %(num_vm_instances)s on " @@ -5868,7 +5470,7 @@ msgstr "" "Se han encontrado %(num_db_instances)s en la base de datos y " "%(num_vm_instances)s en el hipervisor." -#: nova/compute/manager.py:5466 +#: nova/compute/manager.py:5496 #, python-format msgid "" "During the sync_power process the instance has moved from host %(src)s to" @@ -5877,70 +5479,65 @@ msgstr "" "Durante el proceso sync_power, la instancia se ha movido del host %(src)s" " al host %(dst)s" -#: nova/compute/manager.py:5479 +#: nova/compute/manager.py:5509 #, python-format msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." msgstr "" "Durante sync_power_state la instancia ha dejado una tarea pendiente " "(%(task)s). Omitir." -#: nova/compute/manager.py:5504 +#: nova/compute/manager.py:5534 msgid "Instance shutdown by itself. Calling the stop API." msgstr "Conclusión de instancia por sí misma. Llamando a la API de detención." -#: nova/compute/manager.py:5516 nova/compute/manager.py:5525 -#: nova/compute/manager.py:5556 nova/compute/manager.py:5567 -msgid "error during stop() in sync_power_state." -msgstr "error durante stop() en sync_power_state." - -#: nova/compute/manager.py:5520 +#: nova/compute/manager.py:5553 msgid "Instance is suspended unexpectedly. 
Calling the stop API." msgstr "" "La instancia se ha suspendido inesperadamente. Llamando a la API de " "detención." -#: nova/compute/manager.py:5536 +#: nova/compute/manager.py:5569 msgid "Instance is paused unexpectedly. Ignore." msgstr "La instancia se ha puesto en pausa inesperadamente. Ignorar. " -#: nova/compute/manager.py:5542 +#: nova/compute/manager.py:5575 msgid "Instance is unexpectedly not found. Ignore." msgstr "La instancia no se encuentra inesperadamente. Ignorar. " -#: nova/compute/manager.py:5548 +#: nova/compute/manager.py:5581 msgid "Instance is not stopped. Calling the stop API." msgstr "La instancia no se ha detenido. Llamando a la API de detención." -#: nova/compute/manager.py:5562 +#: nova/compute/manager.py:5595 msgid "Paused instance shutdown by itself. Calling the stop API." msgstr "" "La instancia pausada se ha apagado a si misma. Llamando la API de " "detención." -#: nova/compute/manager.py:5576 +#: nova/compute/manager.py:5609 msgid "Instance is not (soft-)deleted." msgstr "La instancia no se suprime (de forma no permanente). 
" -#: nova/compute/manager.py:5605 +#: nova/compute/manager.py:5639 msgid "Reclaiming deleted instance" msgstr "Reclamando instancia suprimida" -#: nova/compute/manager.py:5609 +#: nova/compute/manager.py:5643 #, python-format msgid "Periodic reclaim failed to delete instance: %s" msgstr "Reclamación periódica falló al eliminar instancia: %s" -#: nova/compute/manager.py:5634 +#: nova/compute/manager.py:5668 #, python-format msgid "Deleting orphan compute node %s" msgstr "Eliminando nodo de cómputo huérfano %s" -#: nova/compute/manager.py:5642 nova/compute/resource_tracker.py:391 +#: nova/compute/manager.py:5676 nova/compute/resource_tracker.py:406 #, python-format msgid "No service record for host %s" msgstr "Ningún registro de servicio para el host %s " -#: nova/compute/manager.py:5682 +#: nova/compute/manager.py:5716 #, python-format msgid "" "Detected instance with name label '%s' which is marked as DELETED but " @@ -5950,7 +5547,7 @@ msgstr "" " marcada como ELIMINADA pero todavía se encuentra presente en el " "anfitrión." -#: nova/compute/manager.py:5688 +#: nova/compute/manager.py:5722 #, python-format msgid "" "Powering off instance with name label '%s' which is marked as DELETED but" @@ -5959,15 +5556,15 @@ msgstr "" "Apagando la instancia con nombre '%s' que está marcada como ELIMINADA " "pero sigue presente en el anfitrión." 
-#: nova/compute/manager.py:5697 +#: nova/compute/manager.py:5731 msgid "set_bootable is not implemented for the current driver" msgstr "set_bootable no está implementado en el controlador actual" -#: nova/compute/manager.py:5702 +#: nova/compute/manager.py:5736 msgid "Failed to power off instance" msgstr "Fallo al apagar la instancia" -#: nova/compute/manager.py:5706 +#: nova/compute/manager.py:5740 #, python-format msgid "" "Destroying instance with name label '%s' which is marked as DELETED but " @@ -5976,27 +5573,27 @@ msgstr "" "Desrtuyendo instancia con etiqueta de nombre '%s' la cual ha sido marcada" " como ELIMINADA pero todavía se encuentra presente en el anfitrión." -#: nova/compute/manager.py:5716 +#: nova/compute/manager.py:5750 #, python-format msgid "Periodic cleanup failed to delete instance: %s" msgstr "Limpieza periódica falló al eliminar la instancia: %s" -#: nova/compute/manager.py:5720 +#: nova/compute/manager.py:5754 #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "Valor '%s' no reconocido para CONF.running_deleted_instance_action" -#: nova/compute/manager.py:5752 +#: nova/compute/manager.py:5786 #, python-format msgid "Setting instance back to %(state)s after: %(error)s" msgstr "Estableciendo la instancia de vuelta a %(state)s tras: %(error)s" -#: nova/compute/manager.py:5762 +#: nova/compute/manager.py:5796 #, python-format msgid "Setting instance back to ACTIVE after: %s" msgstr "Marcando la instancia de nuevo como ACTIVA después de: %s" -#: nova/compute/resource_tracker.py:105 +#: nova/compute/resource_tracker.py:111 msgid "" "Host field should not be set on the instance until resources have been " "claimed." @@ -6004,7 +5601,7 @@ msgstr "" "El campo de host no se debe establecer en la instancia hasta que los " "recursos se hayan reclamado." 
-#: nova/compute/resource_tracker.py:110 +#: nova/compute/resource_tracker.py:116 msgid "" "Node field should not be set on the instance until resources have been " "claimed." @@ -6012,16 +5609,16 @@ msgstr "" "El campo Nodo no debe ser establecido en la instancia hasta que los " "recursos han sido reclamados." -#: nova/compute/resource_tracker.py:272 +#: nova/compute/resource_tracker.py:276 #, python-format msgid "Cannot get the metrics from %s." msgstr "No se pueden obtener las métricas de %s." -#: nova/compute/resource_tracker.py:291 +#: nova/compute/resource_tracker.py:295 msgid "Auditing locally available compute resources" msgstr "Auditando recursos de cálculo disponibles localmente" -#: nova/compute/resource_tracker.py:296 +#: nova/compute/resource_tracker.py:300 msgid "" "Virt driver does not support 'get_available_resource' Compute tracking " "is disabled." @@ -6029,54 +5626,56 @@ msgstr "" "El controlador Virt no soporta 'get_available_resource'. El seguimiento " "de cálculo está inhabilitado." 
-#: nova/compute/resource_tracker.py:371 +#: nova/compute/resource_tracker.py:375 #, python-format msgid "Compute_service record created for %(host)s:%(node)s" msgstr "Registro compute_service creado para %(host)s:%(node)s" -#: nova/compute/resource_tracker.py:377 +#: nova/compute/resource_tracker.py:381 #, python-format msgid "Compute_service record updated for %(host)s:%(node)s" msgstr "El registro compute_service se ha actualizado para %(host)s:%(node)s" -#: nova/compute/resource_tracker.py:430 +#: nova/compute/resource_tracker.py:446 #, python-format -msgid "Free ram (MB): %s" -msgstr "RAM libre (MB): %s " +msgid "" +"Total physical ram (MB): %(pram)s, total allocated virtual ram (MB): " +"%(vram)s" +msgstr "" -#: nova/compute/resource_tracker.py:431 +#: nova/compute/resource_tracker.py:450 #, python-format msgid "Free disk (GB): %s" msgstr "Disco libre (GB): %s " -#: nova/compute/resource_tracker.py:436 +#: nova/compute/resource_tracker.py:454 #, python-format -msgid "Free VCPUS: %s" -msgstr "VCPUS libres: %s" +msgid "Total usable vcpus: %(tcpu)s, total allocated vcpus: %(ucpu)s" +msgstr "" -#: nova/compute/resource_tracker.py:438 +#: nova/compute/resource_tracker.py:458 msgid "Free VCPU information unavailable" msgstr "Información de VCPU libre no disponible" -#: nova/compute/resource_tracker.py:441 +#: nova/compute/resource_tracker.py:461 #, python-format msgid "PCI stats: %s" msgstr "" -#: nova/compute/resource_tracker.py:486 +#: nova/compute/resource_tracker.py:512 #, python-format msgid "Updating from migration %s" msgstr "Actualizando desde la migración %s" -#: nova/compute/resource_tracker.py:553 +#: nova/compute/resource_tracker.py:577 msgid "Instance not resizing, skipping migration." msgstr "La instancia no se está redimensionando, se salta la migración." -#: nova/compute/resource_tracker.py:568 +#: nova/compute/resource_tracker.py:592 msgid "Flavor could not be found, skipping migration." 
msgstr "El sabor no puede ser encontrado, omitiendo migración." -#: nova/compute/resource_tracker.py:658 +#: nova/compute/resource_tracker.py:682 #, python-format msgid "" "Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB " @@ -6085,7 +5684,7 @@ msgstr "" "Se ha detectado una instancia huérfana en ejecución: %(uuid)s " "(consumiento %(memory_mb)s MB de memoria)" -#: nova/compute/resource_tracker.py:672 +#: nova/compute/resource_tracker.py:696 #, python-format msgid "Missing keys: %s" msgstr "Faltan claves: %s" @@ -6099,31 +5698,9 @@ msgstr "No se ha especificado ningún host de cálculo" msgid "Unable to find host for Instance %s" msgstr "No se puede encontrar el host para la instancia %s " -#: nova/compute/utils.py:204 -#, python-format -msgid "Can't access image %(image_id)s: %(error)s" -msgstr "No se puede acceder a la imagen %(image_id)s: %(error)s" - -#: nova/compute/utils.py:328 -#, python-format -msgid "" -"No host name specified for the notification of HostAPI.%s and it will be " -"ignored" -msgstr "" -"No ha sido especificado un nombre de anfitrión para la notificación de " -"HostAPI.%s y será ignorada" - -#: nova/compute/utils.py:456 -#, python-format -msgid "" -"Value of 0 or None specified for %s. This behaviour will change in " -"meaning in the K release, to mean 'call at the default rate' rather than " -"'do not call'. To keep the 'do not call' behaviour, use a negative value." +#: nova/compute/stats.py:49 +msgid "Unexpected type adding stats" msgstr "" -"Un valor de 0 o Ninguno especificado para %s. Este comportamiento " -"cambiará en el transcurso de la liberación K, para definir 'llamada en la" -" tasa predeterminada' en lugar de 'no llamar'. Para mantener el " -"comportamiento 'no llamar', utiliza un valor negativo." 
#: nova/compute/monitors/__init__.py:176 #, python-format @@ -6151,27 +5728,27 @@ msgstr "" "No todas las propiedades necesarias están implementadas en el controlador" " de cómputo: %s" -#: nova/conductor/api.py:318 +#: nova/conductor/api.py:315 msgid "nova-conductor connection established successfully" msgstr "" -#: nova/conductor/api.py:323 +#: nova/conductor/api.py:320 msgid "" "Timed out waiting for nova-conductor. Is it running? Or did this service" " start before nova-conductor? Reattempting establishment of nova-" "conductor connection..." msgstr "" -#: nova/conductor/manager.py:124 +#: nova/conductor/manager.py:123 #, python-format msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s" msgstr "Se intentado actualizar instancia para '%(key)s' en %(instance_uuid)s" -#: nova/conductor/manager.py:523 +#: nova/conductor/manager.py:519 msgid "No valid host found for cold migrate" msgstr "No se ha encontrado anfitrión para migración en frío" -#: nova/conductor/manager.py:586 +#: nova/conductor/manager.py:582 #, python-format msgid "" "Migration of instance %(instance_id)s to host %(dest)s unexpectedly " @@ -6180,22 +5757,22 @@ msgstr "" "La migración de la instancia %(instance_id)s al anfitrión %(dest)s ha " "fallado inesperadamente." -#: nova/conductor/manager.py:673 +#: nova/conductor/manager.py:669 #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "Se ha intentado la extracción pero la imagen %s no ha sido encontrada." 
-#: nova/conductor/manager.py:696 +#: nova/conductor/manager.py:692 msgid "No valid host found for unshelve instance" msgstr "No se ha encontrado anfitrión válido para extraer instancia" -#: nova/conductor/manager.py:700 +#: nova/conductor/manager.py:696 msgid "Unshelve attempted but vm_state not SHELVED or SHELVED_OFFLOADED" msgstr "" "Se ha intentado desarchivar pero vm_state no se encuentra como SHELVED o " "SHELVED_OFFLOADED" -#: nova/conductor/manager.py:737 +#: nova/conductor/manager.py:733 msgid "No valid host found for rebuild" msgstr "" @@ -6271,51 +5848,51 @@ msgstr "No se ha podido notificar a las células la destrucción de instancia" msgid "Failed to notify cells of instance update" msgstr "No se ha podido notificar a las células la actualización de instancia" -#: nova/db/api.py:1685 +#: nova/db/api.py:1683 msgid "Failed to notify cells of bw_usage update" msgstr "No se ha podido notificar a las células la actualización de bw_usage" -#: nova/db/sqlalchemy/api.py:204 +#: nova/db/sqlalchemy/api.py:207 #, python-format msgid "Deadlock detected when running '%(func_name)s': Retrying..." msgstr "Punto muerto detectado al ejecutar '%(func_name)s': Reintentando..." 
-#: nova/db/sqlalchemy/api.py:245 +#: nova/db/sqlalchemy/api.py:248 msgid "model or base_model parameter should be subclass of NovaBase" msgstr "El parámetro model o base_model debe ser una subclase de NovaBase" -#: nova/db/sqlalchemy/api.py:258 -#: nova/openstack/common/db/sqlalchemy/utils.py:174 -#: nova/virt/baremetal/db/sqlalchemy/api.py:60 +#: nova/db/sqlalchemy/api.py:261 +#: nova/openstack/common/db/sqlalchemy/utils.py:173 +#: nova/virt/baremetal/db/sqlalchemy/api.py:61 #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Valor de read_deleted no reconocido '%s'" -#: nova/db/sqlalchemy/api.py:750 +#: nova/db/sqlalchemy/api.py:753 #, python-format msgid "Invalid floating ip id %s in request" msgstr "Identificador de dirección IP flotante inválida %s en solicitud" -#: nova/db/sqlalchemy/api.py:855 +#: nova/db/sqlalchemy/api.py:858 msgid "Failed to update usages bulk deallocating floating IP" msgstr "Fallo al actualizar uso de desasignación masiva de IP fotante" -#: nova/db/sqlalchemy/api.py:1011 +#: nova/db/sqlalchemy/api.py:1007 #, python-format msgid "Invalid floating IP %s in request" msgstr "Dirección IP flotante inválida %s en la solicitud" -#: nova/db/sqlalchemy/api.py:1313 nova/db/sqlalchemy/api.py:1352 +#: nova/db/sqlalchemy/api.py:1310 nova/db/sqlalchemy/api.py:1349 #, python-format msgid "Invalid fixed IP Address %s in request" msgstr "Dirección IP fija inválida %s en la solicitud" -#: nova/db/sqlalchemy/api.py:1487 +#: nova/db/sqlalchemy/api.py:1484 #, python-format msgid "Invalid virtual interface address %s in request" msgstr "Dirección de interfaz virtual inválida %s en la solicitud" -#: nova/db/sqlalchemy/api.py:1581 +#: nova/db/sqlalchemy/api.py:1578 #, python-format msgid "" "Unknown osapi_compute_unique_server_name_scope value: %s Flag must be " @@ -6324,22 +5901,22 @@ msgstr "" "Valor de osapi_compute_unique_server_name_scope desconocido: %s El " "distintivo debe ser vacío, \"global\" o \"project\"" -#: 
nova/db/sqlalchemy/api.py:1741 +#: nova/db/sqlalchemy/api.py:1738 #, python-format msgid "Invalid instance id %s in request" msgstr "ID de instancia %s no válido en la solicitud." -#: nova/db/sqlalchemy/api.py:2019 +#: nova/db/sqlalchemy/api.py:2017 #, python-format msgid "Invalid field name: %s" msgstr "Campo de nombre inválido: %s" -#: nova/db/sqlalchemy/api.py:3248 +#: nova/db/sqlalchemy/api.py:3246 #, python-format msgid "Change will make usage less than 0 for the following resources: %s" msgstr "El cambio hará el uso menos de 0 para los siguientes recursos: %s" -#: nova/db/sqlalchemy/api.py:4899 +#: nova/db/sqlalchemy/api.py:4898 #, python-format msgid "" "Volume(%s) has lower stats then what is in the database. Instance must " @@ -6349,14 +5926,14 @@ msgstr "" "datos. la instancia debió haber reiniciado o colapsado. Actualizando los " "totales." -#: nova/db/sqlalchemy/api.py:5256 +#: nova/db/sqlalchemy/api.py:5262 #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" "Fallo en adición de metadata para el agregado %(id)s después de " "%(retries)s intentos" -#: nova/db/sqlalchemy/api.py:5646 +#: nova/db/sqlalchemy/api.py:5652 #, python-format msgid "IntegrityError detected when archiving table %s" msgstr "Se ha detectado un IntegrityError al archivar la tabla %s" @@ -6391,15 +5968,15 @@ msgstr "" msgid "Extra column %(table)s.%(column)s in shadow table" msgstr "Columna extra %(table)s.%(column)s en la tabla shadow" -#: nova/db/sqlalchemy/utils.py:105 +#: nova/db/sqlalchemy/utils.py:103 msgid "Specify `table_name` or `table` param" msgstr "Especificar parámetro `table_name` o `table`" -#: nova/db/sqlalchemy/utils.py:108 +#: nova/db/sqlalchemy/utils.py:106 msgid "Specify only one param `table_name` `table`" msgstr "Especificar solamente un parámetro `table_name` `table`" -#: nova/db/sqlalchemy/utils.py:131 nova/db/sqlalchemy/utils.py:135 +#: nova/db/sqlalchemy/utils.py:129 nova/db/sqlalchemy/utils.py:133 #: 
nova/db/sqlalchemy/migrate_repo/versions/216_havana.py:84 #: nova/db/sqlalchemy/migrate_repo/versions/216_havana.py:1103 msgid "Exception while creating table." @@ -6427,12 +6004,12 @@ msgstr "" "Al cargar el módulo %(module_str)s se ha presentado el siguiente error: " "%(ex)s" -#: nova/image/glance.py:306 +#: nova/image/glance.py:327 #, python-format msgid "Failed to instantiate the download handler for %(scheme)s" msgstr "Fallo al instanciar el manejador de descargas para %(scheme)s" -#: nova/image/glance.py:322 +#: nova/image/glance.py:343 #, python-format msgid "Successfully transferred using %s" msgstr "Exitosamente transferido utilizando %s" @@ -6593,7 +6170,7 @@ msgstr "" msgid "Not deleting key %s" msgstr "Sin eliminar la clave %s" -#: nova/network/api.py:195 nova/network/neutronv2/api.py:797 +#: nova/network/api.py:196 nova/network/neutronv2/api.py:812 #, python-format msgid "re-assign floating IP %(address)s from instance %(instance_id)s" msgstr "volver a asignar IP flotante %(address)s desde instancia %(instance_id)s" @@ -6620,46 +6197,46 @@ msgstr "Cargando controlador de red '%s'" msgid "Fixed ip %s not found" msgstr "Direción IP fija %s no encontrada" -#: nova/network/floating_ips.py:175 +#: nova/network/floating_ips.py:176 #, python-format msgid "Floating IP %s is not associated. Ignore." msgstr "La IP flotante %s no está asociada. Ignorar." 
-#: nova/network/floating_ips.py:194 +#: nova/network/floating_ips.py:195 #, python-format msgid "Address |%(address)s| is not allocated" msgstr "La dirección |%(address)s| no está asignada" -#: nova/network/floating_ips.py:198 +#: nova/network/floating_ips.py:199 #, python-format msgid "Address |%(address)s| is not allocated to your project |%(project)s|" msgstr "La dirección |%(address)s| no está asignada al proyecto |%(project)s|" -#: nova/network/floating_ips.py:218 +#: nova/network/floating_ips.py:219 #, python-format msgid "Quota exceeded for %s, tried to allocate floating IP" msgstr "Cuota excedida para %s, intentando asignar dirección IP flotante" -#: nova/network/floating_ips.py:277 +#: nova/network/floating_ips.py:278 msgid "Failed to update usages deallocating floating IP" msgstr "No se han podido actualizar los usos desasignando IP flotante " -#: nova/network/floating_ips.py:375 +#: nova/network/floating_ips.py:376 #, python-format msgid "Failed to disassociated floating address: %s" msgstr "Fallo al desasociar la dirección IP flotante: %s" -#: nova/network/floating_ips.py:380 +#: nova/network/floating_ips.py:381 #, python-format msgid "Interface %s not found" msgstr "Interfaz %s no encontrada" -#: nova/network/floating_ips.py:539 +#: nova/network/floating_ips.py:540 #, python-format msgid "Starting migration network for instance %s" msgstr "Comenzando migración de red para la instancia %s" -#: nova/network/floating_ips.py:545 +#: nova/network/floating_ips.py:546 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " @@ -6668,12 +6245,12 @@ msgstr "" "La dirección IP flotante | %(address)s | ya no pertentece a la instancia " "%(instance_uuid)s. 
No será migrada" -#: nova/network/floating_ips.py:574 +#: nova/network/floating_ips.py:575 #, python-format msgid "Finishing migration network for instance %s" msgstr "Finalizando la migración de red para la instancia %s" -#: nova/network/floating_ips.py:581 +#: nova/network/floating_ips.py:582 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " @@ -6682,7 +6259,7 @@ msgstr "" "La dirección IP flotante |%(address)s| ya no pertenece a la instancia " "%(instance_uuid)s. No se configurará." -#: nova/network/floating_ips.py:624 +#: nova/network/floating_ips.py:625 #, python-format msgid "" "Database inconsistency: DNS domain |%s| is registered in the Nova db but " @@ -6693,12 +6270,12 @@ msgstr "" "base de datos Nova pero no es visible para el controlador DNS de " "instancia o flotante. Se ignorará." -#: nova/network/floating_ips.py:664 +#: nova/network/floating_ips.py:665 #, python-format msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." msgstr "El dominio |%(domain)s| ya existe, cambiando zona a |%(av_zone)s|." -#: nova/network/floating_ips.py:673 +#: nova/network/floating_ips.py:674 #, python-format msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." msgstr "El dominio |%(domain)s| ya existe, cambiando el proyecto a |%(project)s." 
@@ -6748,52 +6325,52 @@ msgstr "" "Se ha intentado eliminar una regla que no estaba allí: %(chain)r %(rule)r" " %(wrap)r %(top)r" -#: nova/network/linux_net.py:769 +#: nova/network/linux_net.py:777 #, python-format msgid "Removed %(num)d duplicate rules for floating ip %(float)s" msgstr "Se han eliminado %(num)d reglas duplicadas para la IP flotante %(float)s" -#: nova/network/linux_net.py:817 +#: nova/network/linux_net.py:825 #, python-format msgid "Error deleting conntrack entries for %s" msgstr "Error al eliminar las entradas conntrack para %s" -#: nova/network/linux_net.py:1072 +#: nova/network/linux_net.py:1091 #, python-format msgid "Hupping dnsmasq threw %s" msgstr "Excepción al recargar la configuración de dnsmasq: %s" -#: nova/network/linux_net.py:1154 +#: nova/network/linux_net.py:1172 #, python-format msgid "killing radvd threw %s" msgstr "Matando radvd lanzado %s" -#: nova/network/linux_net.py:1308 +#: nova/network/linux_net.py:1333 #, python-format msgid "Unable to execute %(cmd)s. Exception: %(exception)s" msgstr "No se puede ejecutar %(cmd)s. Excepción: %(exception)s" -#: nova/network/linux_net.py:1366 +#: nova/network/linux_net.py:1391 #, python-format msgid "Failed removing net device: '%s'" msgstr "Fallo al remover dispositivo de red: '%s'" -#: nova/network/linux_net.py:1543 +#: nova/network/linux_net.py:1568 #, python-format msgid "Adding interface %(interface)s to bridge %(bridge)s" msgstr "Añadiendo la interfaz %(interface)s al puente %(bridge)s" -#: nova/network/linux_net.py:1549 +#: nova/network/linux_net.py:1574 #, python-format msgid "Failed to add interface: %s" msgstr "No se ha podido añadir interfaz: %s " -#: nova/network/manager.py:828 +#: nova/network/manager.py:813 #, python-format msgid "instance-dns-zone not found |%s|." 
msgstr "instance-dns-zone no encontrada |%s|" -#: nova/network/manager.py:835 +#: nova/network/manager.py:820 #, python-format msgid "" "instance-dns-zone is |%(domain)s|, which is in availability zone " @@ -6804,56 +6381,51 @@ msgstr "" "|%(zone)s|. La instancia está en la zona |%(zone2)s|. No se creará ningún" " registro de DNS." -#: nova/network/manager.py:874 -#, python-format -msgid "Quota exceeded for %s, tried to allocate fixed IP" -msgstr "Cuota excedida para %s, intentando asignar dirección IP flotante" - -#: nova/network/manager.py:934 +#: nova/network/manager.py:943 msgid "Error cleaning up fixed ip allocation. Manual cleanup may be required." msgstr "" -#: nova/network/manager.py:964 +#: nova/network/manager.py:973 msgid "Failed to update usages deallocating fixed IP" msgstr "" "Se ha encontrado un error en la actualización de los usos desasignando IP" " flotante" -#: nova/network/manager.py:988 +#: nova/network/manager.py:997 #, python-format msgid "Unable to release %s because vif doesn't exist." msgstr "No se puede liberar %s porque vif no existe." 
-#: nova/network/manager.py:1029 +#: nova/network/manager.py:1038 #, python-format msgid "IP %s leased that is not associated" msgstr "La IP %s alquilada que no está asociada " -#: nova/network/manager.py:1035 +#: nova/network/manager.py:1044 #, python-format msgid "IP |%s| leased that isn't allocated" msgstr "IP |%s| alquilada que no está asignada" -#: nova/network/manager.py:1044 +#: nova/network/manager.py:1053 #, python-format msgid "IP %s released that is not associated" msgstr "IP %s liberada que no está asociada" -#: nova/network/manager.py:1048 +#: nova/network/manager.py:1057 #, python-format msgid "IP %s released that was not leased" msgstr "IP %s liberada que no está alquilada" -#: nova/network/manager.py:1066 +#: nova/network/manager.py:1075 #, python-format msgid "%s must be an integer" msgstr "%s debe ser un entero " -#: nova/network/manager.py:1098 +#: nova/network/manager.py:1107 msgid "Maximum allowed length for 'label' is 255." msgstr "La longitud máxima permitida para 'label' es 255." -#: nova/network/manager.py:1118 +#: nova/network/manager.py:1127 #, python-format msgid "" "Subnet(s) too large, defaulting to /%s. To override, specify " @@ -6862,18 +6434,18 @@ msgstr "" "Subred(es) demasiado grande(s), se usará el valor predeterminado /%s. " "Para sustituirlo, especifique el distintivo network_size." 
-#: nova/network/manager.py:1203 +#: nova/network/manager.py:1212 msgid "cidr already in use" msgstr "cidr ya se está utilizando" -#: nova/network/manager.py:1206 +#: nova/network/manager.py:1215 #, python-format msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" msgstr "" "cidr solicitado (%(cidr)s) está en conflicto con superred existente " "(%(super)s)" -#: nova/network/manager.py:1217 +#: nova/network/manager.py:1226 #, python-format msgid "" "requested cidr (%(cidr)s) conflicts with existing smaller cidr " @@ -6882,12 +6454,12 @@ msgstr "" "el cidr solicitado (%(cidr)s) está en conflicto con el cidr más pequeño " "existente (%(smaller)s)" -#: nova/network/manager.py:1311 +#: nova/network/manager.py:1320 #, python-format msgid "Network must be disassociated from project %s before delete" msgstr "La red se debe desasociar el proyecto %s antes de la supresión" -#: nova/network/manager.py:1937 +#: nova/network/manager.py:1955 msgid "" "The sum between the number of networks and the vlan start cannot be " "greater than 4094" @@ -6895,7 +6467,7 @@ msgstr "" "La suma entre el número de redes y el inicio de vlan no puede ser mayor " "que 4094" -#: nova/network/manager.py:1944 +#: nova/network/manager.py:1962 #, python-format msgid "" "The network range is not big enough to fit %(num_networks)s networks. 
" @@ -6927,30 +6499,26 @@ msgstr "Se ha eliminado %s" msgid "Cannot delete domain |%s|" msgstr "No se puede suprimir el dominio |%s|" -#: nova/network/model.py:94 +#: nova/network/model.py:96 #, python-format msgid "Invalid IP format %s" msgstr "Formato IP inválido %s" -#: nova/network/neutronv2/api.py:212 -msgid "Neutron error: quota exceeded" -msgstr "Error de Neutron: cuota excedida" - -#: nova/network/neutronv2/api.py:215 +#: nova/network/neutronv2/api.py:230 #, python-format msgid "Neutron error creating port on network %s" msgstr "Error de Neutron al crear puerto en la red: %s" -#: nova/network/neutronv2/api.py:248 +#: nova/network/neutronv2/api.py:263 #, python-format msgid "empty project id for instance %s" msgstr "ID de proyecto vacío para la instancia %s" -#: nova/network/neutronv2/api.py:283 +#: nova/network/neutronv2/api.py:298 msgid "No network configured!" msgstr "No hay red configurada!" -#: nova/network/neutronv2/api.py:303 +#: nova/network/neutronv2/api.py:318 #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more " @@ -6959,81 +6527,67 @@ msgstr "" "Se han encontrado varios grupos de seguridad que coinciden con '%s'. " "Utilice un ID para ser más específico." 
-#: nova/network/neutronv2/api.py:373 +#: nova/network/neutronv2/api.py:388 #, python-format msgid "Failed to update port %s" msgstr "Falló al actualizar el puerto %s" -#: nova/network/neutronv2/api.py:380 +#: nova/network/neutronv2/api.py:395 #, python-format msgid "Failed to delete port %s" msgstr "Ha ocurrido un fallo al eliminar el puerto %s" -#: nova/network/neutronv2/api.py:443 +#: nova/network/neutronv2/api.py:458 #, python-format msgid "Unable to reset device ID for port %s" msgstr "" -#: nova/network/neutronv2/api.py:451 +#: nova/network/neutronv2/api.py:466 #, python-format msgid "Port %s does not exist" msgstr "El puerto %s no existe" -#: nova/network/neutronv2/api.py:454 nova/network/neutronv2/api.py:478 +#: nova/network/neutronv2/api.py:469 nova/network/neutronv2/api.py:493 #, python-format msgid "Failed to delete neutron port %s" msgstr "Fallo al eliminar el puerto de neutron %s" -#: nova/network/neutronv2/api.py:576 -#, python-format -msgid "" -"Unable to update port %(portid)s on subnet %(subnet_id)s with failure: " -"%(exception)s" -msgstr "" -"No se ha podido actualizar el puerto %(portid)s en la subred " -"%(subnet_id)s con la anomalía: %(exception)s" - -#: nova/network/neutronv2/api.py:605 -#, python-format -msgid "Unable to update port %(portid)s with failure: %(exception)s" -msgstr "No se puede actualizar el puerto %(portid)s con anomalía: %(exception)s" - -#: nova/network/neutronv2/api.py:632 +#: nova/network/neutronv2/api.py:647 msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "" "Se han encontrado múltiples redes posibles, usa un identificador de red " "para ser más específico." 
-#: nova/network/neutronv2/api.py:651 +#: nova/network/neutronv2/api.py:666 #, python-format msgid "Failed to access port %s" msgstr "Fallo al acceder al puerto %s" -#: nova/network/neutronv2/api.py:880 +#: nova/network/neutronv2/api.py:898 #, python-format msgid "Unable to access floating IP %s" msgstr "Incapaz de acceder a la Ip flotante %s" -#: nova/network/neutronv2/api.py:968 +#: nova/network/neutronv2/api.py:986 #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "" "Se han encontrado varias coincidencias de agrupaciones de IP flotante " "para el nombre '%s' " -#: nova/network/neutronv2/api.py:1012 +#: nova/network/neutronv2/api.py:1030 #, python-format msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" msgstr "" "Incapaz de acceder a la IP flotante %(fixed_ip)s para el puerto " "%(port_id)s" -#: nova/network/neutronv2/api.py:1071 +#: nova/network/neutronv2/api.py:1089 #, python-format msgid "Unable to update host of port %s" msgstr "Incapaz de actualizar el anfitrión del puerto %s" -#: nova/network/neutronv2/api.py:1107 +#: nova/network/neutronv2/api.py:1125 #, python-format msgid "" "Network %(id)s not matched with the tenants network! The ports tenant " @@ -7206,7 +6760,7 @@ msgstr "Aquí se requiere un objeto del tipo %s" msgid "A NetworkModel is required here" msgstr "aquí se requiere un NetworkModel" -#: nova/objects/instance.py:431 +#: nova/objects/instance.py:433 #, python-format msgid "No save handler for %s" msgstr "No hay manejador de guardado para %s" @@ -7217,11 +6771,11 @@ msgstr "" "No se ha podido notificar a las células la actualización de memoria caché" " de información de instancia" -#: nova/openstack/common/gettextutils.py:320 +#: nova/openstack/common/gettextutils.py:301 msgid "Message objects do not support addition." msgstr "Los objetos de mensaje no soportan adición." 
-#: nova/openstack/common/gettextutils.py:330 +#: nova/openstack/common/gettextutils.py:311 msgid "" "Message objects do not support str() because they may contain non-ascii " "characters. Please use unicode() or translate() instead." @@ -7246,22 +6800,22 @@ msgstr "" msgid "Unable to acquire lock on `%(filename)s` due to %(exception)s" msgstr "" -#: nova/openstack/common/log.py:327 +#: nova/openstack/common/log.py:276 #, python-format msgid "Deprecated: %s" msgstr "En desuso: %s" -#: nova/openstack/common/log.py:436 +#: nova/openstack/common/log.py:385 #, python-format msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "Error al cargar la configuración de registro %(log_config)s: %(err_msg)s" -#: nova/openstack/common/log.py:486 +#: nova/openstack/common/log.py:446 #, python-format msgid "syslog facility must be one of: %s" msgstr "El recurso syslog debe ser uno de: %s" -#: nova/openstack/common/log.py:729 +#: nova/openstack/common/log.py:689 #, python-format msgid "Fatal call to deprecated config: %(msg)s" msgstr "Llamada muy grave a configuración en desuso: %(msg)s" @@ -7329,27 +6883,27 @@ msgstr "Entorno no soportado a través de SSH" msgid "process_input not supported over SSH" msgstr "entrada de proceso no soporta a través de SSH" -#: nova/openstack/common/sslutils.py:98 +#: nova/openstack/common/sslutils.py:95 #, python-format msgid "Invalid SSL version : %s" msgstr "Versión SSL inválida : %s" -#: nova/openstack/common/strutils.py:92 +#: nova/openstack/common/strutils.py:114 #, python-format msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "Valor '%(val)s' no reconocido, los valores aceptables son: %(acceptable)s" -#: nova/openstack/common/strutils.py:197 +#: nova/openstack/common/strutils.py:219 #, python-format msgid "Invalid unit system: \"%s\"" msgstr "Unidad del sistema no valida: \"%s\"" -#: nova/openstack/common/strutils.py:206 +#: nova/openstack/common/strutils.py:228 #, python-format msgid 
"Invalid string format: %s" msgstr "Formato inválido de cadena: %s" -#: nova/openstack/common/versionutils.py:69 +#: nova/openstack/common/versionutils.py:86 #, python-format msgid "" "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " @@ -7358,7 +6912,7 @@ msgstr "" "%(what)s es obsoleto así como %(as_of)s en beneficio de %(in_favor_of)s y" " puede ser removido en %(remove_in)s." -#: nova/openstack/common/versionutils.py:73 +#: nova/openstack/common/versionutils.py:90 #, python-format msgid "" "%(what)s is deprecated as of %(as_of)s and may be removed in " @@ -7367,6 +6921,16 @@ msgstr "" "%(what)s está obsoleto así como %(as_of)s y puede ser removido en " "%(remove_in)s. Y no se sustituirá." +#: nova/openstack/common/versionutils.py:94 +#, python-format +msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s." +msgstr "" + +#: nova/openstack/common/versionutils.py:97 +#, python-format +msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded." +msgstr "" + #: nova/openstack/common/db/sqlalchemy/migration.py:226 #, python-format msgid "" @@ -7384,11 +6948,11 @@ msgstr "" "La base de datos no está en control de versión, pero tiene tablas. Por " "favor indica la versión actual del esquema manualmente." -#: nova/openstack/common/db/sqlalchemy/utils.py:119 +#: nova/openstack/common/db/sqlalchemy/utils.py:118 msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Dirección de clasificación desconocida, debe ser 'desc' o ' asc'" -#: nova/openstack/common/db/sqlalchemy/utils.py:162 +#: nova/openstack/common/db/sqlalchemy/utils.py:161 #, python-format msgid "" "There is no `deleted` column in `%s` table. Project doesn't use soft-" @@ -7397,7 +6961,7 @@ msgstr "" "No existe la columna `deleted` en la tbala `%s`. El projecto on utiliza " "la característica de remoción suave." 
-#: nova/openstack/common/db/sqlalchemy/utils.py:181 +#: nova/openstack/common/db/sqlalchemy/utils.py:180 #, python-format msgid "There is no `project_id` column in `%s` table." msgstr "No existe la columna `project_id` en la tabla `%s`." @@ -7428,7 +6992,7 @@ msgstr "" msgid "Unsupported id columns type" msgstr "Tipo de identificador de columnas no soportado" -#: nova/pci/pci_manager.py:156 +#: nova/pci/pci_manager.py:113 #, python-format msgid "" "Trying to remove device with %(status)s ownership %(instance_uuid)s " @@ -7462,7 +7026,7 @@ msgstr "El controlador debe implementar schedule_run_instance" msgid "Driver must implement select_destinations" msgstr "El controlador debe implementar select_destinatios" -#: nova/scheduler/filter_scheduler.py:80 +#: nova/scheduler/filter_scheduler.py:84 #, python-format msgid "" "Attempting to build %(num_instances)d instance(s) uuids: " @@ -7471,15 +7035,23 @@ msgstr "" "Intentando construir %(num_instances)d instancia(s) con uuids: " "%(instance_uuids)s" -#: nova/scheduler/filter_scheduler.py:109 +#: nova/scheduler/filter_scheduler.py:113 #, python-format msgid "Choosing host %(weighed_host)s for instance %(instance_uuid)s" msgstr "Eligiendo anfitrión %(weighed_host)s para la instancia %(instance_uuid)s" -#: nova/scheduler/filter_scheduler.py:169 +#: nova/scheduler/filter_scheduler.py:173 msgid "Instance disappeared during scheduling" msgstr "La instancia ha desaparecido durante la programación" +#: nova/scheduler/filter_scheduler.py:219 +msgid "ServerGroupAffinityFilter not configured" +msgstr "" + +#: nova/scheduler/filter_scheduler.py:224 +msgid "ServerGroupAntiAffinityFilter not configured" +msgstr "" + #: nova/scheduler/host_manager.py:169 #, python-format msgid "Metric name unknown of %r" @@ -7524,7 +7096,6 @@ msgstr "" "'%s' a 'force_nodes'" #: nova/scheduler/host_manager.py:390 -#: nova/scheduler/filters/trusted_filter.py:208 #, python-format msgid "No service for compute ID %s" msgstr "No hay servicio para el ID de 
cálculo %s " @@ -7567,7 +7138,7 @@ msgstr "Error del último host: %(last_host)s (nodo %(last_node)s): %(exc)s" msgid "Invalid value for 'scheduler_max_attempts', must be >= 1" msgstr "Valor no válido para 'scheduler_max_attempts', debe ser >= 1 " -#: nova/scheduler/utils.py:233 +#: nova/scheduler/utils.py:231 #, python-format msgid "Ignoring the invalid elements of the option %(name)s: %(options)s" msgstr "Ignorando los elementos inválidos de la opción %(name)s: %(options)s" @@ -7577,6 +7148,12 @@ msgstr "Ignorando los elementos inválidos de la opción %(name)s: %(options)s" msgid "%(host_state)s has not been heard from in a while" msgstr "%(host_state)s no ha sido recibido durante un tiempo" +#: nova/scheduler/filters/exact_core_filter.py:36 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" +"VCPU no establecidas; suponiendo que la colección de CPU se ha " +"interrumpido" + #: nova/servicegroup/api.py:70 #, python-format msgid "unknown ServiceGroup driver name: %s" @@ -7695,15 +7272,15 @@ msgstr "Intento de instanciar sigleton" msgid "status must be available" msgstr "el estado debe ser disponible" -#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:245 +#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:290 msgid "already attached" msgstr "ya está conectado" -#: nova/tests/fake_volume.py:195 nova/volume/cinder.py:256 +#: nova/tests/fake_volume.py:195 nova/volume/cinder.py:301 msgid "Instance and volume not in same availability_zone" msgstr "La instancia y el volumen no están en la misma availability_zone" -#: nova/tests/fake_volume.py:200 nova/volume/cinder.py:262 +#: nova/tests/fake_volume.py:200 nova/volume/cinder.py:307 msgid "already detached" msgstr "ya está desconectado" @@ -7711,12 +7288,12 @@ msgstr "ya está desconectado" msgid "unexpected role header" msgstr "cabecera de rol inesperada" -#: nova/tests/api/openstack/test_faults.py:46 +#: nova/tests/api/openstack/test_faults.py:47 msgid "Should be translated." 
msgstr "" -#: nova/tests/api/openstack/compute/test_servers.py:3225 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2434 +#: nova/tests/api/openstack/compute/test_servers.py:3279 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2438 msgid "" "Quota exceeded for instances: Requested 1, but already used 10 of 10 " "instances" @@ -7724,46 +7301,46 @@ msgstr "" "Se ha superado la cuota para las instancias: solicitada 1, pero ya se han" " utilizado 10 de 10 instancias" -#: nova/tests/api/openstack/compute/test_servers.py:3230 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2439 +#: nova/tests/api/openstack/compute/test_servers.py:3284 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2443 msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram" msgstr "" "Se ha superado la cuota para ram: Solicitadas 4096, ya utilizadas 8192 de" " 10240 ram" -#: nova/tests/api/openstack/compute/test_servers.py:3235 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2444 +#: nova/tests/api/openstack/compute/test_servers.py:3289 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2448 msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores" msgstr "" "Se ha superado la cuota para núcleos: Solicitados 2, pero ya utilizados 9" " de 10 núcleos" -#: nova/tests/compute/test_compute.py:1680 -#: nova/tests/compute/test_compute.py:1707 -#: nova/tests/compute/test_compute.py:1785 -#: nova/tests/compute/test_compute.py:1825 -#: nova/tests/compute/test_compute.py:5603 +#: nova/tests/compute/test_compute.py:1696 +#: nova/tests/compute/test_compute.py:1723 +#: nova/tests/compute/test_compute.py:1801 +#: nova/tests/compute/test_compute.py:1841 +#: nova/tests/compute/test_compute.py:5644 #, python-format msgid "Running instances: %s" msgstr "Ejecutando instancias: %s" -#: nova/tests/compute/test_compute.py:1687 -#: nova/tests/compute/test_compute.py:1755 -#: 
nova/tests/compute/test_compute.py:1793 +#: nova/tests/compute/test_compute.py:1703 +#: nova/tests/compute/test_compute.py:1771 +#: nova/tests/compute/test_compute.py:1809 #, python-format msgid "After terminating instances: %s" msgstr "Después de terminar las instancias: %s" -#: nova/tests/compute/test_compute.py:5614 +#: nova/tests/compute/test_compute.py:5655 #, python-format msgid "After force-killing instances: %s" msgstr "Después de finalizar de forma forzada las instancias: %s" -#: nova/tests/compute/test_compute.py:6229 +#: nova/tests/compute/test_compute.py:6271 msgid "wrong host/node" msgstr "host/nodo incorrecto" -#: nova/tests/compute/test_compute.py:10820 +#: nova/tests/compute/test_compute.py:10867 msgid "spawn error" msgstr "error de generación" @@ -7771,7 +7348,16 @@ msgstr "error de generación" msgid "Keypair data is invalid" msgstr "Los datos del par de claves no son válidos" -#: nova/tests/db/test_migrations.py:866 +#: nova/tests/compute/test_resources.py:78 +#, python-format +msgid "Free %(free)d < requested %(requested)d " +msgstr "" + +#: nova/tests/compute/test_resources.py:329 +msgid "Free CPUs 2.00 VCPUs < requested 5 VCPUs" +msgstr "" + +#: nova/tests/db/test_migrations.py:923 #, python-format msgid "" "The following migrations are missing a downgrade:\n" @@ -7863,27 +7449,27 @@ msgstr "Cuerpo: %s" msgid "Unexpected status code" msgstr "Código de estado inesperado" -#: nova/tests/virt/hyperv/test_hypervapi.py:517 +#: nova/tests/virt/hyperv/test_hypervapi.py:513 msgid "fake vswitch not found" msgstr "vswitch falso no encontrado" -#: nova/tests/virt/hyperv/test_hypervapi.py:970 +#: nova/tests/virt/hyperv/test_hypervapi.py:966 msgid "Simulated failure" msgstr "Falla simulada" -#: nova/tests/virt/libvirt/fakelibvirt.py:1041 +#: nova/tests/virt/libvirt/fakelibvirt.py:1048 msgid "Expected a list for 'auth' parameter" msgstr "Se esperaba una lista para el parámetro 'auth'" -#: nova/tests/virt/libvirt/fakelibvirt.py:1045 +#: 
nova/tests/virt/libvirt/fakelibvirt.py:1052 msgid "Expected a function in 'auth[0]' parameter" msgstr "Se esperaba una función en el parámetro 'auth[0]' " -#: nova/tests/virt/libvirt/fakelibvirt.py:1049 +#: nova/tests/virt/libvirt/fakelibvirt.py:1056 msgid "Expected a function in 'auth[1]' parameter" msgstr "Se esperaba una función en el parámetro 'auth[1]' " -#: nova/tests/virt/libvirt/fakelibvirt.py:1060 +#: nova/tests/virt/libvirt/fakelibvirt.py:1067 msgid "" "virEventRegisterDefaultImpl() must be called before " "connection is used." @@ -7891,32 +7477,32 @@ msgstr "" "virEventRegisterDefaultImpl() debe ser invocado antes de que la conexión " "sea utilizada." -#: nova/tests/virt/vmwareapi/fake.py:244 +#: nova/tests/virt/vmwareapi/fake.py:241 #, python-format msgid "Property %(attr)s not set for the managed object %(name)s" msgstr "" "La propiedad %(attr)s no se ha establecido para el objeto gestionado " "%(name)s" -#: nova/tests/virt/vmwareapi/fake.py:969 +#: nova/tests/virt/vmwareapi/fake.py:985 msgid "There is no VM registered" msgstr "No hay ninguna VM registrada" -#: nova/tests/virt/vmwareapi/fake.py:971 nova/tests/virt/vmwareapi/fake.py:1307 +#: nova/tests/virt/vmwareapi/fake.py:987 nova/tests/virt/vmwareapi/fake.py:1323 #, python-format msgid "Virtual Machine with ref %s is not there" msgstr "La máquina virtual con la referencia %s no está allí" -#: nova/tests/virt/vmwareapi/fake.py:1096 +#: nova/tests/virt/vmwareapi/fake.py:1112 msgid "Session Invalid" msgstr "Sesión no válida" -#: nova/tests/virt/vmwareapi/fake.py:1304 +#: nova/tests/virt/vmwareapi/fake.py:1320 msgid "No Virtual Machine has been registered yet" msgstr "No se ha registrado aún ninguna máquina virtual " #: nova/tests/virt/vmwareapi/test_ds_util.py:221 -#: nova/virt/vmwareapi/ds_util.py:265 +#: nova/virt/vmwareapi/ds_util.py:267 #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" @@ -7946,39 +7532,53 @@ msgstr "" msgid "Multiple torrent URL fetcher extensions 
found. Failing." msgstr "Se han encontrado múltiples URL de buscadores torrent. Fallando." -#: nova/virt/block_device.py:243 +#: nova/virt/block_device.py:241 #, python-format msgid "Driver failed to attach volume %(volume_id)s at %(mountpoint)s" msgstr "" "El controlador ha fallado al asignar el volumen %(volume_id)s en " "%(mountpoint)s" -#: nova/virt/block_device.py:362 +#: nova/virt/block_device.py:363 #, python-format msgid "Booting with volume %(volume_id)s at %(mountpoint)s" msgstr "Arrancando con el volumen %(volume_id)s en %(mountpoint)s" -#: nova/virt/driver.py:1242 +#: nova/virt/diagnostics.py:143 +#, python-format +msgid "Invalid type for %s" +msgstr "" + +#: nova/virt/diagnostics.py:147 +#, python-format +msgid "Invalid type for %s entry" +msgstr "" + +#: nova/virt/driver.py:705 +msgid "Hypervisor driver does not support post_live_migration_at_source method" +msgstr "" + +#: nova/virt/driver.py:1261 msgid "Event must be an instance of nova.virt.event.Event" msgstr "El suceso debe ser una instancia de un nova.virt.event.Event" -#: nova/virt/driver.py:1248 +#: nova/virt/driver.py:1267 #, python-format msgid "Exception dispatching event %(event)s: %(ex)s" msgstr "Excepción al asignar el suceso %(event)s: %(ex)s" -#: nova/virt/driver.py:1330 +#: nova/virt/driver.py:1361 msgid "Compute driver option required, but not specified" msgstr "" "La opción de controlador de cálculo es necesaria, pero no se ha " "especificado" -#: nova/virt/driver.py:1333 +#: nova/virt/driver.py:1364 #, python-format msgid "Loading compute driver '%s'" msgstr "Cargando controlador de cálculo '%s' " -#: nova/virt/driver.py:1340 +#: nova/virt/driver.py:1371 msgid "Unable to load the virtualization driver" msgstr "Incapaz de cargar el controlador de virtualización" @@ -8007,7 +7607,7 @@ msgstr "Desconocido" msgid "Key '%(key)s' not in instances '%(inst)s'" msgstr "La clave '%(key)s' no está en las instancias '%(inst)s'" -#: nova/virt/firewall.py:176 +#: nova/virt/firewall.py:174 msgid 
"Attempted to unfilter instance which is not filtered" msgstr "Se ha intentado eliminar filtro de instancia que no está filtrada" @@ -8089,45 +7689,45 @@ msgstr "cpu_arch no se ha encontrado en flavor_extra_specs" msgid "Baremetal node id not supplied to driver for %r" msgstr "ID de nodo de máquina vacía no proporcionado a controlador para %r" -#: nova/virt/baremetal/driver.py:289 +#: nova/virt/baremetal/driver.py:292 #, python-format msgid "Error deploying instance %(instance)s on baremetal node %(node)s." msgstr "" "Error al desplegar la instancia %(instance)s en nodo de máquina vacía " "%(node)s." -#: nova/virt/baremetal/driver.py:364 +#: nova/virt/baremetal/driver.py:367 #, python-format msgid "Baremetal power manager failed to restart node for instance %r" msgstr "" "El gestor de alimentación de máquina vacía no ha podido reiniciar el nodo" " para la instancia %r" -#: nova/virt/baremetal/driver.py:376 +#: nova/virt/baremetal/driver.py:379 #, python-format msgid "Destroy called on non-existing instance %s" msgstr "Se ha llamado una destrucción en una instancia no existente %s" -#: nova/virt/baremetal/driver.py:394 +#: nova/virt/baremetal/driver.py:397 #, python-format msgid "Error from baremetal driver during destroy: %s" msgstr "Error del controlador de máquina vacía durante la destrucción: %s" -#: nova/virt/baremetal/driver.py:399 +#: nova/virt/baremetal/driver.py:402 #, python-format msgid "Error while recording destroy failure in baremetal database: %s" msgstr "" "Error al registrar la anomalía de destrcción en la base de datos de " "máquina vacía: %s" -#: nova/virt/baremetal/driver.py:414 +#: nova/virt/baremetal/driver.py:417 #, python-format msgid "Baremetal power manager failed to stop node for instance %r" msgstr "" "El gestor de alimentación de máquina vacía no ha podido detener el nodo " "para la instancia %r" -#: nova/virt/baremetal/driver.py:427 +#: nova/virt/baremetal/driver.py:430 #, python-format msgid "Baremetal power manager failed to start 
node for instance %r" msgstr "" @@ -8218,7 +7818,7 @@ msgstr "" "No se puede activar el cargador de arranque PXE. Los parámetros de " "arranque siguientes no se han pasado al controlador de máquina vacía: %s" -#: nova/virt/baremetal/pxe.py:465 nova/virt/baremetal/tilera.py:317 +#: nova/virt/baremetal/pxe.py:465 nova/virt/baremetal/tilera.py:318 #, python-format msgid "Node associated with another instance while waiting for deploy of %s" msgstr "Nodo asociado con otra instancia mientras se esperaba el despliegue de %s" @@ -8238,7 +7838,7 @@ msgstr "El despliegue de PXE se ha completado para la instancia %s" msgid "PXE deploy failed for instance %s" msgstr "Se ha encontrado un error en el despliegue de PXE para la instancia %s" -#: nova/virt/baremetal/pxe.py:483 nova/virt/baremetal/tilera.py:342 +#: nova/virt/baremetal/pxe.py:483 nova/virt/baremetal/tilera.py:343 #, python-format msgid "Baremetal node deleted while waiting for deployment of instance %s" msgstr "" @@ -8262,21 +7862,21 @@ msgstr "" "parámetros de arranque no han sido proporcionados al controlador de " "baremetal: %s" -#: nova/virt/baremetal/tilera.py:323 +#: nova/virt/baremetal/tilera.py:324 #, python-format msgid "Tilera deploy started for instance %s" msgstr "Despliegue Tilera iniciado para la instancia %s" -#: nova/virt/baremetal/tilera.py:329 +#: nova/virt/baremetal/tilera.py:330 #, python-format msgid "Tilera deploy completed for instance %s" msgstr "Despliege Tilera completado para instancia %s" -#: nova/virt/baremetal/tilera.py:337 +#: nova/virt/baremetal/tilera.py:338 msgid "Node is unknown error state." msgstr "El nodo está en un estado de error desconocido." 
-#: nova/virt/baremetal/tilera.py:340 +#: nova/virt/baremetal/tilera.py:341 #, python-format msgid "Tilera deploy failed for instance %s" msgstr "Despliegue tilera fallido para la instancia %s" @@ -8398,76 +7998,57 @@ msgstr "No hay ninguna IP PXE fija asociada a %s" msgid "detach volume could not find tid for %s" msgstr "el volumen de desconexión no ha podido encontrar tid para %s" -#: nova/virt/baremetal/db/sqlalchemy/api.py:198 +#: nova/virt/baremetal/db/sqlalchemy/api.py:199 msgid "instance_uuid must be supplied to bm_node_associate_and_update" msgstr "instance_uuid se debe proporcionar para bm_node_associate_and_update" -#: nova/virt/baremetal/db/sqlalchemy/api.py:210 +#: nova/virt/baremetal/db/sqlalchemy/api.py:211 #, python-format msgid "Failed to associate instance %(i_uuid)s to baremetal node %(n_uuid)s." msgstr "" "No se ha podido asociar la instancia %(i_uuid)s a nodo de máquina vacía " "%(n_uuid)s." -#: nova/virt/baremetal/db/sqlalchemy/api.py:245 -#: nova/virt/baremetal/db/sqlalchemy/api.py:287 +#: nova/virt/baremetal/db/sqlalchemy/api.py:246 +#: nova/virt/baremetal/db/sqlalchemy/api.py:288 #, python-format msgid "Baremetal interface %s not found" msgstr "Interfaz de máquina vacía %s no encontrada" -#: nova/virt/baremetal/db/sqlalchemy/api.py:297 +#: nova/virt/baremetal/db/sqlalchemy/api.py:298 #, python-format msgid "Baremetal interface %s already in use" msgstr "La interfaz de máquina vacía %s ya se está utilizando" -#: nova/virt/baremetal/db/sqlalchemy/api.py:310 +#: nova/virt/baremetal/db/sqlalchemy/api.py:311 #, python-format msgid "Baremetal virtual interface %s not found" msgstr "No se ha encontrado la interfaz virtual de máquina vacía %s" -#: nova/virt/disk/api.py:280 +#: nova/virt/disk/api.py:292 msgid "image already mounted" msgstr "imagen ya montada" -#: nova/virt/disk/api.py:354 -#, python-format -msgid "Ignoring error injecting data into image (%(e)s)" -msgstr "Ignorando el error al inyectar datos en la imagen (%(e)s)" - -#: 
nova/virt/disk/api.py:376 -#, python-format -msgid "" -"Failed to mount container filesystem '%(image)s' on '%(target)s': " -"%(errors)s" -msgstr "" -"Se ha encontrado un error en el montaje del sistema de archivos de " -"contenedor '%(image)s' en '%(target)s': : %(errors)s" - -#: nova/virt/disk/api.py:406 +#: nova/virt/disk/api.py:418 #, python-format msgid "Failed to teardown container filesystem: %s" msgstr "Fallo al desarmar el contenedor de sistema de archivo: %s" -#: nova/virt/disk/api.py:419 +#: nova/virt/disk/api.py:431 #, python-format msgid "Failed to umount container filesystem: %s" msgstr "No se ha podido desmontar el sistema de archivos de contenedor: %s" -#: nova/virt/disk/api.py:444 -#, python-format -msgid "Ignoring error injecting %(inject)s into image (%(e)s)" -msgstr "Ignorando el error al inyectar %(inject)s en la imagen (%(e)s)" - -#: nova/virt/disk/api.py:604 +#: nova/virt/disk/api.py:616 msgid "Not implemented on Windows" msgstr "No implementado en Windows" -#: nova/virt/disk/api.py:631 +#: nova/virt/disk/api.py:643 #, python-format msgid "User %(username)s not found in password file." msgstr "El usuario %(username)s no se ha encontrado en el archivo de contraseña." -#: nova/virt/disk/api.py:647 +#: nova/virt/disk/api.py:659 #, python-format msgid "User %(username)s not found in shadow file." msgstr "El usuario %(username)s no se ha encontrado en el archivo de duplicación. 
" @@ -8549,22 +8130,22 @@ msgstr "el dispositivo nbd %s no se ha mostrado" msgid "Detaching from erroneous nbd device returned error: %s" msgstr "La desconexión del dispositivo nbd erróneo ha devuelto un error: %s" -#: nova/virt/disk/vfs/guestfs.py:64 +#: nova/virt/disk/vfs/guestfs.py:77 #, python-format msgid "No operating system found in %s" msgstr "No se ha encontrado ningún sistema operativo en %s" -#: nova/virt/disk/vfs/guestfs.py:70 +#: nova/virt/disk/vfs/guestfs.py:83 #, python-format msgid "Multi-boot operating system found in %s" msgstr "Se ha encontrado sistema operativo multiarranque en %s" -#: nova/virt/disk/vfs/guestfs.py:81 +#: nova/virt/disk/vfs/guestfs.py:94 #, python-format msgid "No mount points found in %(root)s of %(imgfile)s" msgstr "No se han encontrado puntos de montaje en %(root)s de %(imgfile)s" -#: nova/virt/disk/vfs/guestfs.py:95 +#: nova/virt/disk/vfs/guestfs.py:108 #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(imgfile)s with libguestfs" @@ -8573,22 +8154,22 @@ msgstr "" "Error montaod %(device)s en %(dir)s en imagen %(imgfile)s con libguestfs " "(%(e)s)" -#: nova/virt/disk/vfs/guestfs.py:131 +#: nova/virt/disk/vfs/guestfs.py:154 #, python-format msgid "Error mounting %(imgfile)s with libguestfs (%(e)s)" msgstr "Error al montar %(imgfile)s con libguestfs (%(e)s)" -#: nova/virt/disk/vfs/guestfs.py:147 +#: nova/virt/disk/vfs/guestfs.py:170 #, python-format msgid "Failed to close augeas %s" msgstr "No se ha podido cerrar augeas %s" -#: nova/virt/disk/vfs/guestfs.py:155 +#: nova/virt/disk/vfs/guestfs.py:178 #, python-format msgid "Failed to shutdown appliance %s" msgstr "No se ha podido concluir el dispositivo %s" -#: nova/virt/disk/vfs/guestfs.py:163 +#: nova/virt/disk/vfs/guestfs.py:186 #, python-format msgid "Failed to close guest handle %s" msgstr "No se ha podido cerrar manejador de invitado %s" @@ -8659,11 +8240,11 @@ msgstr "VM no encontrada: %s" msgid "Duplicate VM name found: %s" msgstr "Se ha encontrado 
nombre de VM duplicado: %s" -#: nova/virt/hyperv/migrationops.py:97 +#: nova/virt/hyperv/migrationops.py:98 msgid "Cannot cleanup migration files" msgstr "No se pueden limpiar los archivos de migración" -#: nova/virt/hyperv/migrationops.py:105 +#: nova/virt/hyperv/migrationops.py:106 #, python-format msgid "" "Cannot resize the root disk to a smaller size. Current size: " @@ -8672,11 +8253,16 @@ msgstr "" "No se puede cambiar el tamaño del disco raíz a un menor tamaño. Tamaño " "actual: %(curr_root_gb)s GB. Tamaño solicitado: %(new_root_gb)s GB" -#: nova/virt/hyperv/migrationops.py:200 +#: nova/virt/hyperv/migrationops.py:155 +#, python-format +msgid "Config drive is required by instance: %s, but it does not exist." +msgstr "" + +#: nova/virt/hyperv/migrationops.py:214 msgid "Cannot resize a VHD to a smaller size" msgstr "No se puede redimensionar un VHD a un tamaño menor" -#: nova/virt/hyperv/migrationops.py:245 +#: nova/virt/hyperv/migrationops.py:259 #, python-format msgid "Cannot find boot VHD file for instance: %s" msgstr "No se puede encontrar el archivo VHD para la instancia: %s" @@ -8697,7 +8283,7 @@ msgstr "" msgid "No external vswitch found" msgstr "No se ha encontrado vswitch externo" -#: nova/virt/hyperv/pathutils.py:72 +#: nova/virt/hyperv/pathutils.py:73 #, python-format msgid "The file copy from %(src)s to %(dest)s failed" msgstr "Se ha encontrado un error en la copia del archivo de %(src)s a %(dest)s" @@ -8712,25 +8298,20 @@ msgstr "No se ha podido eliminar la instantánea para VM %s" msgid "Unsupported disk format: %s" msgstr "Formato de disco no soportado: %s" -#: nova/virt/hyperv/vhdutils.py:151 -#, python-format -msgid "The %(vhd_type)s type VHD is not supported" -msgstr "El VHD de tipo %(vhd_type)s no está soportado" +#: nova/virt/hyperv/vhdutils.py:77 +msgid "VHD differencing disks cannot be resized" +msgstr "" -#: nova/virt/hyperv/vhdutils.py:162 +#: nova/virt/hyperv/vhdutils.py:165 #, python-format msgid "Unable to obtain block size from VHD 
%(vhd_path)s" msgstr "Incapaz de obtener el tamaño de bloque del VHD %(vhd_path)s" -#: nova/virt/hyperv/vhdutils.py:209 +#: nova/virt/hyperv/vhdutils.py:212 msgid "Unsupported virtual disk format" msgstr "Formato de disco virtual no soportado." -#: nova/virt/hyperv/vhdutilsv2.py:135 -msgid "Differencing VHDX images are not supported" -msgstr "La diferenciación de imágenes VHDX no está soportada" - -#: nova/virt/hyperv/vhdutilsv2.py:158 +#: nova/virt/hyperv/vhdutilsv2.py:160 #, python-format msgid "Unable to obtain internal size from VHDX: %(vhd_path)s. Exception: %(ex)s" msgstr "" @@ -8742,48 +8323,46 @@ msgstr "" msgid "VIF driver not found for network_api_class: %s" msgstr "No se ha encontrado el controlador VIF para network_api_class: %s" -#: nova/virt/hyperv/vmops.py:169 +#: nova/virt/hyperv/vmops.py:198 #, python-format msgid "" -"Cannot resize a VHD to a smaller size, the original size is " -"%(base_vhd_size)s, the newer size is %(root_vhd_size)s" +"Cannot resize a VHD to a smaller size, the original size is %(old_size)s," +" the newer size is %(new_size)s" msgstr "" -"No se puede cambiar de tamaño un VHD a un tamaño menor, el tamaño " -"original es %(base_vhd_size)s, el tamaño nuevo es %(root_vhd_size)s" -#: nova/virt/hyperv/vmops.py:206 +#: nova/virt/hyperv/vmops.py:228 msgid "Spawning new instance" msgstr "Generando nueva instancia" -#: nova/virt/hyperv/vmops.py:280 nova/virt/vmwareapi/vmops.py:567 +#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:576 #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "config_drive_format \"%s\" no válido" -#: nova/virt/hyperv/vmops.py:283 nova/virt/vmwareapi/vmops.py:571 +#: nova/virt/hyperv/vmops.py:307 nova/virt/vmwareapi/vmops.py:580 msgid "Using config drive for instance" msgstr "Utilizando dispositivo de configuración para instancia" -#: nova/virt/hyperv/vmops.py:296 +#: nova/virt/hyperv/vmops.py:320 #, python-format msgid "Creating config drive at %(path)s" msgstr "Creando unidad de 
configuración en %(path)s" -#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:596 +#: nova/virt/hyperv/vmops.py:328 nova/virt/vmwareapi/vmops.py:605 #, python-format msgid "Creating config drive failed with error: %s" msgstr "La creación de unidad de configuración ha fallado con el error: %s" -#: nova/virt/hyperv/vmops.py:340 +#: nova/virt/hyperv/vmops.py:371 msgid "Got request to destroy instance" msgstr "Se ha obtenido una solicitud para destruir instancia" -#: nova/virt/hyperv/vmops.py:359 +#: nova/virt/hyperv/vmops.py:390 #, python-format msgid "Failed to destroy instance: %s" msgstr "No se ha podido destruir instancia: %s" -#: nova/virt/hyperv/vmops.py:412 +#: nova/virt/hyperv/vmops.py:443 #, python-format msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" msgstr "No se ha podido cambiar el estado de vm de %(vm_name)s a %(req_state)s " @@ -8867,21 +8446,21 @@ msgstr "No hay nombres de dispositivo de disco libres para el prefijo '%s'" msgid "Unable to determine disk bus for '%s'" msgstr "No se puede determinar el bus de disco para '%s'" -#: nova/virt/libvirt/driver.py:556 +#: nova/virt/libvirt/driver.py:552 #, python-format msgid "Connection to libvirt lost: %s" msgstr "Conexión hacia libvirt perdida: %s" -#: nova/virt/libvirt/driver.py:739 +#: nova/virt/libvirt/driver.py:741 #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "No se puede manejar la solicitud de autenticación para las credenciales %d" -#: nova/virt/libvirt/driver.py:932 +#: nova/virt/libvirt/driver.py:924 msgid "operation time out" msgstr "Tiempo de espera agotado para la operación" -#: nova/virt/libvirt/driver.py:1257 +#: nova/virt/libvirt/driver.py:1248 #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " @@ -8890,61 +8469,66 @@ msgstr "" "El volúmen establece el tamaño de bloque, pero el hipervisor libvirt " "actual '%s' no soporta tamaño de bloque personalizado." 
-#: nova/virt/libvirt/driver.py:1264 +#: nova/virt/libvirt/driver.py:1255 #, python-format msgid "Volume sets block size, but libvirt '%s' or later is required." msgstr "" "El volúmen establece el tamaño de bloque, pero se requiere libvirt '%s' o" " mayor." -#: nova/virt/libvirt/driver.py:1352 +#: nova/virt/libvirt/driver.py:1345 msgid "Swap only supports host devices" msgstr "El espacio de intercambio solamente soporta dispositivos de anfitrión " -#: nova/virt/libvirt/driver.py:1635 +#: nova/virt/libvirt/driver.py:1631 msgid "libvirt error while requesting blockjob info." msgstr "error de libvirt al solicitar información de blockjob." -#: nova/virt/libvirt/driver.py:1776 +#: nova/virt/libvirt/driver.py:1774 msgid "Found no disk to snapshot." msgstr "No se ha encontrado disco relacionado a instantánea." -#: nova/virt/libvirt/driver.py:1868 +#: nova/virt/libvirt/driver.py:1866 #, python-format msgid "Unknown type: %s" msgstr "Tipo desconocido: %s" -#: nova/virt/libvirt/driver.py:1873 +#: nova/virt/libvirt/driver.py:1871 msgid "snapshot_id required in create_info" msgstr "snapshot_id es requerido en create_info" -#: nova/virt/libvirt/driver.py:1931 +#: nova/virt/libvirt/driver.py:1929 #, python-format msgid "Libvirt '%s' or later is required for online deletion of volume snapshots." msgstr "" "Libvirt '%s' o mayor se requiere para remoción en línea de instantáneas " "de volumen." -#: nova/virt/libvirt/driver.py:1938 +#: nova/virt/libvirt/driver.py:1936 #, python-format msgid "Unknown delete_info type %s" msgstr "Tipo delete_info %s desconocido" -#: nova/virt/libvirt/driver.py:1966 +#: nova/virt/libvirt/driver.py:1964 #, python-format msgid "Disk with id: %s not found attached to instance." 
msgstr "" -#: nova/virt/libvirt/driver.py:2407 nova/virt/xenapi/vmops.py:1552 +#: nova/virt/libvirt/driver.py:2406 nova/virt/xenapi/vmops.py:1561 msgid "Guest does not have a console available" msgstr "El invitado no tiene una consola disponible" -#: nova/virt/libvirt/driver.py:2823 +#: nova/virt/libvirt/driver.py:2735 +#, python-format +msgid "%s format is not supported" +msgstr "" + +#: nova/virt/libvirt/driver.py:2841 #, python-format msgid "Detaching PCI devices with libvirt < %(ver)s is not permitted" msgstr "La remoción de dispositivos PCI con libvirt < %(ver)s no está permitida" -#: nova/virt/libvirt/driver.py:2989 +#: nova/virt/libvirt/driver.py:2984 #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt " @@ -8953,27 +8537,19 @@ msgstr "" "La configuración ha solicitado un modelo CPU explícito, pero el " "hipervisor libvirt actual '%s' no soporta la selección de modelos de CPU" -#: nova/virt/libvirt/driver.py:2995 +#: nova/virt/libvirt/driver.py:2990 msgid "Config requested a custom CPU model, but no model name was provided" msgstr "" "La configuración ha solicitado un modelo de CPU personalizado, pero no se" " ha proporcionado ningún nombre de modelo" -#: nova/virt/libvirt/driver.py:2999 +#: nova/virt/libvirt/driver.py:2994 msgid "A CPU model name should not be set when a host CPU model is requested" msgstr "" "No se debe establecer un nombre de modelo de CPU cuando se solicita un " "modelo de CPU de host" -#: nova/virt/libvirt/driver.py:3019 -msgid "" -"Passthrough of the host CPU was requested but this libvirt version does " -"not support this feature" -msgstr "" -"Se ha solicitado el paso a través de la CPU de host pero esta versión de " -"libvirt no soporta esta función" - -#: nova/virt/libvirt/driver.py:3567 +#: nova/virt/libvirt/driver.py:3586 #, python-format msgid "" "Error from libvirt while looking up %(instance_id)s: [Error Code " @@ -8982,7 +8558,7 @@ msgstr "" "Error de libvirt durante la búsqueda de 
%(instance_id)s: [Código de Error" " %(error_code)s] %(ex)s" -#: nova/virt/libvirt/driver.py:3588 +#: nova/virt/libvirt/driver.py:3607 #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " @@ -8991,27 +8567,27 @@ msgstr "" "Error de libvirt al buscar %(instance_name)s: [Código de error " "%(error_code)s] %(ex)s" -#: nova/virt/libvirt/driver.py:3851 +#: nova/virt/libvirt/driver.py:3873 msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range." msgstr "Configuración vcpu_pin_set inválida, fuera de rango de cpu de hipervisor." -#: nova/virt/libvirt/driver.py:3974 +#: nova/virt/libvirt/driver.py:3998 msgid "libvirt version is too old (does not support getVersion)" msgstr "La versión libvirt es demasiado antigua (no soporta getVersion)" -#: nova/virt/libvirt/driver.py:4335 +#: nova/virt/libvirt/driver.py:4359 msgid "Block migration can not be used with shared storage." msgstr "" "No se puede utilizar la migración de bloque con almacenamiento " "compartido. " -#: nova/virt/libvirt/driver.py:4344 +#: nova/virt/libvirt/driver.py:4368 msgid "Live migration can not be used without shared storage." msgstr "" "No se puede utilizar la migración en directo con almacenamiento " "compartido." 
-#: nova/virt/libvirt/driver.py:4414 +#: nova/virt/libvirt/driver.py:4438 #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too " @@ -9021,7 +8597,7 @@ msgstr "" "demasiado grande (disponible en host de destino: %(available)s < " "necesario: %(necessary)s)" -#: nova/virt/libvirt/driver.py:4453 +#: nova/virt/libvirt/driver.py:4477 #, python-format msgid "" "CPU doesn't have compatibility.\n" @@ -9036,12 +8612,12 @@ msgstr "" "\n" "Consulte %(u)s" -#: nova/virt/libvirt/driver.py:4516 +#: nova/virt/libvirt/driver.py:4540 #, python-format msgid "The firewall filter for %s does not exist" msgstr "El filtro de cortafuegos para %s no existe " -#: nova/virt/libvirt/driver.py:4579 +#: nova/virt/libvirt/driver.py:4603 msgid "" "Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag " "or your destination node does not support retrieving listen addresses. " @@ -9050,7 +8626,7 @@ msgid "" "address (0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)." msgstr "" -#: nova/virt/libvirt/driver.py:4596 +#: nova/virt/libvirt/driver.py:4620 msgid "" "Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag," " and the graphics (VNC and/or SPICE) listen addresses on the destination" @@ -9060,7 +8636,7 @@ msgid "" "succeed, but the VM will continue to listen on the current addresses." msgstr "" -#: nova/virt/libvirt/driver.py:4964 +#: nova/virt/libvirt/driver.py:4997 #, python-format msgid "" "Error from libvirt while getting description of %(instance_name)s: [Error" @@ -9069,34 +8645,46 @@ msgstr "" "Error de libvirt al obtener la descripción de %(instance_name)s: [Código " "de error %(error_code)s] %(ex)s" -#: nova/virt/libvirt/driver.py:5090 +#: nova/virt/libvirt/driver.py:5123 msgid "Unable to resize disk down." msgstr "Incapaz de reducir el tamaño del disco." 
-#: nova/virt/libvirt/imagebackend.py:257 +#: nova/virt/libvirt/imagebackend.py:258 #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "No se puede cargar la linea %(line)s, se ha obtenido el error %(error)s" -#: nova/virt/libvirt/imagebackend.py:272 +#: nova/virt/libvirt/imagebackend.py:273 msgid "Attempted overwrite of an existing value." msgstr "Se ha intentado sobreescribir un valor ya existente." -#: nova/virt/libvirt/imagebackend.py:433 +#: nova/virt/libvirt/imagebackend.py:316 +msgid "clone() is not implemented" +msgstr "" + +#: nova/virt/libvirt/imagebackend.py:449 msgid "You should specify images_volume_group flag to use LVM images." msgstr "" "Debes especificar la bandera images_volue_group para utilizar imagenes " "LVM." -#: nova/virt/libvirt/imagebackend.py:548 +#: nova/virt/libvirt/imagebackend.py:522 msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "Debes especificar la bandera images_rbd_pool para utilizar imagenes rbd." 
-#: nova/virt/libvirt/imagebackend.py:660 -msgid "rbd python libraries not found" -msgstr "Las librerías rbd python no han sido encontradas" +#: nova/virt/libvirt/imagebackend.py:612 +msgid "installed version of librbd does not support cloning" +msgstr "" + +#: nova/virt/libvirt/imagebackend.py:623 +msgid "Image is not raw format" +msgstr "" + +#: nova/virt/libvirt/imagebackend.py:631 +msgid "No image locations are accessible" +msgstr "" -#: nova/virt/libvirt/imagebackend.py:703 +#: nova/virt/libvirt/imagebackend.py:651 #, python-format msgid "Unknown image_type=%s" msgstr "image_type=%s desconocido " @@ -9125,25 +8713,41 @@ msgstr "La vía de acceso %s debe ser el volumen lógico LVM" msgid "volume_clear='%s' is not handled" msgstr "volume_clear='%s' no está manejado" +#: nova/virt/libvirt/rbd.py:104 +msgid "rbd python libraries not found" +msgstr "Las librerías rbd python no han sido encontradas" + +#: nova/virt/libvirt/rbd.py:159 +msgid "Not stored in rbd" +msgstr "No está almacenado en rbd" + +#: nova/virt/libvirt/rbd.py:163 +msgid "Blank components" +msgstr "Componentes en blanco" + +#: nova/virt/libvirt/rbd.py:166 +msgid "Not an rbd snapshot" +msgstr "No es una instantánea rbd" + #: nova/virt/libvirt/utils.py:79 msgid "Cannot find any Fibre Channel HBAs" msgstr "No se puede encontrar ningún HBA de canal de fibra" -#: nova/virt/libvirt/utils.py:437 +#: nova/virt/libvirt/utils.py:391 msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "" "No se puede recuperar la vía de acceso ed dispositivo raíz de la " "configuración de libvirt de instancia" -#: nova/virt/libvirt/vif.py:356 nova/virt/libvirt/vif.py:574 -#: nova/virt/libvirt/vif.py:750 +#: nova/virt/libvirt/vif.py:338 nova/virt/libvirt/vif.py:545 +#: nova/virt/libvirt/vif.py:709 msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" "El parámetro vif_type debe estar presente para esta implementación de " "vif_driver" -#: nova/virt/libvirt/vif.py:362 
nova/virt/libvirt/vif.py:580 -#: nova/virt/libvirt/vif.py:756 +#: nova/virt/libvirt/vif.py:344 nova/virt/libvirt/vif.py:551 +#: nova/virt/libvirt/vif.py:715 #, python-format msgid "Unexpected vif_type=%s" msgstr "vif_type=%s inesperado" @@ -9166,55 +8770,27 @@ msgstr "No se puede localizar ningún dispositivo de canal de fibra" msgid "Fibre Channel device not found." msgstr "No se ha encontrado el dispositivo de canal de fibra." -#: nova/virt/vmwareapi/driver.py:104 -msgid "" -"The VMware ESX driver is now deprecated and will be removed in the Juno " -"release. The VC driver will remain and continue to be supported." -msgstr "" -"El controlador de VMware ESX esta ahora obsoleto y será removido en la " -"liberación Juno. El controlador CV se mantendrá y seguirá siendo " -"soportado." - -#: nova/virt/vmwareapi/driver.py:116 -msgid "" -"Must specify host_ip, host_username and host_password to use " -"compute_driver=vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver" -msgstr "" -"Se debe especificar host_ip, host_username y host_password para usar " -"compute_driver=vmwareapi.VMwareESXDriver o vmwareapi.VMwareVCDriver" - -#: nova/virt/vmwareapi/driver.py:128 +#: nova/virt/vmwareapi/driver.py:127 #, python-format msgid "Invalid Regular Expression %s" msgstr "La expresión regular %s es inválida" -#: nova/virt/vmwareapi/driver.py:243 -msgid "Instance cannot be found in host, or in an unknownstate." 
-msgstr "" -"La instancia no se puede encontrar en el anfitrión o en un estado " -"desconocido" - -#: nova/virt/vmwareapi/driver.py:403 +#: nova/virt/vmwareapi/driver.py:141 #, python-format msgid "All clusters specified %s were not found in the vCenter" msgstr "Todos los clusters especificados %s no fueron encontrados en vCenter" -#: nova/virt/vmwareapi/driver.py:412 -#, python-format -msgid "The following clusters could not be found in the vCenter %s" -msgstr "Los siguientes clusters no pueden ser encontrados en el vcenter %s" - -#: nova/virt/vmwareapi/driver.py:551 +#: nova/virt/vmwareapi/driver.py:319 #, python-format msgid "The resource %s does not exist" msgstr "El recurso %s no existe" -#: nova/virt/vmwareapi/driver.py:597 +#: nova/virt/vmwareapi/driver.py:381 #, python-format msgid "Invalid cluster or resource pool name : %s" msgstr "Cluster o nombre de pool de recursos inválido: %s" -#: nova/virt/vmwareapi/driver.py:771 +#: nova/virt/vmwareapi/driver.py:555 msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we " "do not return uptime for just one host." @@ -9223,22 +8799,22 @@ msgstr "" "vCenter de VMware; por lo tanto no se puede regresar tiempo de ejecución " "solamente para un huésped." -#: nova/virt/vmwareapi/driver.py:884 +#: nova/virt/vmwareapi/driver.py:678 #, python-format msgid "Unable to validate session %s!" msgstr "Incapaz de validar sesión %s!" -#: nova/virt/vmwareapi/driver.py:926 +#: nova/virt/vmwareapi/driver.py:720 #, python-format msgid "Session %s is inactive!" msgstr "La sesión %s se encuentra inactiva!" 
-#: nova/virt/vmwareapi/driver.py:1017 +#: nova/virt/vmwareapi/driver.py:811 #, python-format msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" msgstr "Tarea [%(task_name)s] %(task_ref)s estado: error %(error_info)s" -#: nova/virt/vmwareapi/driver.py:1027 +#: nova/virt/vmwareapi/driver.py:821 #, python-format msgid "In vmwareapi:_poll_task, Got this error %s" msgstr "En vmwareapi:_poll_task, se ha obtenido este error %s" @@ -9259,15 +8835,15 @@ msgstr "" msgid "Capacity is smaller than free space" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:109 +#: nova/virt/vmwareapi/ds_util.py:111 msgid "datastore name empty" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:114 nova/virt/vmwareapi/ds_util.py:146 +#: nova/virt/vmwareapi/ds_util.py:116 nova/virt/vmwareapi/ds_util.py:148 msgid "path component cannot be None" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:160 +#: nova/virt/vmwareapi/ds_util.py:162 msgid "datastore path empty" msgstr "" @@ -9431,26 +9007,26 @@ msgstr "Excepción en %s " msgid "Unable to retrieve value for %(path)s Reason: %(reason)s" msgstr "Incapaz de obtener valor de %(path)s Razón: %(reason)s" -#: nova/virt/vmwareapi/vm_util.py:196 +#: nova/virt/vmwareapi/vm_util.py:202 #, python-format msgid "%s is not supported." msgstr "%s no está soportada." -#: nova/virt/vmwareapi/vm_util.py:989 +#: nova/virt/vmwareapi/vm_util.py:1037 msgid "No host available on cluster" msgstr "No hay anfitrión disponible en cluster." 
-#: nova/virt/vmwareapi/vm_util.py:1083 +#: nova/virt/vmwareapi/vm_util.py:1131 #, python-format msgid "Failed to get cluster references %s" msgstr "Fallo al obtener las referencias del cluster %s" -#: nova/virt/vmwareapi/vm_util.py:1095 +#: nova/virt/vmwareapi/vm_util.py:1143 #, python-format msgid "Failed to get resource pool references %s" msgstr "Fallo al obtener las referencias del pool de recursos %s" -#: nova/virt/vmwareapi/vm_util.py:1285 +#: nova/virt/vmwareapi/vm_util.py:1334 msgid "vmwareapi:vm_util:clone_vmref_for_instance, called with vm_ref=None" msgstr "" "vmwareapi:vm_util:clone_vmref_for_instance, ha sido llamada con " @@ -9461,15 +9037,15 @@ msgstr "" msgid "Extending virtual disk failed with error: %s" msgstr "La extensión del disco virtual ha fallado con el error: %s" -#: nova/virt/vmwareapi/vmops.py:249 +#: nova/virt/vmwareapi/vmops.py:253 msgid "Image disk size greater than requested disk size" msgstr "La imagen de disco es más grande que el tamaño del disco solicitado" -#: nova/virt/vmwareapi/vmops.py:856 +#: nova/virt/vmwareapi/vmops.py:861 msgid "instance is not powered on" msgstr "instancia no activada" -#: nova/virt/vmwareapi/vmops.py:884 +#: nova/virt/vmwareapi/vmops.py:889 msgid "Instance does not exist on backend" msgstr "" @@ -9488,27 +9064,27 @@ msgid "" "contents from the disk" msgstr "" -#: nova/virt/vmwareapi/vmops.py:972 +#: nova/virt/vmwareapi/vmops.py:971 msgid "pause not supported for vmwareapi" msgstr "pausa no soportada para vmwareapi" -#: nova/virt/vmwareapi/vmops.py:976 +#: nova/virt/vmwareapi/vmops.py:975 msgid "unpause not supported for vmwareapi" msgstr "cancelación de pausa no soportada para vmwareapi" -#: nova/virt/vmwareapi/vmops.py:994 +#: nova/virt/vmwareapi/vmops.py:993 msgid "instance is powered off and cannot be suspended." msgstr "instancia está desactivada y no se puede suspender. 
" -#: nova/virt/vmwareapi/vmops.py:1014 +#: nova/virt/vmwareapi/vmops.py:1013 msgid "instance is not in a suspended state" msgstr "la instancia no está en un estado suspendido" -#: nova/virt/vmwareapi/vmops.py:1102 -msgid "instance is suspended and cannot be powered off." -msgstr "la instancia está suspendida y no se puede desactivar " +#: nova/virt/vmwareapi/vmops.py:1113 +msgid "Unable to shrink disk." +msgstr "" -#: nova/virt/vmwareapi/vmops.py:1193 +#: nova/virt/vmwareapi/vmops.py:1172 #, python-format msgid "" "In vmwareapi:vmops:confirm_migration, got this exception while destroying" @@ -9517,17 +9093,27 @@ msgstr "" "En vmwareapi:vmops:confirm_migration, se ha obtenido esta excepción al " "destruir la máquina virtual: %s" -#: nova/virt/vmwareapi/vmops.py:1255 nova/virt/xenapi/vmops.py:1497 +#: nova/virt/vmwareapi/vmops.py:1248 nova/virt/xenapi/vmops.py:1500 #, python-format msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" msgstr "" "Se han encontrado %(instance_count)d rearranques colgados de más de " "%(timeout)d segundos" -#: nova/virt/vmwareapi/vmops.py:1259 nova/virt/xenapi/vmops.py:1501 +#: nova/virt/vmwareapi/vmops.py:1252 nova/virt/xenapi/vmops.py:1504 msgid "Automatically hard rebooting" msgstr "Rearrancando automáticamente de forma permanente" +#: nova/virt/vmwareapi/vmops.py:1570 +#, python-format +msgid "No device with interface-id %s exists on VM" +msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1580 +#, python-format +msgid "No device with MAC address %s exists on the VM" +msgstr "" + #: nova/virt/vmwareapi/volumeops.py:340 nova/virt/vmwareapi/volumeops.py:375 #, python-format msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" @@ -9560,14 +9146,14 @@ msgstr "" "El punto de montaje %(mountpoint)s se desligó de la instancia " "%(instance_name)s" -#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:1768 +#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:1777 #, python-format msgid 
"TIMEOUT: The call to %(method)s timed out. args=%(args)r" msgstr "" "TIEMPO DE ESPERA EXCEDIDO: La llamada a %(method)s ha excedido el tiempo " "de espera. args=%(args)r" -#: nova/virt/xenapi/agent.py:117 nova/virt/xenapi/vmops.py:1773 +#: nova/virt/xenapi/agent.py:117 nova/virt/xenapi/vmops.py:1782 #, python-format msgid "" "NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " @@ -9576,7 +9162,7 @@ msgstr "" "SIN IMPLEMENTAR: el agente no soporta la llamada a %(method)s. " "args=%(args)r" -#: nova/virt/xenapi/agent.py:122 nova/virt/xenapi/vmops.py:1778 +#: nova/virt/xenapi/agent.py:122 nova/virt/xenapi/vmops.py:1787 #, python-format msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" msgstr "La llamada a %(method)s ha devuelto un error: %(e)s. args=%(args)r" @@ -9650,21 +9236,21 @@ msgstr "" msgid "Failure while cleaning up attached VDIs" msgstr "Error al limpiar VDI conectados " -#: nova/virt/xenapi/driver.py:386 +#: nova/virt/xenapi/driver.py:390 #, python-format msgid "Could not determine key: %s" msgstr "No se ha podido determinar la clave: %s" -#: nova/virt/xenapi/driver.py:636 +#: nova/virt/xenapi/driver.py:641 msgid "Host startup on XenServer is not supported." msgstr "No se soporta el arranque de host en XenServer." 
-#: nova/virt/xenapi/fake.py:811 +#: nova/virt/xenapi/fake.py:820 #, python-format msgid "xenapi.fake does not have an implementation for %s" msgstr "xenapi.fake no tiene una implementación para %s" -#: nova/virt/xenapi/fake.py:919 +#: nova/virt/xenapi/fake.py:928 #, python-format msgid "" "xenapi.fake does not have an implementation for %s or it has been called " @@ -9673,7 +9259,7 @@ msgstr "" "xenapi.fake no tiene una implementación para %s o ha sido llamada con un " "número incorrecto de argumentos" -#: nova/virt/xenapi/host.py:74 +#: nova/virt/xenapi/host.py:73 #, python-format msgid "" "Instance %(name)s running on %(host)s could not be found in the database:" @@ -9683,21 +9269,21 @@ msgstr "" "encontrar en la base de datos, suponiendo que se trata de una máquina " "virtual de trabajador y se salta la migración de ping a un nuevo host" -#: nova/virt/xenapi/host.py:86 +#: nova/virt/xenapi/host.py:85 #, python-format msgid "Aggregate for host %(host)s count not be found." msgstr "No se ha podido encontrar el agregado para el host %(host)s. " -#: nova/virt/xenapi/host.py:105 +#: nova/virt/xenapi/host.py:104 #, python-format msgid "Unable to migrate VM %(vm_ref)s from %(host)s" msgstr "Incapaz de migrar VM %(vm_ref)s desde %(host)s" -#: nova/virt/xenapi/host.py:186 +#: nova/virt/xenapi/host.py:185 msgid "Failed to parse information about a pci device for passthrough" msgstr "Fallo al pasar información sobre el dispositivo pci para el traspaso" -#: nova/virt/xenapi/host.py:259 +#: nova/virt/xenapi/host.py:258 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to " @@ -9706,16 +9292,16 @@ msgstr "" "El nombre del anfitrión ha cambiado de %(old)s a %(new)s. Se requiere un " "reinicio para hacer efecto." 
-#: nova/virt/xenapi/host.py:284 +#: nova/virt/xenapi/host.py:283 #, python-format msgid "Failed to extract instance support from %s" msgstr "No se ha podido extraer el soporte de instancia de %s" -#: nova/virt/xenapi/host.py:301 +#: nova/virt/xenapi/host.py:300 msgid "Unable to get updated status" msgstr "No se puede obtener el estado actualizado" -#: nova/virt/xenapi/host.py:304 +#: nova/virt/xenapi/host.py:303 #, python-format msgid "The call to %(method)s returned an error: %(e)s." msgstr "La llamada a %(method)s ha devuelto un error: %(e)s." @@ -9793,7 +9379,7 @@ msgstr "" "PIF %(pif_uuid)s para la red %(bridge)s tiene identificador de VLAN " "%(pif_vlan)d. Se esperaba %(vlan_num)d" -#: nova/virt/xenapi/vm_utils.py:208 +#: nova/virt/xenapi/vm_utils.py:210 #, python-format msgid "" "Device id %(id)s specified is not supported by hypervisor version " @@ -9802,16 +9388,16 @@ msgstr "" "El dispositivo con identificador %(id)s especificado no está soportado " "por la versión del hipervisor %(version)s" -#: nova/virt/xenapi/vm_utils.py:326 nova/virt/xenapi/vm_utils.py:341 +#: nova/virt/xenapi/vm_utils.py:328 nova/virt/xenapi/vm_utils.py:343 msgid "VM already halted, skipping shutdown..." msgstr "VM ya se ha detenido, omitiendo la conclusión... 
" -#: nova/virt/xenapi/vm_utils.py:393 +#: nova/virt/xenapi/vm_utils.py:395 #, python-format msgid "VBD %s already detached" msgstr "VBD %s ya se ha desconectado" -#: nova/virt/xenapi/vm_utils.py:396 +#: nova/virt/xenapi/vm_utils.py:398 #, python-format msgid "" "VBD %(vbd_ref)s uplug failed with \"%(err)s\", attempt " @@ -9820,36 +9406,36 @@ msgstr "" "La desconexión del VBD %(vbd_ref)s ha fallado con \"%(err)s\", intento " "%(num_attempt)d/%(max_attempts)d" -#: nova/virt/xenapi/vm_utils.py:403 +#: nova/virt/xenapi/vm_utils.py:405 #, python-format msgid "Unable to unplug VBD %s" msgstr "Imposible desconectar VBD %s" -#: nova/virt/xenapi/vm_utils.py:406 +#: nova/virt/xenapi/vm_utils.py:408 #, python-format msgid "Reached maximum number of retries trying to unplug VBD %s" msgstr "Se ha alcanzado el número máximo de reintentos de desconectar VBD %s " -#: nova/virt/xenapi/vm_utils.py:418 +#: nova/virt/xenapi/vm_utils.py:420 #, python-format msgid "Unable to destroy VBD %s" msgstr "Imposible destruir VBD %s" -#: nova/virt/xenapi/vm_utils.py:471 +#: nova/virt/xenapi/vm_utils.py:473 #, python-format msgid "Unable to destroy VDI %s" msgstr "No se puede destruir VDI %s" -#: nova/virt/xenapi/vm_utils.py:517 +#: nova/virt/xenapi/vm_utils.py:519 msgid "SR not present and could not be introduced" msgstr "SR no está presente y no se ha podido introducir" -#: nova/virt/xenapi/vm_utils.py:701 +#: nova/virt/xenapi/vm_utils.py:703 #, python-format msgid "No primary VDI found for %s" msgstr "No se ha encontrado VDI primario para %s" -#: nova/virt/xenapi/vm_utils.py:793 +#: nova/virt/xenapi/vm_utils.py:795 #, python-format msgid "" "Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s" @@ -9858,12 +9444,12 @@ msgstr "" "Solo los SRs basados en archivo (ext/NFS) están soportados por esta " "característica. 
SR %(uuid)s es del tipo %(type)s" -#: nova/virt/xenapi/vm_utils.py:872 +#: nova/virt/xenapi/vm_utils.py:874 #, python-format msgid "Multiple base images for image: %s" msgstr "Múltiple imágenes base para la imagen: %s" -#: nova/virt/xenapi/vm_utils.py:927 +#: nova/virt/xenapi/vm_utils.py:929 #, python-format msgid "" "VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor " @@ -9872,31 +9458,31 @@ msgstr "" "El VDI %(vdi_ref)s es de %(virtual_size)d bytes lo que es mayor que el " "tamaño del sabor de %(new_disk_size)d bytes." -#: nova/virt/xenapi/vm_utils.py:938 nova/virt/xenapi/vmops.py:1037 +#: nova/virt/xenapi/vm_utils.py:940 nova/virt/xenapi/vmops.py:1040 msgid "Can't resize a disk to 0 GB." msgstr "No se puede cambiar el tamaño de archivo a 0 GB." -#: nova/virt/xenapi/vm_utils.py:990 +#: nova/virt/xenapi/vm_utils.py:992 msgid "Disk must have only one partition." msgstr "el disco debe tener una sola partición." -#: nova/virt/xenapi/vm_utils.py:995 +#: nova/virt/xenapi/vm_utils.py:997 #, python-format msgid "Disk contains a filesystem we are unable to resize: %s" msgstr "" "El disco contiene un sistema de archivos incapaz de modificar su tamaño: " "%s" -#: nova/virt/xenapi/vm_utils.py:1000 +#: nova/virt/xenapi/vm_utils.py:1002 msgid "The only partition should be partition 1." msgstr "La unica partición debe ser la partición 1." -#: nova/virt/xenapi/vm_utils.py:1011 +#: nova/virt/xenapi/vm_utils.py:1013 #, python-format msgid "Attempted auto_configure_disk failed because: %s" msgstr "El intento de auto_configure_disk ha fallado por: %s" -#: nova/virt/xenapi/vm_utils.py:1262 +#: nova/virt/xenapi/vm_utils.py:1264 #, python-format msgid "" "Fast cloning is only supported on default local SR of type ext. SR on " @@ -9906,24 +9492,24 @@ msgstr "" "ext. Se ha encontrado que los SR de este sistema son de tipo %s. " "Ignorando el identificador cow." 
-#: nova/virt/xenapi/vm_utils.py:1337 +#: nova/virt/xenapi/vm_utils.py:1339 #, python-format msgid "Unrecognized cache_images value '%s', defaulting to True" msgstr "" "Valor cache_images no reconocido '%s', se toma True como valor " "predeterminado" -#: nova/virt/xenapi/vm_utils.py:1413 +#: nova/virt/xenapi/vm_utils.py:1415 #, python-format msgid "Invalid value '%s' for torrent_images" msgstr "valor inválido '%s' para torrent_images" -#: nova/virt/xenapi/vm_utils.py:1436 +#: nova/virt/xenapi/vm_utils.py:1438 #, python-format msgid "Invalid value '%d' for image_compression_level" msgstr "Valor inválido '%d' para image_compression_level" -#: nova/virt/xenapi/vm_utils.py:1462 +#: nova/virt/xenapi/vm_utils.py:1464 #, python-format msgid "" "Download handler '%(handler)s' raised an exception, falling back to " @@ -9932,14 +9518,14 @@ msgstr "" "La descarga del manejador '%(handler)s' ha arrojado una excepción, " "restaurando hacia el manejador predeterminado '%(default_handler)s" -#: nova/virt/xenapi/vm_utils.py:1518 +#: nova/virt/xenapi/vm_utils.py:1520 #, python-format msgid "Image size %(size)d exceeded flavor allowed size %(allowed_size)d" msgstr "" "El tamaño de la imagen %(size)d excede el tamaño permitido por el sabor " "%(allowed_size)d" -#: nova/virt/xenapi/vm_utils.py:1569 +#: nova/virt/xenapi/vm_utils.py:1571 #, python-format msgid "" "Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " @@ -9948,26 +9534,26 @@ msgstr "" "La imagen de kernel/disco RAM es demasiado grande: %(vdi_size)d bytes, " "máx. 
%(max_size)d bytes" -#: nova/virt/xenapi/vm_utils.py:1611 +#: nova/virt/xenapi/vm_utils.py:1613 msgid "Failed to fetch glance image" msgstr "No se ha podido captar la imagen glance" -#: nova/virt/xenapi/vm_utils.py:1819 +#: nova/virt/xenapi/vm_utils.py:1846 #, python-format msgid "Unable to parse rrd of %s" msgstr "Incapaz de analizar rrd de %s" -#: nova/virt/xenapi/vm_utils.py:1849 +#: nova/virt/xenapi/vm_utils.py:1876 #, python-format msgid "Retry SR scan due to error: %s" msgstr "Reintentando escaneo de SR debido a error: %s" -#: nova/virt/xenapi/vm_utils.py:1882 +#: nova/virt/xenapi/vm_utils.py:1909 #, python-format msgid "Flag sr_matching_filter '%s' does not respect formatting convention" msgstr "El distintivo sr_matching_filter '%s' no respeta el convenio de formato" -#: nova/virt/xenapi/vm_utils.py:1903 +#: nova/virt/xenapi/vm_utils.py:1930 msgid "" "XenAPI is unable to find a Storage Repository to install guest instances " "on. Please check your configuration (e.g. set a default SR for the pool) " @@ -9978,11 +9564,11 @@ msgstr "" "establece un SR predeterminado en el conjunto) y/o ocnfigura el " "identificador 'sr_matching_filter'." -#: nova/virt/xenapi/vm_utils.py:1916 +#: nova/virt/xenapi/vm_utils.py:1943 msgid "Cannot find SR of content-type ISO" msgstr "No se puede encontrar SR de content-type ISO" -#: nova/virt/xenapi/vm_utils.py:1969 +#: nova/virt/xenapi/vm_utils.py:1996 #, python-format msgid "" "Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " @@ -9991,22 +9577,22 @@ msgstr "" "No se ha podido obtener XML RRD para la máquina virtual %(vm_uuid)s con " "los detalles de servidor: %(server)s." -#: nova/virt/xenapi/vm_utils.py:2097 +#: nova/virt/xenapi/vm_utils.py:2124 #, python-format msgid "VHD coalesce attempts exceeded (%d), giving up..." msgstr "Intentos de incorporación de VHD excedidos (%d), dejando de intentar..." 
-#: nova/virt/xenapi/vm_utils.py:2132 +#: nova/virt/xenapi/vm_utils.py:2159 #, python-format msgid "Timeout waiting for device %s to be created" msgstr "Se ha excedido el tiempo esperando a que se creara el dispositivo %s" -#: nova/virt/xenapi/vm_utils.py:2152 +#: nova/virt/xenapi/vm_utils.py:2179 #, python-format msgid "Disconnecting stale VDI %s from compute domU" msgstr "Desconectando VDI obsoleto %s de domU de cálculo " -#: nova/virt/xenapi/vm_utils.py:2310 +#: nova/virt/xenapi/vm_utils.py:2337 msgid "" "Shrinking the filesystem down with resize2fs has failed, please check if " "you have enough free space on your disk." @@ -10014,40 +9600,40 @@ msgstr "" "La reducción del sistema de archivos con resize2fs ha fallado, por favor " "verifica si tienes espacio libre suficiente en tu disco." -#: nova/virt/xenapi/vm_utils.py:2445 +#: nova/virt/xenapi/vm_utils.py:2472 msgid "Manipulating interface files directly" msgstr "Manipulando archivos de interfaz directamente " -#: nova/virt/xenapi/vm_utils.py:2454 +#: nova/virt/xenapi/vm_utils.py:2481 #, python-format msgid "Failed to mount filesystem (expected for non-linux instances): %s" msgstr "" "No se ha podido montar sistema de archivos (se espera para instancias no " "Linux): %s " -#: nova/virt/xenapi/vm_utils.py:2566 +#: nova/virt/xenapi/vm_utils.py:2496 msgid "This domU must be running on the host specified by connection_url" msgstr "" "Este domU debe estar en ejecución en el anfitrión especificado por " "connection_url" -#: nova/virt/xenapi/vm_utils.py:2635 +#: nova/virt/xenapi/vm_utils.py:2565 msgid "Failed to transfer vhd to new host" msgstr "No se ha podido transferir vhd al nuevo host" -#: nova/virt/xenapi/vm_utils.py:2661 +#: nova/virt/xenapi/vm_utils.py:2591 msgid "ipxe_boot_menu_url not set, user will have to enter URL manually..." msgstr "" "ipxe_boot_menu_url no establecido, el usuario debe ingresar la URL " "manualmente..." 
-#: nova/virt/xenapi/vm_utils.py:2667 +#: nova/virt/xenapi/vm_utils.py:2597 msgid "ipxe_network_name not set, user will have to enter IP manually..." msgstr "" "ipxe_network_name no establecido, el usuario debe ingresar la dirección " "IP manualmente..." -#: nova/virt/xenapi/vm_utils.py:2678 +#: nova/virt/xenapi/vm_utils.py:2608 #, python-format msgid "" "Unable to find network matching '%(network_name)s', user will have to " @@ -10056,7 +9642,7 @@ msgstr "" "Incapaz de encontrar red coincidente '%(network_name)s', el usuario " "deberá introducir una dirección IP manualmente..." -#: nova/virt/xenapi/vm_utils.py:2702 +#: nova/virt/xenapi/vm_utils.py:2632 #, python-format msgid "ISO creation tool '%s' does not exist." msgstr "La herramienta de creación de ISO '%s' no esiste." @@ -10065,42 +9651,42 @@ msgstr "La herramienta de creación de ISO '%s' no esiste." msgid "Error: Agent is disabled" msgstr "Error: El agente está inhabilitado" -#: nova/virt/xenapi/vmops.py:375 +#: nova/virt/xenapi/vmops.py:378 msgid "ipxe_boot is True but no ISO image found" msgstr "ipxe_boot establecido en True pero no se ha encontrado imagen ISO" -#: nova/virt/xenapi/vmops.py:518 +#: nova/virt/xenapi/vmops.py:521 msgid "Failed to spawn, rolling back" msgstr "No se ha podido generar, retrotrayendo" -#: nova/virt/xenapi/vmops.py:783 +#: nova/virt/xenapi/vmops.py:786 msgid "Unable to terminate instance." msgstr "Incapaz de terminar instancia." -#: nova/virt/xenapi/vmops.py:835 +#: nova/virt/xenapi/vmops.py:838 #, python-format msgid "_migrate_disk_resizing_down failed. Restoring orig vm due_to: %s." msgstr "_migrate_disk_resizing_down ha fallado. Restaurando vm original due_to: %s" -#: nova/virt/xenapi/vmops.py:989 +#: nova/virt/xenapi/vmops.py:992 #, python-format msgid "_migrate_disk_resizing_up failed. Restoring orig vm due_to: %s." msgstr "_migrate_disk_resizing_up fallido. Restaurando vm original due_to: %s." 
-#: nova/virt/xenapi/vmops.py:996 +#: nova/virt/xenapi/vmops.py:999 #, python-format msgid "_migrate_disk_resizing_up failed to rollback: %s" msgstr "_migrate_disk_rezising_up fallido al revertir: %s" -#: nova/virt/xenapi/vmops.py:1013 +#: nova/virt/xenapi/vmops.py:1016 msgid "Can't resize down ephemeral disks." msgstr "No se puede reducir el tamaño de los discos efímeros." -#: nova/virt/xenapi/vmops.py:1124 +#: nova/virt/xenapi/vmops.py:1127 msgid "Starting halted instance found during reboot" msgstr "Iniciando instancia detenida encontrada durante rearranque" -#: nova/virt/xenapi/vmops.py:1130 +#: nova/virt/xenapi/vmops.py:1133 msgid "" "Reboot failed due to bad volumes, detaching bad volumes and starting " "halted instance" @@ -10108,65 +9694,65 @@ msgstr "" "Se ha encontrado un error en el rearranque debido a volúmenes erróneos; " "se van a desconectar los volúmenes erróneos e iniciar la instancia parada" -#: nova/virt/xenapi/vmops.py:1208 +#: nova/virt/xenapi/vmops.py:1211 msgid "Unable to update metadata, VM not found." msgstr "Incapaz de actualizar metadatos, la VM no ha sido encontrada." -#: nova/virt/xenapi/vmops.py:1254 +#: nova/virt/xenapi/vmops.py:1257 msgid "Unable to find root VBD/VDI for VM" msgstr "No se puede encontrar VBD/VDI de raíz para VM" -#: nova/virt/xenapi/vmops.py:1292 +#: nova/virt/xenapi/vmops.py:1295 msgid "instance has a kernel or ramdisk but not both" msgstr "la instancia tiene un kernel o un disco RAM, pero no ambos" -#: nova/virt/xenapi/vmops.py:1326 +#: nova/virt/xenapi/vmops.py:1329 msgid "Destroying VM" msgstr "Destruyendo VM " -#: nova/virt/xenapi/vmops.py:1355 +#: nova/virt/xenapi/vmops.py:1358 msgid "VM is not present, skipping destroy..." msgstr "VM no está presente, omitiendo destrucción... 
" -#: nova/virt/xenapi/vmops.py:1406 +#: nova/virt/xenapi/vmops.py:1409 #, python-format msgid "Instance is already in Rescue Mode: %s" msgstr "La instancia ya está en modalidad de rescate: %s " -#: nova/virt/xenapi/vmops.py:1448 +#: nova/virt/xenapi/vmops.py:1451 msgid "VM is not present, skipping soft delete..." msgstr "VM no está presente, omitiendo supresión no permanente... " -#: nova/virt/xenapi/vmops.py:1834 +#: nova/virt/xenapi/vmops.py:1843 #, python-format msgid "Destination host:%s must be in the same aggregate as the source server" msgstr "" "El anfitrión destino: %s debe estar en el mismo agregado que el servidor " "fuente" -#: nova/virt/xenapi/vmops.py:1855 +#: nova/virt/xenapi/vmops.py:1864 msgid "No suitable network for migrate" msgstr "No hay red adecuada para migrar" -#: nova/virt/xenapi/vmops.py:1861 +#: nova/virt/xenapi/vmops.py:1870 #, python-format msgid "PIF %s does not contain IP address" msgstr "PIC %s no contiene una dirección IP" -#: nova/virt/xenapi/vmops.py:1874 +#: nova/virt/xenapi/vmops.py:1883 msgid "Migrate Receive failed" msgstr "Ha fallado la recepción de migración" -#: nova/virt/xenapi/vmops.py:1948 +#: nova/virt/xenapi/vmops.py:1957 msgid "XAPI supporting relax-xsm-sr-check=true required" msgstr "Se requiere una XAPI que soporte relax-xsm-sr-check=true" -#: nova/virt/xenapi/vmops.py:1959 +#: nova/virt/xenapi/vmops.py:1968 #, python-format msgid "assert_can_migrate failed because: %s" msgstr "assert_can_migrate ha fallado debido a: %s" -#: nova/virt/xenapi/vmops.py:2019 +#: nova/virt/xenapi/vmops.py:2028 msgid "Migrate Send failed" msgstr "Ha fallado el envío de migración" @@ -10223,6 +9809,11 @@ msgstr "Punto de montaje no puede ser traducido: %s" msgid "Unable to find SR from VBD %s" msgstr "Imposible encontrar SR en VBD %s" +#: nova/virt/xenapi/volume_utils.py:311 +#, python-format +msgid "Unable to find SR from VDI %s" +msgstr "" + #: nova/virt/xenapi/volumeops.py:63 #, python-format msgid "Connected volume (vdi_uuid): %s" @@ 
-10309,11 +9900,16 @@ msgstr "Error inesperado: %s " msgid "Starting nova-xvpvncproxy node (version %s)" msgstr "Iniciando el nodo nova-xvpvncproxy (versión %s)" -#: nova/volume/cinder.py:236 +#: nova/volume/cinder.py:257 +#, python-format +msgid "Invalid client version, must be one of: %s" +msgstr "" + +#: nova/volume/cinder.py:281 msgid "status must be 'in-use'" msgstr "el estado debe estar 'in-use'" -#: nova/volume/cinder.py:242 +#: nova/volume/cinder.py:287 msgid "status must be 'available'" msgstr "el estado debe ser 'available'" diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-critical.po b/nova/locale/fr/LC_MESSAGES/nova-log-critical.po index 2efca429e6..254ad5bfbb 100644 --- a/nova/locale/fr/LC_MESSAGES/nova-log-critical.po +++ b/nova/locale/fr/LC_MESSAGES/nova-log-critical.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:04+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-05-30 06:26+0000\n" "Last-Translator: FULL NAME \n" "Language-Team: French (http://www.transifex.com/projects/p/nova/language/" @@ -19,16 +19,18 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" -#: nova/virt/vmwareapi/driver.py:864 +#: nova/api/openstack/__init__.py:331 +#, python-format +msgid "Missing core API extensions: %s" +msgstr "" + +#: nova/virt/vmwareapi/driver.py:658 #, python-format msgid "" "Unable to connect to server at %(server)s, sleeping for %(seconds)s seconds" msgstr "" -#: nova/virt/vmwareapi/driver.py:973 +#: nova/virt/vmwareapi/driver.py:767 #, python-format msgid "In vmwareapi: _call_method (session=%s)" msgstr "" - -#~ msgid "Dummy message for transifex setup." 
-#~ msgstr "message fictif pour la configuration transifex" diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-error.po b/nova/locale/fr/LC_MESSAGES/nova-log-error.po index 3031dc40da..e4ad002caf 100644 --- a/nova/locale/fr/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/fr/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:04+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: French (http://www.transifex.com/projects/p/nova/language/" @@ -39,16 +39,265 @@ msgstr "" msgid "Exception running %(name)s post-hook: %(obj)s" msgstr "" -#: nova/api/ec2/__init__.py:243 +#: nova/api/ec2/__init__.py:244 #, python-format msgid "Keystone failure: %s" msgstr "" -#: nova/compute/manager.py:5416 +#: nova/api/ec2/__init__.py:493 +#, python-format +msgid "Unexpected %(ex_name)s raised: %(ex_str)s" +msgstr "" + +#: nova/api/ec2/__init__.py:520 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: nova/api/metadata/handler.py:155 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/metadata/handler.py:212 +#, python-format +msgid "Failed to get metadata for instance id: %s" +msgstr "" + +#: nova/api/openstack/common.py:134 +#, python-format +msgid "" +"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad " +"upgrade or db corrupted?" 
+msgstr "" + +#: nova/api/openstack/wsgi.py:684 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:68 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:90 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:112 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:134 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:160 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:179 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:198 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:215 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:234 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:392 +#, python-format +msgid "Compute.api::resetState %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/multinic.py:85 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85 +msgid "Failed to get default networks" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125 +msgid "Failed to update usages deallocating network." +msgstr "" + +#: nova/compute/api.py:561 +msgid "Failed to set instance name using multi_instance_display_name_template." +msgstr "" + +#: nova/compute/api.py:1429 +msgid "" +"Something wrong happened when trying to delete snapshot from shelved " +"instance." 
+msgstr "" + +#: nova/compute/api.py:3732 +msgid "Failed to update usages deallocating security group" +msgstr "" + +#: nova/compute/flavors.py:167 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: nova/compute/flavors.py:178 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: nova/compute/manager.py:366 +#, python-format +msgid "Error while trying to clean up image %s" +msgstr "" + +#: nova/compute/manager.py:755 +msgid "Failed to check if instance shared" +msgstr "" + +#: nova/compute/manager.py:821 nova/compute/manager.py:872 +msgid "Failed to complete a deletion" +msgstr "" + +#: nova/compute/manager.py:913 +msgid "Failed to stop instance" +msgstr "" + +#: nova/compute/manager.py:925 +msgid "Failed to start instance" +msgstr "" + +#: nova/compute/manager.py:950 +msgid "Failed to revert crashed migration" +msgstr "" + +#: nova/compute/manager.py:1364 +msgid "Failed to dealloc network for deleted instance" +msgstr "" + +#: nova/compute/manager.py:1385 +msgid "Failed to dealloc network for failed instance" +msgstr "" + +#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +msgid "Error trying to reschedule" +msgstr "" + +#: nova/compute/manager.py:1567 +#, python-format +msgid "Instance failed network setup after %(attempts)d attempt(s)" +msgstr "" + +#: nova/compute/manager.py:1755 +msgid "Instance failed block device setup" +msgstr "" + +#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 +#: nova/compute/manager.py:4058 +msgid "Instance failed to spawn" +msgstr "" + +#: nova/compute/manager.py:1957 +msgid "Unexpected build failure, not rescheduling build." 
+msgstr "" + +#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +msgid "Failed to allocate network(s)" +msgstr "" + +#: nova/compute/manager.py:2104 +msgid "Failure prepping block device" +msgstr "" + +#: nova/compute/manager.py:2137 +msgid "Failed to deallocate networks" +msgstr "" + +#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 +#: nova/compute/manager.py:5803 +msgid "Setting instance vm_state to ERROR" +msgstr "" + +#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#, python-format +msgid "Failed to get compute_info for %s" +msgstr "" + +#: nova/compute/manager.py:3005 +#, python-format +msgid "set_admin_password failed: %s" +msgstr "" + +#: nova/compute/manager.py:3090 +msgid "Error trying to Rescue Instance" +msgstr "" + +#: nova/compute/manager.py:3711 +#, python-format +msgid "Failed to rollback quota for failed finish_resize: %s" +msgstr "" + +#: nova/compute/manager.py:4310 +#, python-format +msgid "Failed to attach %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4349 +#, python-format +msgid "Failed to detach volume %(volume_id)s from %(mp)s" +msgstr "" + +#: nova/compute/manager.py:4422 +#, python-format +msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" +msgstr "" + +#: nova/compute/manager.py:4429 +#, python-format +msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4716 +#, python-format +msgid "Pre live migration failed at %s" +msgstr "" + +#: nova/compute/manager.py:5216 +msgid "Periodic task failed to offload instance." +msgstr "" + +#: nova/compute/manager.py:5256 +#, python-format +msgid "Failed to generate usage audit for instance on host %s" +msgstr "" + +#: nova/compute/manager.py:5446 msgid "" "Periodic sync_power_state task had an error while processing an instance." 
msgstr "" +#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 +#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +msgid "error during stop() in sync_power_state." +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "Impossible d'avertir les cellules de l'erreur d'instance" @@ -68,11 +317,11 @@ msgstr "Exception inattendue survenue %d fois... Nouvel essai." msgid "Could not release the acquired lock `%s`" msgstr "" -#: nova/openstack/common/loopingcall.py:89 +#: nova/openstack/common/loopingcall.py:95 msgid "in fixed duration looping call" msgstr "dans l'appel en boucle de durée fixe" -#: nova/openstack/common/loopingcall.py:136 +#: nova/openstack/common/loopingcall.py:138 msgid "in dynamic looping call" msgstr "dans l'appel en boucle dynamique" @@ -121,137 +370,151 @@ msgstr "Exception BD encapsulée." msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:639 +#: nova/pci/pci_stats.py:119 +msgid "" +"Failed to allocate PCI devices for instance. Unassigning devices back to " +"pools. This should not happen, since the scheduler should have accurate " +"information, and allocation during claims is controlled via a hold on the " +"compute node semaphore" +msgstr "" + +#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109 +#, python-format +msgid "PCI device %s not found" +msgstr "" + +#: nova/virt/disk/api.py:388 +#, python-format +msgid "" +"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:641 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." 
msgstr "" -#: nova/virt/libvirt/driver.py:764 +#: nova/virt/libvirt/driver.py:766 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:937 +#: nova/virt/libvirt/driver.py:929 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1015 +#: nova/virt/libvirt/driver.py:1007 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1041 +#: nova/virt/libvirt/driver.py:1035 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1445 +#: nova/virt/libvirt/driver.py:1438 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1470 +#: nova/virt/libvirt/driver.py:1465 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1719 +#: nova/virt/libvirt/driver.py:1717 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1827 +#: nova/virt/libvirt/driver.py:1825 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1833 +#: nova/virt/libvirt/driver.py:1831 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1882 +#: nova/virt/libvirt/driver.py:1880 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2028 +#: nova/virt/libvirt/driver.py:2026 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 +#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2619 +#: nova/virt/libvirt/driver.py:2620 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2770 +#: nova/virt/libvirt/driver.py:2788 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2863 +#: nova/virt/libvirt/driver.py:2881 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3645 +#: nova/virt/libvirt/driver.py:3680 #, python-format -msgid "An error occurred while trying to define a domain with xml: %s" +msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3654 +#: nova/virt/libvirt/driver.py:3684 #, python-format -msgid "An error occurred while trying to launch a defined domain with xml: %s" +msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3663 +#: nova/virt/libvirt/driver.py:3689 #, python-format -msgid "An error occurred while enabling hairpin mode on domain with xml: %s" +msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3681 +#: nova/virt/libvirt/driver.py:3703 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3988 +#: nova/virt/libvirt/driver.py:4012 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4667 +#: nova/virt/libvirt/driver.py:4691 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5419 +#: nova/virt/libvirt/driver.py:5487 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:201 +#: nova/virt/libvirt/imagebackend.py:200 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:229 +#: nova/virt/libvirt/imagebackend.py:227 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:505 -#, python-format -msgid "error opening rbd image %s" -msgstr "" - #: nova/virt/libvirt/imagecache.py:130 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" @@ -272,15 +535,19 @@ msgstr "" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 -#: nova/virt/libvirt/vif.py:562 +#: nova/virt/libvirt/rbd.py:62 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 +#: nova/virt/libvirt/vif.py:533 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 -#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 -#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 -#: nova/virt/libvirt/vif.py:737 +#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 +#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 +#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 msgid "Failed while unplugging vif" msgstr "" @@ -299,8 +566,18 @@ msgstr "" msgid "Couldn't unmount the GlusterFS share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:500 +#: nova/virt/vmwareapi/vmops.py:509 #, python-format msgid "" "Failed to copy cached 
image %(source)s to %(dest)s for resize: %(error)s" msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1553 +#, python-format +msgid "Attaching network adapter failed. Exception: %s" +msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1593 +#, python-format +msgid "Detaching network adapter failed. Exception: %s" +msgstr "" diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-info.po b/nova/locale/fr/LC_MESSAGES/nova-log-info.po index 08f0723004..6d7f745448 100644 --- a/nova/locale/fr/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/fr/LC_MESSAGES/nova-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" -"PO-Revision-Date: 2014-07-16 14:42+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"PO-Revision-Date: 2014-08-07 07:51+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: French (http://www.transifex.com/projects/p/nova/language/" "fr/)\n" @@ -19,12 +19,51 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" -#: nova/compute/manager.py:5422 +#: nova/api/openstack/__init__.py:101 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s retourné avec HTTP %(status)d" + +#: nova/api/openstack/__init__.py:294 +msgid "V3 API has been disabled by configuration" +msgstr "" + +#: nova/api/openstack/wsgi.py:688 +#, python-format +msgid "Fault thrown: %s" +msgstr "Erreur générée : %s" + +#: nova/api/openstack/wsgi.py:691 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "Exception HTTP générée : %s" + +#: nova/api/openstack/compute/contrib/os_networks.py:101 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128 +#, python-format +msgid "Deleting network with id %s" +msgstr "Suppression du réseau avec l'ID %s" + +#: nova/compute/manager.py:5452 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." 
msgstr "" +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36 +msgid "" +"Skipped adding reservations_deleted_expire_idx because an equivalent index " +"already exists." +msgstr "" + +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58 +msgid "" +"Skipped removing reservations_deleted_expire_idx because index does not " +"exist." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" @@ -107,93 +146,97 @@ msgstr "Suppression ligne en double avec l'ID : %(id)s de la table : %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/firewall.py:446 +#: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/libvirt/driver.py:839 +#: nova/virt/disk/vfs/guestfs.py:137 +msgid "Unable to force TCG mode, libguestfs too old?" +msgstr "" + +#: nova/virt/libvirt/driver.py:837 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:958 +#: nova/virt/libvirt/driver.py:950 msgid "Instance destroyed successfully." msgstr "Instance détruite avec succès." -#: nova/virt/libvirt/driver.py:968 +#: nova/virt/libvirt/driver.py:960 msgid "Instance may be started again." msgstr "L'instance peut être redémarrée." -#: nova/virt/libvirt/driver.py:978 +#: nova/virt/libvirt/driver.py:970 msgid "Going to destroy instance again." msgstr "Tentative de redestruction de l'instance." 
-#: nova/virt/libvirt/driver.py:1574 +#: nova/virt/libvirt/driver.py:1570 msgid "Beginning live snapshot process" msgstr "Démarrage du processus d'instantané en temps réel" -#: nova/virt/libvirt/driver.py:1577 +#: nova/virt/libvirt/driver.py:1573 msgid "Beginning cold snapshot process" msgstr "Démarrage du processus d'instantané à froid" -#: nova/virt/libvirt/driver.py:1606 +#: nova/virt/libvirt/driver.py:1602 msgid "Snapshot extracted, beginning image upload" msgstr "Instantané extrait, démarrage du téléchargement d'image" -#: nova/virt/libvirt/driver.py:1618 +#: nova/virt/libvirt/driver.py:1614 msgid "Snapshot image upload complete" msgstr "Téléchargement d'image instantanée terminé" -#: nova/virt/libvirt/driver.py:2049 +#: nova/virt/libvirt/driver.py:2047 msgid "Instance soft rebooted successfully." msgstr "Instance redémarrée par logiciel avec succès." -#: nova/virt/libvirt/driver.py:2092 +#: nova/virt/libvirt/driver.py:2090 msgid "Instance shutdown successfully." msgstr "L'instance s'est arrêtée avec succès." -#: nova/virt/libvirt/driver.py:2100 +#: nova/virt/libvirt/driver.py:2098 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "L'instance a sans doute été redémarrée par logiciel ; retour en cours." -#: nova/virt/libvirt/driver.py:2168 +#: nova/virt/libvirt/driver.py:2167 msgid "Instance rebooted successfully." msgstr "L'instance a redémarré avec succès." -#: nova/virt/libvirt/driver.py:2336 +#: nova/virt/libvirt/driver.py:2335 msgid "Instance spawned successfully." msgstr "Instance générée avec succès." 
-#: nova/virt/libvirt/driver.py:2352 +#: nova/virt/libvirt/driver.py:2351 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "data: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 +#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Journal de console tronqué retourné, %d octets ignorés" -#: nova/virt/libvirt/driver.py:2645 +#: nova/virt/libvirt/driver.py:2646 msgid "Creating image" msgstr "Création de l'image" -#: nova/virt/libvirt/driver.py:2754 +#: nova/virt/libvirt/driver.py:2772 msgid "Using config drive" msgstr "Utilisation de l'unité de config" -#: nova/virt/libvirt/driver.py:2763 +#: nova/virt/libvirt/driver.py:2781 #, python-format msgid "Creating config drive at %(path)s" msgstr "Création de l'unité de config à %(path)s" -#: nova/virt/libvirt/driver.py:3315 +#: nova/virt/libvirt/driver.py:3334 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4193 +#: nova/virt/libvirt/driver.py:4217 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" @@ -203,7 +246,7 @@ msgstr "" "être détaché. Instance=%(instance_name)s Disk=%(disk)s Code=%(errcode)s " "Erreur=%(e)s" -#: nova/virt/libvirt/driver.py:4199 +#: nova/virt/libvirt/driver.py:4223 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -212,39 +255,39 @@ msgstr "" "Domaine introuvable dans libvirt pour l'instance %s. Impossible d'obtenir " "les stats de bloc pour l'unité" -#: nova/virt/libvirt/driver.py:4441 +#: nova/virt/libvirt/driver.py:4465 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5174 +#: nova/virt/libvirt/driver.py:5207 msgid "Instance running successfully." msgstr "L'instance s'exécute avec succès." 
-#: nova/virt/libvirt/driver.py:5414 +#: nova/virt/libvirt/driver.py:5481 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5426 +#: nova/virt/libvirt/driver.py:5494 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5429 +#: nova/virt/libvirt/driver.py:5498 #, python-format msgid "Deletion of %s complete" msgstr "" -#: nova/virt/libvirt/firewall.py:105 +#: nova/virt/libvirt/firewall.py:106 msgid "Called setup_basic_filtering in nwfilter" msgstr "setup_basic_filtering appelé dans nwfilter" -#: nova/virt/libvirt/firewall.py:113 +#: nova/virt/libvirt/firewall.py:114 msgid "Ensuring static filters" msgstr "Garantie des filtres statiques" -#: nova/virt/libvirt/firewall.py:304 +#: nova/virt/libvirt/firewall.py:305 msgid "Attempted to unfilter instance which is not filtered" msgstr "" "Vous avez essayé d'annuler le filtre d'une instance qui n'est pas filtrée" @@ -307,11 +350,11 @@ msgstr "Fichiers de base endommagés : %s" msgid "Removable base files: %s" msgstr "Fichiers de base pouvant être retirés : %s" -#: nova/virt/libvirt/utils.py:536 +#: nova/virt/libvirt/utils.py:490 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1353 +#: nova/virt/xenapi/vm_utils.py:1355 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/it/LC_MESSAGES/nova-log-info.po b/nova/locale/it/LC_MESSAGES/nova-log-info.po index 65f0db86ff..8449d18757 100644 --- a/nova/locale/it/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/it/LC_MESSAGES/nova-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" -"PO-Revision-Date: 2014-07-16 14:42+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"PO-Revision-Date: 2014-08-07 07:51+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Italian 
(http://www.transifex.com/projects/p/nova/language/" "it/)\n" @@ -19,12 +19,51 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -#: nova/compute/manager.py:5422 +#: nova/api/openstack/__init__.py:101 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s restituito con HTTP %(status)d" + +#: nova/api/openstack/__init__.py:294 +msgid "V3 API has been disabled by configuration" +msgstr "" + +#: nova/api/openstack/wsgi.py:688 +#, python-format +msgid "Fault thrown: %s" +msgstr "Errore generato: %s" + +#: nova/api/openstack/wsgi.py:691 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "Generata eccezione HTTP: %s" + +#: nova/api/openstack/compute/contrib/os_networks.py:101 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128 +#, python-format +msgid "Deleting network with id %s" +msgstr "Eliminazione della rete con id %s" + +#: nova/compute/manager.py:5452 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." msgstr "" +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36 +msgid "" +"Skipped adding reservations_deleted_expire_idx because an equivalent index " +"already exists." +msgstr "" + +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58 +msgid "" +"Skipped removing reservations_deleted_expire_idx because index does not " +"exist." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" @@ -108,102 +147,106 @@ msgstr "Cancellata riga duplicata con id: %(id)s dalla tablella: %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." 
msgstr "" -#: nova/virt/firewall.py:446 +#: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/libvirt/driver.py:839 +#: nova/virt/disk/vfs/guestfs.py:137 +msgid "Unable to force TCG mode, libguestfs too old?" +msgstr "" + +#: nova/virt/libvirt/driver.py:837 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:958 +#: nova/virt/libvirt/driver.py:950 msgid "Instance destroyed successfully." msgstr "Istanza distrutta correttamente." -#: nova/virt/libvirt/driver.py:968 +#: nova/virt/libvirt/driver.py:960 msgid "Instance may be started again." msgstr "L'istanza può essere avviata di nuovo." -#: nova/virt/libvirt/driver.py:978 +#: nova/virt/libvirt/driver.py:970 msgid "Going to destroy instance again." msgstr "L'istanza verrà nuovamente distrutta." -#: nova/virt/libvirt/driver.py:1574 +#: nova/virt/libvirt/driver.py:1570 msgid "Beginning live snapshot process" msgstr "Inizio processo attivo istantanea" -#: nova/virt/libvirt/driver.py:1577 +#: nova/virt/libvirt/driver.py:1573 msgid "Beginning cold snapshot process" msgstr "Inizio processo di istantanea a freddo" -#: nova/virt/libvirt/driver.py:1606 +#: nova/virt/libvirt/driver.py:1602 msgid "Snapshot extracted, beginning image upload" msgstr "Istantanea estratta, inizio caricamento immagine" -#: nova/virt/libvirt/driver.py:1618 +#: nova/virt/libvirt/driver.py:1614 msgid "Snapshot image upload complete" msgstr "Caricamento immagine istantanea completato" -#: nova/virt/libvirt/driver.py:2049 +#: nova/virt/libvirt/driver.py:2047 msgid "Instance soft rebooted successfully." msgstr "Avvio a caldo dell'istanza eseguito correttamente." -#: nova/virt/libvirt/driver.py:2092 +#: nova/virt/libvirt/driver.py:2090 msgid "Instance shutdown successfully." msgstr "Chiusura dell'istanza eseguita correttamente." 
-#: nova/virt/libvirt/driver.py:2100 +#: nova/virt/libvirt/driver.py:2098 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "L'istanza potrebbe essere stat riavviata durante l'avvio a caldo, quindi " "ritornare adesso." -#: nova/virt/libvirt/driver.py:2168 +#: nova/virt/libvirt/driver.py:2167 msgid "Instance rebooted successfully." msgstr "Istanza riavviata correttamente." -#: nova/virt/libvirt/driver.py:2336 +#: nova/virt/libvirt/driver.py:2335 msgid "Instance spawned successfully." msgstr "Istanza generata correttamente." -#: nova/virt/libvirt/driver.py:2352 +#: nova/virt/libvirt/driver.py:2351 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "dati: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 +#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Restituito log della console troncato, %d byte ignorati" -#: nova/virt/libvirt/driver.py:2645 +#: nova/virt/libvirt/driver.py:2646 msgid "Creating image" msgstr "Creazione immagine" -#: nova/virt/libvirt/driver.py:2754 +#: nova/virt/libvirt/driver.py:2772 msgid "Using config drive" msgstr "Utilizzo unità di config" -#: nova/virt/libvirt/driver.py:2763 +#: nova/virt/libvirt/driver.py:2781 #, python-format msgid "Creating config drive at %(path)s" msgstr "Creazione unità config in %(path)s" -#: nova/virt/libvirt/driver.py:3315 +#: nova/virt/libvirt/driver.py:3334 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4193 +#: nova/virt/libvirt/driver.py:4217 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4199 +#: nova/virt/libvirt/driver.py:4223 #, python-format msgid "" "Could not find domain in libvirt for instance %s. 
Cannot get block stats for " @@ -212,39 +255,39 @@ msgstr "" "Impossibile trovare il dominio in libvirt per l'istanza %s. Impossibile " "ottenere le statistiche del blocco per l'unità" -#: nova/virt/libvirt/driver.py:4441 +#: nova/virt/libvirt/driver.py:4465 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5174 +#: nova/virt/libvirt/driver.py:5207 msgid "Instance running successfully." msgstr "Istanza in esecuzione correttamente." -#: nova/virt/libvirt/driver.py:5414 +#: nova/virt/libvirt/driver.py:5481 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5426 +#: nova/virt/libvirt/driver.py:5494 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5429 +#: nova/virt/libvirt/driver.py:5498 #, python-format msgid "Deletion of %s complete" msgstr "" -#: nova/virt/libvirt/firewall.py:105 +#: nova/virt/libvirt/firewall.py:106 msgid "Called setup_basic_filtering in nwfilter" msgstr "Chiamato setup_basic_filtering in nwfilter" -#: nova/virt/libvirt/firewall.py:113 +#: nova/virt/libvirt/firewall.py:114 msgid "Ensuring static filters" msgstr "Controllo dei filtri statici" -#: nova/virt/libvirt/firewall.py:304 +#: nova/virt/libvirt/firewall.py:305 msgid "Attempted to unfilter instance which is not filtered" msgstr "Si è tentato di rimuovere il filtro da un'istanza senza filtro" @@ -305,11 +348,11 @@ msgstr "File di base danneggiato: %s" msgid "Removable base files: %s" msgstr "File di base rimovibili: %s" -#: nova/virt/libvirt/utils.py:536 +#: nova/virt/libvirt/utils.py:490 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1353 +#: nova/virt/xenapi/vm_utils.py:1355 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/ja/LC_MESSAGES/nova-log-error.po b/nova/locale/ja/LC_MESSAGES/nova-log-error.po index ea9903b1fb..6dbab52058 100644 --- 
a/nova/locale/ja/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/ja/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:04+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-06-20 16:41+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Japanese (http://www.transifex.com/projects/p/nova/language/" @@ -39,16 +39,265 @@ msgstr "" msgid "Exception running %(name)s post-hook: %(obj)s" msgstr "" -#: nova/api/ec2/__init__.py:243 +#: nova/api/ec2/__init__.py:244 #, python-format msgid "Keystone failure: %s" msgstr "" -#: nova/compute/manager.py:5416 +#: nova/api/ec2/__init__.py:493 +#, python-format +msgid "Unexpected %(ex_name)s raised: %(ex_str)s" +msgstr "" + +#: nova/api/ec2/__init__.py:520 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: nova/api/metadata/handler.py:155 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/metadata/handler.py:212 +#, python-format +msgid "Failed to get metadata for instance id: %s" +msgstr "" + +#: nova/api/openstack/common.py:134 +#, python-format +msgid "" +"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad " +"upgrade or db corrupted?" 
+msgstr "" + +#: nova/api/openstack/wsgi.py:684 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:68 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:90 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:112 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:134 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:160 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:179 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:198 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:215 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:234 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:392 +#, python-format +msgid "Compute.api::resetState %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/multinic.py:85 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85 +msgid "Failed to get default networks" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125 +msgid "Failed to update usages deallocating network." +msgstr "" + +#: nova/compute/api.py:561 +msgid "Failed to set instance name using multi_instance_display_name_template." +msgstr "" + +#: nova/compute/api.py:1429 +msgid "" +"Something wrong happened when trying to delete snapshot from shelved " +"instance." 
+msgstr "" + +#: nova/compute/api.py:3732 +msgid "Failed to update usages deallocating security group" +msgstr "" + +#: nova/compute/flavors.py:167 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: nova/compute/flavors.py:178 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: nova/compute/manager.py:366 +#, python-format +msgid "Error while trying to clean up image %s" +msgstr "" + +#: nova/compute/manager.py:755 +msgid "Failed to check if instance shared" +msgstr "" + +#: nova/compute/manager.py:821 nova/compute/manager.py:872 +msgid "Failed to complete a deletion" +msgstr "" + +#: nova/compute/manager.py:913 +msgid "Failed to stop instance" +msgstr "" + +#: nova/compute/manager.py:925 +msgid "Failed to start instance" +msgstr "" + +#: nova/compute/manager.py:950 +msgid "Failed to revert crashed migration" +msgstr "" + +#: nova/compute/manager.py:1364 +msgid "Failed to dealloc network for deleted instance" +msgstr "" + +#: nova/compute/manager.py:1385 +msgid "Failed to dealloc network for failed instance" +msgstr "" + +#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +msgid "Error trying to reschedule" +msgstr "" + +#: nova/compute/manager.py:1567 +#, python-format +msgid "Instance failed network setup after %(attempts)d attempt(s)" +msgstr "" + +#: nova/compute/manager.py:1755 +msgid "Instance failed block device setup" +msgstr "" + +#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 +#: nova/compute/manager.py:4058 +msgid "Instance failed to spawn" +msgstr "" + +#: nova/compute/manager.py:1957 +msgid "Unexpected build failure, not rescheduling build." 
+msgstr "" + +#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +msgid "Failed to allocate network(s)" +msgstr "" + +#: nova/compute/manager.py:2104 +msgid "Failure prepping block device" +msgstr "" + +#: nova/compute/manager.py:2137 +msgid "Failed to deallocate networks" +msgstr "" + +#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 +#: nova/compute/manager.py:5803 +msgid "Setting instance vm_state to ERROR" +msgstr "" + +#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#, python-format +msgid "Failed to get compute_info for %s" +msgstr "" + +#: nova/compute/manager.py:3005 +#, python-format +msgid "set_admin_password failed: %s" +msgstr "" + +#: nova/compute/manager.py:3090 +msgid "Error trying to Rescue Instance" +msgstr "" + +#: nova/compute/manager.py:3711 +#, python-format +msgid "Failed to rollback quota for failed finish_resize: %s" +msgstr "" + +#: nova/compute/manager.py:4310 +#, python-format +msgid "Failed to attach %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4349 +#, python-format +msgid "Failed to detach volume %(volume_id)s from %(mp)s" +msgstr "" + +#: nova/compute/manager.py:4422 +#, python-format +msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" +msgstr "" + +#: nova/compute/manager.py:4429 +#, python-format +msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4716 +#, python-format +msgid "Pre live migration failed at %s" +msgstr "" + +#: nova/compute/manager.py:5216 +msgid "Periodic task failed to offload instance." +msgstr "" + +#: nova/compute/manager.py:5256 +#, python-format +msgid "Failed to generate usage audit for instance on host %s" +msgstr "" + +#: nova/compute/manager.py:5446 msgid "" "Periodic sync_power_state task had an error while processing an instance." 
msgstr "" +#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 +#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +msgid "error during stop() in sync_power_state." +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "インスタンスの障害をセルに通知できませんでした" @@ -68,11 +317,11 @@ msgstr "予期せぬ例外が、%d回()発生しました。再試行中。" msgid "Could not release the acquired lock `%s`" msgstr "" -#: nova/openstack/common/loopingcall.py:89 +#: nova/openstack/common/loopingcall.py:95 msgid "in fixed duration looping call" msgstr "一定期間の呼び出しループ" -#: nova/openstack/common/loopingcall.py:136 +#: nova/openstack/common/loopingcall.py:138 msgid "in dynamic looping call" msgstr "動的呼び出しループ" @@ -121,137 +370,151 @@ msgstr "DB 例外がラップされました。" msgid "Failed to migrate to version %s on engine %s" msgstr "バージョン%sをエンジン%sへの移行が失敗しました。" -#: nova/virt/libvirt/driver.py:639 +#: nova/pci/pci_stats.py:119 +msgid "" +"Failed to allocate PCI devices for instance. Unassigning devices back to " +"pools. This should not happen, since the scheduler should have accurate " +"information, and allocation during claims is controlled via a hold on the " +"compute node semaphore" +msgstr "" + +#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109 +#, python-format +msgid "PCI device %s not found" +msgstr "" + +#: nova/virt/disk/api.py:388 +#, python-format +msgid "" +"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:641 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:764 +#: nova/virt/libvirt/driver.py:766 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:937 +#: nova/virt/libvirt/driver.py:929 #, python-format msgid "Error from libvirt during destroy. 
Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1015 +#: nova/virt/libvirt/driver.py:1007 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1041 +#: nova/virt/libvirt/driver.py:1035 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1445 +#: nova/virt/libvirt/driver.py:1438 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1470 +#: nova/virt/libvirt/driver.py:1465 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1719 +#: nova/virt/libvirt/driver.py:1717 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1827 +#: nova/virt/libvirt/driver.py:1825 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1833 +#: nova/virt/libvirt/driver.py:1831 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1882 +#: nova/virt/libvirt/driver.py:1880 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2028 +#: nova/virt/libvirt/driver.py:2026 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 +#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2619 +#: nova/virt/libvirt/driver.py:2620 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2770 +#: nova/virt/libvirt/driver.py:2788 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2863 +#: nova/virt/libvirt/driver.py:2881 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3645 +#: nova/virt/libvirt/driver.py:3680 #, python-format -msgid "An error occurred while trying to define a domain with xml: %s" +msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3654 +#: nova/virt/libvirt/driver.py:3684 #, python-format -msgid "An error occurred while trying to launch a defined domain with xml: %s" +msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3663 +#: nova/virt/libvirt/driver.py:3689 #, python-format -msgid "An error occurred while enabling hairpin mode on domain with xml: %s" +msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3681 +#: nova/virt/libvirt/driver.py:3703 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3988 +#: nova/virt/libvirt/driver.py:4012 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4667 +#: nova/virt/libvirt/driver.py:4691 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5419 +#: nova/virt/libvirt/driver.py:5487 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:201 +#: nova/virt/libvirt/imagebackend.py:200 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:229 +#: nova/virt/libvirt/imagebackend.py:227 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:505 -#, python-format -msgid "error opening rbd image %s" -msgstr "" - #: nova/virt/libvirt/imagecache.py:130 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" @@ -274,15 +537,19 @@ msgstr "%(base_file)s の削除に失敗しました。エラーは %(error)s" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 -#: nova/virt/libvirt/vif.py:562 +#: nova/virt/libvirt/rbd.py:62 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 +#: nova/virt/libvirt/vif.py:533 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 -#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 -#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 -#: nova/virt/libvirt/vif.py:737 +#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 +#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 +#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 msgid "Failed while unplugging vif" msgstr "" @@ -301,8 +568,18 @@ msgstr "" msgid "Couldn't unmount the GlusterFS share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:500 +#: nova/virt/vmwareapi/vmops.py:509 #, 
python-format msgid "" "Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s" msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1553 +#, python-format +msgid "Attaching network adapter failed. Exception: %s" +msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1593 +#, python-format +msgid "Detaching network adapter failed. Exception: %s" +msgstr "" diff --git a/nova/locale/ja/LC_MESSAGES/nova-log-info.po b/nova/locale/ja/LC_MESSAGES/nova-log-info.po index 2f1a79f4ee..4e059ace67 100644 --- a/nova/locale/ja/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/ja/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-06-30 04:40+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Japanese (http://www.transifex.com/projects/p/nova/language/" @@ -19,12 +19,51 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=1; plural=0;\n" -#: nova/compute/manager.py:5422 +#: nova/api/openstack/__init__.py:101 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: nova/api/openstack/__init__.py:294 +msgid "V3 API has been disabled by configuration" +msgstr "" + +#: nova/api/openstack/wsgi.py:688 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: nova/api/openstack/wsgi.py:691 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_networks.py:101 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: nova/compute/manager.py:5452 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." 
msgstr "" +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36 +msgid "" +"Skipped adding reservations_deleted_expire_idx because an equivalent index " +"already exists." +msgstr "" + +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58 +msgid "" +"Skipped removing reservations_deleted_expire_idx because index does not " +"exist." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" @@ -107,104 +146,108 @@ msgstr "" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/firewall.py:446 +#: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/libvirt/driver.py:839 +#: nova/virt/disk/vfs/guestfs.py:137 +msgid "Unable to force TCG mode, libguestfs too old?" +msgstr "" + +#: nova/virt/libvirt/driver.py:837 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:958 +#: nova/virt/libvirt/driver.py:950 msgid "Instance destroyed successfully." msgstr "インスタンスが正常に破棄されました。" -#: nova/virt/libvirt/driver.py:968 +#: nova/virt/libvirt/driver.py:960 msgid "Instance may be started again." msgstr "インスタンスを再び開始できます。" -#: nova/virt/libvirt/driver.py:978 +#: nova/virt/libvirt/driver.py:970 msgid "Going to destroy instance again." 
msgstr "インスタンスの破棄を再び行います。" -#: nova/virt/libvirt/driver.py:1574 +#: nova/virt/libvirt/driver.py:1570 msgid "Beginning live snapshot process" msgstr "ライブ・スナップショット・プロセスを開始しています" -#: nova/virt/libvirt/driver.py:1577 +#: nova/virt/libvirt/driver.py:1573 msgid "Beginning cold snapshot process" msgstr "コールド・スナップショット・プロセスを開始しています" -#: nova/virt/libvirt/driver.py:1606 +#: nova/virt/libvirt/driver.py:1602 msgid "Snapshot extracted, beginning image upload" msgstr "" "スナップショットが抽出されました。イメージのアップロードを開始しています" -#: nova/virt/libvirt/driver.py:1618 +#: nova/virt/libvirt/driver.py:1614 msgid "Snapshot image upload complete" msgstr "スナップショット・イメージのアップロードが完了しました" -#: nova/virt/libvirt/driver.py:2049 +#: nova/virt/libvirt/driver.py:2047 msgid "Instance soft rebooted successfully." msgstr "インスタンスが正常にソフト・リブートされました。" -#: nova/virt/libvirt/driver.py:2092 +#: nova/virt/libvirt/driver.py:2090 msgid "Instance shutdown successfully." msgstr "インスタンスが正常にシャットダウンされました。" -#: nova/virt/libvirt/driver.py:2100 +#: nova/virt/libvirt/driver.py:2098 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "インスタンスはソフト・リブート時にリブートされた可能性があるため、ここで返し" "ます。" -#: nova/virt/libvirt/driver.py:2168 +#: nova/virt/libvirt/driver.py:2167 msgid "Instance rebooted successfully." msgstr "インスタンスが正常にリブートされました。" -#: nova/virt/libvirt/driver.py:2336 +#: nova/virt/libvirt/driver.py:2335 msgid "Instance spawned successfully." 
msgstr "インスタンスが正常に作成されました。" -#: nova/virt/libvirt/driver.py:2352 +#: nova/virt/libvirt/driver.py:2351 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "データ: %(data)r, ファイルパス: %(fpath)r" -#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 +#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" "切り捨てられたコンソール・ログが返されました。%d バイトが無視されました" -#: nova/virt/libvirt/driver.py:2645 +#: nova/virt/libvirt/driver.py:2646 msgid "Creating image" msgstr "イメージの作成中" -#: nova/virt/libvirt/driver.py:2754 +#: nova/virt/libvirt/driver.py:2772 msgid "Using config drive" msgstr "構成ドライブを使用中" -#: nova/virt/libvirt/driver.py:2763 +#: nova/virt/libvirt/driver.py:2781 #, python-format msgid "Creating config drive at %(path)s" msgstr "構成ドライブを %(path)s に作成しています" -#: nova/virt/libvirt/driver.py:3315 +#: nova/virt/libvirt/driver.py:3334 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4193 +#: nova/virt/libvirt/driver.py:4217 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4199 +#: nova/virt/libvirt/driver.py:4223 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -213,39 +256,39 @@ msgstr "" "インスタンス %s 用のドメインが Libvirt 内で見つかりませんでした。デバイスのブ" "ロックの統計を取得できません" -#: nova/virt/libvirt/driver.py:4441 +#: nova/virt/libvirt/driver.py:4465 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5174 +#: nova/virt/libvirt/driver.py:5207 msgid "Instance running successfully." 
msgstr "インスタンスが正常に実行されています。" -#: nova/virt/libvirt/driver.py:5414 +#: nova/virt/libvirt/driver.py:5481 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5426 +#: nova/virt/libvirt/driver.py:5494 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5429 +#: nova/virt/libvirt/driver.py:5498 #, python-format msgid "Deletion of %s complete" msgstr "" -#: nova/virt/libvirt/firewall.py:105 +#: nova/virt/libvirt/firewall.py:106 msgid "Called setup_basic_filtering in nwfilter" msgstr "nwfilter で setup_basic_filtering を呼び出しました" -#: nova/virt/libvirt/firewall.py:113 +#: nova/virt/libvirt/firewall.py:114 msgid "Ensuring static filters" msgstr "静的フィルターの確認中" -#: nova/virt/libvirt/firewall.py:304 +#: nova/virt/libvirt/firewall.py:305 msgid "Attempted to unfilter instance which is not filtered" msgstr "" "フィルター処理されていないインスタンスに対してフィルター処理の取り消しが試み" @@ -308,11 +351,11 @@ msgstr "破損した基本ファイル: %s" msgid "Removable base files: %s" msgstr "削除可能な基本ファイル: %s" -#: nova/virt/libvirt/utils.py:536 +#: nova/virt/libvirt/utils.py:490 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1353 +#: nova/virt/xenapi/vm_utils.py:1355 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po b/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po index a565b529c5..8e2b069f4e 100644 --- a/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:04+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-06-16 04:10+0000\n" "Last-Translator: jaekwon.park \n" "Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/nova/" @@ -40,16 +40,265 @@ msgstr "" msgid "Exception running %(name)s post-hook: 
%(obj)s" msgstr "" -#: nova/api/ec2/__init__.py:243 +#: nova/api/ec2/__init__.py:244 #, python-format msgid "Keystone failure: %s" msgstr "" -#: nova/compute/manager.py:5416 +#: nova/api/ec2/__init__.py:493 +#, python-format +msgid "Unexpected %(ex_name)s raised: %(ex_str)s" +msgstr "" + +#: nova/api/ec2/__init__.py:520 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: nova/api/metadata/handler.py:155 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/metadata/handler.py:212 +#, python-format +msgid "Failed to get metadata for instance id: %s" +msgstr "" + +#: nova/api/openstack/common.py:134 +#, python-format +msgid "" +"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad " +"upgrade or db corrupted?" +msgstr "" + +#: nova/api/openstack/wsgi.py:684 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:68 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:90 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:112 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:134 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:160 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:179 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:198 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:215 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:234 +#, python-format +msgid 
"Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:392 +#, python-format +msgid "Compute.api::resetState %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/multinic.py:85 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85 +msgid "Failed to get default networks" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125 +msgid "Failed to update usages deallocating network." +msgstr "" + +#: nova/compute/api.py:561 +msgid "Failed to set instance name using multi_instance_display_name_template." +msgstr "" + +#: nova/compute/api.py:1429 +msgid "" +"Something wrong happened when trying to delete snapshot from shelved " +"instance." +msgstr "" + +#: nova/compute/api.py:3732 +msgid "Failed to update usages deallocating security group" +msgstr "" + +#: nova/compute/flavors.py:167 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: nova/compute/flavors.py:178 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: nova/compute/manager.py:366 +#, python-format +msgid "Error while trying to clean up image %s" +msgstr "" + +#: nova/compute/manager.py:755 +msgid "Failed to check if instance shared" +msgstr "" + +#: nova/compute/manager.py:821 nova/compute/manager.py:872 +msgid "Failed to complete a deletion" +msgstr "" + +#: nova/compute/manager.py:913 +msgid "Failed to stop instance" +msgstr "" + +#: nova/compute/manager.py:925 +msgid "Failed to start instance" +msgstr "" + +#: nova/compute/manager.py:950 +msgid "Failed to revert crashed migration" +msgstr "" + +#: nova/compute/manager.py:1364 +msgid "Failed to dealloc network for deleted instance" +msgstr "" + +#: nova/compute/manager.py:1385 +msgid "Failed to dealloc network for failed instance" +msgstr "" + +#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +msgid "Error trying to reschedule" +msgstr "" + +#: 
nova/compute/manager.py:1567 +#, python-format +msgid "Instance failed network setup after %(attempts)d attempt(s)" +msgstr "" + +#: nova/compute/manager.py:1755 +msgid "Instance failed block device setup" +msgstr "" + +#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 +#: nova/compute/manager.py:4058 +msgid "Instance failed to spawn" +msgstr "" + +#: nova/compute/manager.py:1957 +msgid "Unexpected build failure, not rescheduling build." +msgstr "" + +#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +msgid "Failed to allocate network(s)" +msgstr "" + +#: nova/compute/manager.py:2104 +msgid "Failure prepping block device" +msgstr "" + +#: nova/compute/manager.py:2137 +msgid "Failed to deallocate networks" +msgstr "" + +#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 +#: nova/compute/manager.py:5803 +msgid "Setting instance vm_state to ERROR" +msgstr "" + +#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#, python-format +msgid "Failed to get compute_info for %s" +msgstr "" + +#: nova/compute/manager.py:3005 +#, python-format +msgid "set_admin_password failed: %s" +msgstr "" + +#: nova/compute/manager.py:3090 +msgid "Error trying to Rescue Instance" +msgstr "" + +#: nova/compute/manager.py:3711 +#, python-format +msgid "Failed to rollback quota for failed finish_resize: %s" +msgstr "" + +#: nova/compute/manager.py:4310 +#, python-format +msgid "Failed to attach %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4349 +#, python-format +msgid "Failed to detach volume %(volume_id)s from %(mp)s" +msgstr "" + +#: nova/compute/manager.py:4422 +#, python-format +msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" +msgstr "" + +#: nova/compute/manager.py:4429 +#, python-format +msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4716 +#, python-format +msgid "Pre live migration failed at %s" +msgstr "" + +#: 
nova/compute/manager.py:5216 +msgid "Periodic task failed to offload instance." +msgstr "" + +#: nova/compute/manager.py:5256 +#, python-format +msgid "Failed to generate usage audit for instance on host %s" +msgstr "" + +#: nova/compute/manager.py:5446 msgid "" "Periodic sync_power_state task had an error while processing an instance." msgstr "" +#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 +#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +msgid "error during stop() in sync_power_state." +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "셀에 인스턴스 결함을 알리지 못했음" @@ -69,11 +318,11 @@ msgstr "예기치 않은 예외 %d 번 발생하였습니다... 다시 시도중 msgid "Could not release the acquired lock `%s`" msgstr "" -#: nova/openstack/common/loopingcall.py:89 +#: nova/openstack/common/loopingcall.py:95 msgid "in fixed duration looping call" msgstr "고정 기간 루프 호출에서" -#: nova/openstack/common/loopingcall.py:136 +#: nova/openstack/common/loopingcall.py:138 msgid "in dynamic looping call" msgstr "동적 루프 호출에서" @@ -122,137 +371,151 @@ msgstr "DB 예외가 랩핑되었습니다." msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:639 +#: nova/pci/pci_stats.py:119 +msgid "" +"Failed to allocate PCI devices for instance. Unassigning devices back to " +"pools. This should not happen, since the scheduler should have accurate " +"information, and allocation during claims is controlled via a hold on the " +"compute node semaphore" +msgstr "" + +#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109 +#, python-format +msgid "PCI device %s not found" +msgstr "" + +#: nova/virt/disk/api.py:388 +#, python-format +msgid "" +"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:641 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." 
msgstr "" -#: nova/virt/libvirt/driver.py:764 +#: nova/virt/libvirt/driver.py:766 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:937 +#: nova/virt/libvirt/driver.py:929 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1015 +#: nova/virt/libvirt/driver.py:1007 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1041 +#: nova/virt/libvirt/driver.py:1035 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1445 +#: nova/virt/libvirt/driver.py:1438 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1470 +#: nova/virt/libvirt/driver.py:1465 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1719 +#: nova/virt/libvirt/driver.py:1717 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1827 +#: nova/virt/libvirt/driver.py:1825 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1833 +#: nova/virt/libvirt/driver.py:1831 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1882 +#: nova/virt/libvirt/driver.py:1880 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2028 +#: nova/virt/libvirt/driver.py:2026 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 +#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2619 +#: nova/virt/libvirt/driver.py:2620 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2770 +#: nova/virt/libvirt/driver.py:2788 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2863 +#: nova/virt/libvirt/driver.py:2881 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3645 +#: nova/virt/libvirt/driver.py:3680 #, python-format -msgid "An error occurred while trying to define a domain with xml: %s" +msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3654 +#: nova/virt/libvirt/driver.py:3684 #, python-format -msgid "An error occurred while trying to launch a defined domain with xml: %s" +msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3663 +#: nova/virt/libvirt/driver.py:3689 #, python-format -msgid "An error occurred while enabling hairpin mode on domain with xml: %s" +msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3681 +#: nova/virt/libvirt/driver.py:3703 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3988 +#: nova/virt/libvirt/driver.py:4012 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4667 +#: nova/virt/libvirt/driver.py:4691 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5419 +#: nova/virt/libvirt/driver.py:5487 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:201 +#: nova/virt/libvirt/imagebackend.py:200 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:229 +#: nova/virt/libvirt/imagebackend.py:227 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:505 -#, python-format -msgid "error opening rbd image %s" -msgstr "" - #: nova/virt/libvirt/imagecache.py:130 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" @@ -273,15 +536,19 @@ msgstr "" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 -#: nova/virt/libvirt/vif.py:562 +#: nova/virt/libvirt/rbd.py:62 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 +#: nova/virt/libvirt/vif.py:533 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 -#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 -#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 -#: nova/virt/libvirt/vif.py:737 +#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 +#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 +#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 msgid "Failed while unplugging vif" msgstr "" @@ -300,8 +567,18 @@ msgstr "" msgid "Couldn't unmount the GlusterFS share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:500 +#: nova/virt/vmwareapi/vmops.py:509 #, python-format msgid "" "Failed to copy cached 
image %(source)s to %(dest)s for resize: %(error)s" msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1553 +#, python-format +msgid "Attaching network adapter failed. Exception: %s" +msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1593 +#, python-format +msgid "Detaching network adapter failed. Exception: %s" +msgstr "" diff --git a/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po b/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po index 20799e4858..20c80f17cd 100644 --- a/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-06-30 04:40+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/nova/" @@ -19,12 +19,51 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=1; plural=0;\n" -#: nova/compute/manager.py:5422 +#: nova/api/openstack/__init__.py:101 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: nova/api/openstack/__init__.py:294 +msgid "V3 API has been disabled by configuration" +msgstr "" + +#: nova/api/openstack/wsgi.py:688 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: nova/api/openstack/wsgi.py:691 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_networks.py:101 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: nova/compute/manager.py:5452 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." 
msgstr "" +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36 +msgid "" +"Skipped adding reservations_deleted_expire_idx because an equivalent index " +"already exists." +msgstr "" + +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58 +msgid "" +"Skipped removing reservations_deleted_expire_idx because index does not " +"exist." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" @@ -107,101 +146,105 @@ msgstr "" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/firewall.py:446 +#: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/libvirt/driver.py:839 +#: nova/virt/disk/vfs/guestfs.py:137 +msgid "Unable to force TCG mode, libguestfs too old?" +msgstr "" + +#: nova/virt/libvirt/driver.py:837 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:958 +#: nova/virt/libvirt/driver.py:950 msgid "Instance destroyed successfully." msgstr "인스턴스가 영구 삭제되었습니다. " -#: nova/virt/libvirt/driver.py:968 +#: nova/virt/libvirt/driver.py:960 msgid "Instance may be started again." msgstr "인스턴스가 다시 시작됩니다." -#: nova/virt/libvirt/driver.py:978 +#: nova/virt/libvirt/driver.py:970 msgid "Going to destroy instance again." msgstr "인스턴스를 다시 영구 삭제하려 합니다." 
-#: nova/virt/libvirt/driver.py:1574 +#: nova/virt/libvirt/driver.py:1570 msgid "Beginning live snapshot process" msgstr "라이브 스냅샷 프로세스 시작 중" -#: nova/virt/libvirt/driver.py:1577 +#: nova/virt/libvirt/driver.py:1573 msgid "Beginning cold snapshot process" msgstr "콜드 스냅샷 프로세스 시작 중" -#: nova/virt/libvirt/driver.py:1606 +#: nova/virt/libvirt/driver.py:1602 msgid "Snapshot extracted, beginning image upload" msgstr "스냅샷 추출, 이미지 업로드 시작 중" -#: nova/virt/libvirt/driver.py:1618 +#: nova/virt/libvirt/driver.py:1614 msgid "Snapshot image upload complete" msgstr "스냅샷 이미지 업로드 완료" -#: nova/virt/libvirt/driver.py:2049 +#: nova/virt/libvirt/driver.py:2047 msgid "Instance soft rebooted successfully." msgstr "인스턴스가 소프트 리부트되었습니다. " -#: nova/virt/libvirt/driver.py:2092 +#: nova/virt/libvirt/driver.py:2090 msgid "Instance shutdown successfully." msgstr "인스턴스가 시스템 종료되었습니다. " -#: nova/virt/libvirt/driver.py:2100 +#: nova/virt/libvirt/driver.py:2098 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "인스턴스가 소프트 리부트 중에 다시 부팅되었을 수 있으므로, 지금 리턴합니다. " -#: nova/virt/libvirt/driver.py:2168 +#: nova/virt/libvirt/driver.py:2167 msgid "Instance rebooted successfully." msgstr "인스턴스가 다시 부트되었습니다. " -#: nova/virt/libvirt/driver.py:2336 +#: nova/virt/libvirt/driver.py:2335 msgid "Instance spawned successfully." msgstr "인스턴스가 파생되었습니다. 
" -#: nova/virt/libvirt/driver.py:2352 +#: nova/virt/libvirt/driver.py:2351 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "데이터: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 +#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "잘린 콘솔 로그가 리턴되었으며, %d 바이트는 무시됨" -#: nova/virt/libvirt/driver.py:2645 +#: nova/virt/libvirt/driver.py:2646 msgid "Creating image" msgstr "이미지 작성 중" -#: nova/virt/libvirt/driver.py:2754 +#: nova/virt/libvirt/driver.py:2772 msgid "Using config drive" msgstr "구성 드라이브 사용 중" -#: nova/virt/libvirt/driver.py:2763 +#: nova/virt/libvirt/driver.py:2781 #, python-format msgid "Creating config drive at %(path)s" msgstr "%(path)s에 구성 드라이브 작성 중" -#: nova/virt/libvirt/driver.py:3315 +#: nova/virt/libvirt/driver.py:3334 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4193 +#: nova/virt/libvirt/driver.py:4217 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4199 +#: nova/virt/libvirt/driver.py:4223 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -210,39 +253,39 @@ msgstr "" "%s 인스턴스에 대한 libvirt에서 도메인을 찾을 수 없습니다. 디바이스의 블록 통" "계를 가져올 수 없음" -#: nova/virt/libvirt/driver.py:4441 +#: nova/virt/libvirt/driver.py:4465 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5174 +#: nova/virt/libvirt/driver.py:5207 msgid "Instance running successfully." msgstr "인스턴스가 정상적으로 실행 중입니다. 
" -#: nova/virt/libvirt/driver.py:5414 +#: nova/virt/libvirt/driver.py:5481 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5426 +#: nova/virt/libvirt/driver.py:5494 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5429 +#: nova/virt/libvirt/driver.py:5498 #, python-format msgid "Deletion of %s complete" msgstr "" -#: nova/virt/libvirt/firewall.py:105 +#: nova/virt/libvirt/firewall.py:106 msgid "Called setup_basic_filtering in nwfilter" msgstr "nwfilter에서 setup_basic_filtering을 호출했음" -#: nova/virt/libvirt/firewall.py:113 +#: nova/virt/libvirt/firewall.py:114 msgid "Ensuring static filters" msgstr "정적 필터 확인 중" -#: nova/virt/libvirt/firewall.py:304 +#: nova/virt/libvirt/firewall.py:305 msgid "Attempted to unfilter instance which is not filtered" msgstr "필터링되지 않는 인스턴스를 필터링 해제하려고 했음" @@ -302,11 +345,11 @@ msgstr "손상된 기본 파일: %s" msgid "Removable base files: %s" msgstr "제거 가능한 기본 파일: %s" -#: nova/virt/libvirt/utils.py:536 +#: nova/virt/libvirt/utils.py:490 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1353 +#: nova/virt/xenapi/vm_utils.py:1355 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/nova-log-critical.pot b/nova/locale/nova-log-critical.pot index f2c4fd2733..6455c0c05d 100644 --- a/nova/locale/nova-log-critical.pot +++ b/nova/locale/nova-log-critical.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev763.g740fa02\n" +"Project-Id-Version: nova 2014.2.dev425.g05dbf0d\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:04+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,14 +17,19 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: nova/virt/vmwareapi/driver.py:864 +#: 
nova/api/openstack/__init__.py:331 +#, python-format +msgid "Missing core API extensions: %s" +msgstr "" + +#: nova/virt/vmwareapi/driver.py:658 #, python-format msgid "" "Unable to connect to server at %(server)s, sleeping for %(seconds)s " "seconds" msgstr "" -#: nova/virt/vmwareapi/driver.py:973 +#: nova/virt/vmwareapi/driver.py:767 #, python-format msgid "In vmwareapi: _call_method (session=%s)" msgstr "" diff --git a/nova/locale/nova-log-error.pot b/nova/locale/nova-log-error.pot index 336dbaa455..7801bdb8cf 100644 --- a/nova/locale/nova-log-error.pot +++ b/nova/locale/nova-log-error.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev763.g740fa02\n" +"Project-Id-Version: nova 2014.2.dev425.g05dbf0d\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:04+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -37,15 +37,264 @@ msgstr "" msgid "Exception running %(name)s post-hook: %(obj)s" msgstr "" -#: nova/api/ec2/__init__.py:243 +#: nova/api/ec2/__init__.py:244 #, python-format msgid "Keystone failure: %s" msgstr "" -#: nova/compute/manager.py:5416 +#: nova/api/ec2/__init__.py:493 +#, python-format +msgid "Unexpected %(ex_name)s raised: %(ex_str)s" +msgstr "" + +#: nova/api/ec2/__init__.py:520 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: nova/api/metadata/handler.py:155 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/metadata/handler.py:212 +#, python-format +msgid "Failed to get metadata for instance id: %s" +msgstr "" + +#: nova/api/openstack/common.py:134 +#, python-format +msgid "" +"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. " +"Bad upgrade or db corrupted?" 
+msgstr "" + +#: nova/api/openstack/wsgi.py:684 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:68 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:90 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:112 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:134 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:160 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:179 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:198 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:215 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:234 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:392 +#, python-format +msgid "Compute.api::resetState %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/multinic.py:85 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85 +msgid "Failed to get default networks" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125 +msgid "Failed to update usages deallocating network." +msgstr "" + +#: nova/compute/api.py:561 +msgid "Failed to set instance name using multi_instance_display_name_template." +msgstr "" + +#: nova/compute/api.py:1429 +msgid "" +"Something wrong happened when trying to delete snapshot from shelved " +"instance." 
+msgstr "" + +#: nova/compute/api.py:3732 +msgid "Failed to update usages deallocating security group" +msgstr "" + +#: nova/compute/flavors.py:167 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: nova/compute/flavors.py:178 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: nova/compute/manager.py:366 +#, python-format +msgid "Error while trying to clean up image %s" +msgstr "" + +#: nova/compute/manager.py:755 +msgid "Failed to check if instance shared" +msgstr "" + +#: nova/compute/manager.py:821 nova/compute/manager.py:872 +msgid "Failed to complete a deletion" +msgstr "" + +#: nova/compute/manager.py:913 +msgid "Failed to stop instance" +msgstr "" + +#: nova/compute/manager.py:925 +msgid "Failed to start instance" +msgstr "" + +#: nova/compute/manager.py:950 +msgid "Failed to revert crashed migration" +msgstr "" + +#: nova/compute/manager.py:1364 +msgid "Failed to dealloc network for deleted instance" +msgstr "" + +#: nova/compute/manager.py:1385 +msgid "Failed to dealloc network for failed instance" +msgstr "" + +#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +msgid "Error trying to reschedule" +msgstr "" + +#: nova/compute/manager.py:1567 +#, python-format +msgid "Instance failed network setup after %(attempts)d attempt(s)" +msgstr "" + +#: nova/compute/manager.py:1755 +msgid "Instance failed block device setup" +msgstr "" + +#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 +#: nova/compute/manager.py:4058 +msgid "Instance failed to spawn" +msgstr "" + +#: nova/compute/manager.py:1957 +msgid "Unexpected build failure, not rescheduling build." 
+msgstr "" + +#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +msgid "Failed to allocate network(s)" +msgstr "" + +#: nova/compute/manager.py:2104 +msgid "Failure prepping block device" +msgstr "" + +#: nova/compute/manager.py:2137 +msgid "Failed to deallocate networks" +msgstr "" + +#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 +#: nova/compute/manager.py:5803 +msgid "Setting instance vm_state to ERROR" +msgstr "" + +#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#, python-format +msgid "Failed to get compute_info for %s" +msgstr "" + +#: nova/compute/manager.py:3005 +#, python-format +msgid "set_admin_password failed: %s" +msgstr "" + +#: nova/compute/manager.py:3090 +msgid "Error trying to Rescue Instance" +msgstr "" + +#: nova/compute/manager.py:3711 +#, python-format +msgid "Failed to rollback quota for failed finish_resize: %s" +msgstr "" + +#: nova/compute/manager.py:4310 +#, python-format +msgid "Failed to attach %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4349 +#, python-format +msgid "Failed to detach volume %(volume_id)s from %(mp)s" +msgstr "" + +#: nova/compute/manager.py:4422 +#, python-format +msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" +msgstr "" + +#: nova/compute/manager.py:4429 +#, python-format +msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4716 +#, python-format +msgid "Pre live migration failed at %s" +msgstr "" + +#: nova/compute/manager.py:5216 +msgid "Periodic task failed to offload instance." +msgstr "" + +#: nova/compute/manager.py:5256 +#, python-format +msgid "Failed to generate usage audit for instance on host %s" +msgstr "" + +#: nova/compute/manager.py:5446 msgid "Periodic sync_power_state task had an error while processing an instance." 
msgstr "" +#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 +#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +msgid "error during stop() in sync_power_state." +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "" @@ -65,11 +314,11 @@ msgstr "" msgid "Could not release the acquired lock `%s`" msgstr "" -#: nova/openstack/common/loopingcall.py:89 +#: nova/openstack/common/loopingcall.py:95 msgid "in fixed duration looping call" msgstr "" -#: nova/openstack/common/loopingcall.py:136 +#: nova/openstack/common/loopingcall.py:138 msgid "in dynamic looping call" msgstr "" @@ -118,139 +367,154 @@ msgstr "" msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:639 +#: nova/pci/pci_stats.py:119 +msgid "" +"Failed to allocate PCI devices for instance. Unassigning devices back to " +"pools. This should not happen, since the scheduler should have accurate " +"information, and allocation during claims is controlled via a hold on the" +" compute node semaphore" +msgstr "" + +#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109 +#, python-format +msgid "PCI device %s not found" +msgstr "" + +#: nova/virt/disk/api.py:388 +#, python-format +msgid "" +"Failed to mount container filesystem '%(image)s' on '%(target)s': " +"%(errors)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:641 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:764 +#: nova/virt/libvirt/driver.py:766 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:937 +#: nova/virt/libvirt/driver.py:929 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1015 +#: nova/virt/libvirt/driver.py:1007 #, python-format msgid "Error from libvirt during undefine. 
Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1041 +#: nova/virt/libvirt/driver.py:1035 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1445 +#: nova/virt/libvirt/driver.py:1438 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1470 +#: nova/virt/libvirt/driver.py:1465 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1719 +#: nova/virt/libvirt/driver.py:1717 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1827 +#: nova/virt/libvirt/driver.py:1825 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1833 +#: nova/virt/libvirt/driver.py:1831 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1882 +#: nova/virt/libvirt/driver.py:1880 msgid "" "Error occurred during volume_snapshot_create, sending error status to " "Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2028 +#: nova/virt/libvirt/driver.py:2026 msgid "" "Error occurred during volume_snapshot_delete, sending error status to " "Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 +#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2619 +#: nova/virt/libvirt/driver.py:2620 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2770 +#: nova/virt/libvirt/driver.py:2788 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2863 +#: nova/virt/libvirt/driver.py:2881 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." 
msgstr "" -#: nova/virt/libvirt/driver.py:3645 +#: nova/virt/libvirt/driver.py:3680 #, python-format -msgid "An error occurred while trying to define a domain with xml: %s" +msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3654 +#: nova/virt/libvirt/driver.py:3684 #, python-format -msgid "An error occurred while trying to launch a defined domain with xml: %s" +msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3663 +#: nova/virt/libvirt/driver.py:3689 #, python-format -msgid "An error occurred while enabling hairpin mode on domain with xml: %s" +msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3681 +#: nova/virt/libvirt/driver.py:3703 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3988 +#: nova/virt/libvirt/driver.py:4012 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to " "take effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4667 +#: nova/virt/libvirt/driver.py:4691 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5419 +#: nova/virt/libvirt/driver.py:5487 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:201 +#: nova/virt/libvirt/imagebackend.py:200 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:229 +#: nova/virt/libvirt/imagebackend.py:227 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:505 -#, python-format -msgid "error opening rbd image %s" -msgstr "" - #: nova/virt/libvirt/imagecache.py:130 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" @@ -271,15 +535,19 @@ msgstr "" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 -#: nova/virt/libvirt/vif.py:562 +#: nova/virt/libvirt/rbd.py:62 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 +#: nova/virt/libvirt/vif.py:533 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 -#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 -#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 -#: nova/virt/libvirt/vif.py:737 +#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 +#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 +#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 msgid "Failed while unplugging vif" msgstr "" @@ -298,8 +566,18 @@ msgstr "" msgid "Couldn't unmount the GlusterFS share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:500 +#: nova/virt/vmwareapi/vmops.py:509 #, python-format msgid "Failed to copy cached 
image %(source)s to %(dest)s for resize: %(error)s" msgstr "" +#: nova/virt/vmwareapi/vmops.py:1553 +#, python-format +msgid "Attaching network adapter failed. Exception: %s" +msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1593 +#, python-format +msgid "Detaching network adapter failed. Exception: %s" +msgstr "" + diff --git a/nova/locale/nova-log-info.pot b/nova/locale/nova-log-info.pot index 196a2c7327..4f11257f2b 100644 --- a/nova/locale/nova-log-info.pot +++ b/nova/locale/nova-log-info.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev763.g740fa02\n" +"Project-Id-Version: nova 2014.2.dev425.g05dbf0d\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,11 +17,50 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: nova/compute/manager.py:5422 +#: nova/api/openstack/__init__.py:101 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: nova/api/openstack/__init__.py:294 +msgid "V3 API has been disabled by configuration" +msgstr "" + +#: nova/api/openstack/wsgi.py:688 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: nova/api/openstack/wsgi.py:691 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_networks.py:101 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: nova/compute/manager.py:5452 #, python-format msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." 
msgstr "" +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36 +msgid "" +"Skipped adding reservations_deleted_expire_idx because an equivalent " +"index already exists." +msgstr "" + +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58 +msgid "" +"Skipped removing reservations_deleted_expire_idx because index does not " +"exist." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" @@ -104,140 +143,144 @@ msgstr "" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/firewall.py:446 +#: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/libvirt/driver.py:839 +#: nova/virt/disk/vfs/guestfs.py:137 +msgid "Unable to force TCG mode, libguestfs too old?" +msgstr "" + +#: nova/virt/libvirt/driver.py:837 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: " "%(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:958 +#: nova/virt/libvirt/driver.py:950 msgid "Instance destroyed successfully." msgstr "" -#: nova/virt/libvirt/driver.py:968 +#: nova/virt/libvirt/driver.py:960 msgid "Instance may be started again." msgstr "" -#: nova/virt/libvirt/driver.py:978 +#: nova/virt/libvirt/driver.py:970 msgid "Going to destroy instance again." 
msgstr "" -#: nova/virt/libvirt/driver.py:1574 +#: nova/virt/libvirt/driver.py:1570 msgid "Beginning live snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1577 +#: nova/virt/libvirt/driver.py:1573 msgid "Beginning cold snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1606 +#: nova/virt/libvirt/driver.py:1602 msgid "Snapshot extracted, beginning image upload" msgstr "" -#: nova/virt/libvirt/driver.py:1618 +#: nova/virt/libvirt/driver.py:1614 msgid "Snapshot image upload complete" msgstr "" -#: nova/virt/libvirt/driver.py:2049 +#: nova/virt/libvirt/driver.py:2047 msgid "Instance soft rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2092 +#: nova/virt/libvirt/driver.py:2090 msgid "Instance shutdown successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2100 +#: nova/virt/libvirt/driver.py:2098 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" -#: nova/virt/libvirt/driver.py:2168 +#: nova/virt/libvirt/driver.py:2167 msgid "Instance rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2336 +#: nova/virt/libvirt/driver.py:2335 msgid "Instance spawned successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:2352 +#: nova/virt/libvirt/driver.py:2351 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 +#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" -#: nova/virt/libvirt/driver.py:2645 +#: nova/virt/libvirt/driver.py:2646 msgid "Creating image" msgstr "" -#: nova/virt/libvirt/driver.py:2754 +#: nova/virt/libvirt/driver.py:2772 msgid "Using config drive" msgstr "" -#: nova/virt/libvirt/driver.py:2763 +#: nova/virt/libvirt/driver.py:2781 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/libvirt/driver.py:3315 +#: nova/virt/libvirt/driver.py:3334 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4193 +#: nova/virt/libvirt/driver.py:4217 #, python-format msgid "" "Getting block stats failed, device might have been detached. " "Instance=%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4199 +#: nova/virt/libvirt/driver.py:4223 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats " "for device" msgstr "" -#: nova/virt/libvirt/driver.py:4441 +#: nova/virt/libvirt/driver.py:4465 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5174 +#: nova/virt/libvirt/driver.py:5207 msgid "Instance running successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:5414 +#: nova/virt/libvirt/driver.py:5481 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5426 +#: nova/virt/libvirt/driver.py:5494 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5429 +#: nova/virt/libvirt/driver.py:5498 #, python-format msgid "Deletion of %s complete" msgstr "" -#: nova/virt/libvirt/firewall.py:105 +#: nova/virt/libvirt/firewall.py:106 msgid "Called setup_basic_filtering in nwfilter" msgstr "" -#: nova/virt/libvirt/firewall.py:113 +#: nova/virt/libvirt/firewall.py:114 msgid "Ensuring static filters" msgstr "" -#: nova/virt/libvirt/firewall.py:304 +#: nova/virt/libvirt/firewall.py:305 msgid "Attempted to unfilter instance which is not filtered" msgstr "" @@ -295,11 +338,11 @@ msgstr "" msgid "Removable base files: %s" msgstr "" -#: nova/virt/libvirt/utils.py:536 +#: nova/virt/libvirt/utils.py:490 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1353 +#: nova/virt/xenapi/vm_utils.py:1355 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/nova-log-warning.pot b/nova/locale/nova-log-warning.pot index 8e9cbeba3b..0e497fc11b 100644 --- a/nova/locale/nova-log-warning.pot +++ b/nova/locale/nova-log-warning.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev763.g740fa02\n" +"Project-Id-Version: nova 2014.2.dev425.g05dbf0d\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,15 +17,142 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: nova/compute/manager.py:2002 +#: nova/api/auth.py:73 +msgid "ratelimit_v3 is removed from v3 api." 
+msgstr "" + +#: nova/api/auth.py:160 +msgid "Sourcing roles from deprecated X-Role HTTP header" +msgstr "" + +#: nova/api/ec2/__init__.py:169 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." +msgstr "" + +#: nova/api/ec2/cloud.py:1289 +#: nova/api/openstack/compute/contrib/floating_ips.py:254 +#, python-format +msgid "multiple fixed_ips exist, using the first: %s" +msgstr "" + +#: nova/api/metadata/handler.py:119 +msgid "" +"X-Instance-ID present in request headers. The 'service_metadata_proxy' " +"option must be enabled to process this header." +msgstr "" + +#: nova/api/metadata/handler.py:189 +#, python-format +msgid "" +"X-Instance-ID-Signature: %(signature)s does not match the expected value:" +" %(expected_signature)s for id: %(instance_id)s. Request From: " +"%(remote_address)s" +msgstr "" + +#: nova/api/metadata/handler.py:215 +#, python-format +msgid "" +"Tenant_id %(tenant_id)s does not match tenant_id of instance " +"%(instance_id)s." 
+msgstr "" + +#: nova/api/metadata/vendordata_json.py:47 +msgid "file does not exist" +msgstr "" + +#: nova/api/metadata/vendordata_json.py:49 +msgid "Unexpected IOError when reading" +msgstr "" + +#: nova/api/metadata/vendordata_json.py:53 +msgid "failed to load json" +msgstr "" + +#: nova/api/openstack/__init__.py:235 nova/api/openstack/__init__.py:409 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: nova/api/openstack/__init__.py:282 +#: nova/api/openstack/compute/plugins/v3/servers.py:104 +#, python-format +msgid "Not loading %s because it is in the blacklist" +msgstr "" + +#: nova/api/openstack/__init__.py:287 +#: nova/api/openstack/compute/plugins/v3/servers.py:109 +#, python-format +msgid "Not loading %s because it is not in the whitelist" +msgstr "" + +#: nova/api/openstack/__init__.py:307 +#, python-format +msgid "Extensions in both blacklist and whitelist: %s" +msgstr "" + +#: nova/api/openstack/common.py:456 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: nova/api/openstack/extensions.py:279 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: nova/api/openstack/compute/servers.py:82 +msgid "" +"XML support has been deprecated and may be removed as early as the Juno " +"release." 
+msgstr "" + +#: nova/api/openstack/compute/views/servers.py:197 +msgid "Instance has had its instance_type removed from the DB" +msgstr "" + +#: nova/compute/manager.py:2016 msgid "No more network or fixed IP to be allocated" msgstr "" -#: nova/compute/manager.py:2267 +#: nova/compute/manager.py:2256 +#, python-format +msgid "Ignoring EndpointNotFound: %s" +msgstr "" + +#: nova/compute/manager.py:2274 #, python-format msgid "Failed to delete volume: %(volume_id)s due to %(exc)s" msgstr "" +#: nova/compute/utils.py:204 +#, python-format +msgid "Can't access image %(image_id)s: %(error)s" +msgstr "" + +#: nova/compute/utils.py:328 +#, python-format +msgid "" +"No host name specified for the notification of HostAPI.%s and it will be " +"ignored" +msgstr "" + +#: nova/compute/utils.py:456 +#, python-format +msgid "" +"Value of 0 or None specified for %s. This behaviour will change in " +"meaning in the K release, to mean 'call at the default rate' rather than " +"'do not call'. To keep the 'do not call' behaviour, use a negative value." +msgstr "" + +#: nova/compute/resources/__init__.py:31 +#, python-format +msgid "Compute resource plugin %s was not loaded" +msgstr "" + #: nova/consoleauth/manager.py:84 #, python-format msgid "Token: %(token)s failed to save into memcached." @@ -36,20 +163,37 @@ msgstr "" msgid "Instance: %(instance_uuid)s failed to save into memcached" msgstr "" -#: nova/openstack/common/loopingcall.py:82 +#: nova/network/neutronv2/api.py:214 #, python-format -msgid "task run outlasted interval by %s sec" +msgid "Neutron error: Port quota exceeded in tenant: %s" msgstr "" -#: nova/openstack/common/network_utils.py:146 +#: nova/network/neutronv2/api.py:219 +#, python-format +msgid "Neutron error: No more fixed IPs in network: %s" +msgstr "" + +#: nova/network/neutronv2/api.py:223 +#, python-format +msgid "" +"Neutron error: MAC address %(mac)s is already in use on network " +"%(network)s." 
+msgstr "" + +#: nova/openstack/common/loopingcall.py:87 +#, python-format +msgid "task %(func_name)s run outlasted interval by %(delay).2f sec" +msgstr "" + +#: nova/openstack/common/network_utils.py:145 msgid "tcp_keepidle not available on your system" msgstr "" -#: nova/openstack/common/network_utils.py:153 +#: nova/openstack/common/network_utils.py:152 msgid "tcp_keepintvl not available on your system" msgstr "" -#: nova/openstack/common/network_utils.py:160 +#: nova/openstack/common/network_utils.py:159 msgid "tcp_keepknt not available on your system" msgstr "" @@ -77,7 +221,7 @@ msgstr "" msgid "SQL connection failed. %s attempts left." msgstr "" -#: nova/openstack/common/db/sqlalchemy/utils.py:97 +#: nova/openstack/common/db/sqlalchemy/utils.py:96 msgid "Id not in sort_keys; is sort_keys unique?" msgstr "" @@ -85,7 +229,7 @@ msgstr "" msgid "VCPUs not set; assuming CPU collection broken" msgstr "" -#: nova/scheduler/filters/core_filter.py:92 +#: nova/scheduler/filters/core_filter.py:102 #, python-format msgid "Could not decode cpu_allocation_ratio: '%s'" msgstr "" @@ -95,12 +239,26 @@ msgstr "" msgid "Could not decode ram_allocation_ratio: '%s'" msgstr "" -#: nova/virt/libvirt/driver.py:374 +#: nova/virt/disk/api.py:366 +#, python-format +msgid "Ignoring error injecting data into image %(image)s (%(e)s)" +msgstr "" + +#: nova/virt/disk/api.py:456 +#, python-format +msgid "Ignoring error injecting %(inject)s into image (%(e)s)" +msgstr "" + +#: nova/virt/disk/vfs/api.py:44 +msgid "Unable to import guestfs, falling back to VFSLocalFS" +msgstr "" + +#: nova/virt/libvirt/driver.py:370 #, python-format msgid "Invalid cachemode %(cache_mode)s specified for disk type %(disk_type)s." 
msgstr "" -#: nova/virt/libvirt/driver.py:620 +#: nova/virt/libvirt/driver.py:616 #, python-format msgid "" "The libvirt driver is not tested on %(type)s/%(arch)s by the OpenStack " @@ -108,122 +266,122 @@ msgid "" "see: https://wiki.openstack.org/wiki/HypervisorSupportMatrix" msgstr "" -#: nova/virt/libvirt/driver.py:671 +#: nova/virt/libvirt/driver.py:673 #, python-format msgid "URI %(uri)s does not support events: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:687 +#: nova/virt/libvirt/driver.py:689 #, python-format msgid "URI %(uri)s does not support connection events: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:929 +#: nova/virt/libvirt/driver.py:921 msgid "Cannot destroy instance, operation time out" msgstr "" -#: nova/virt/libvirt/driver.py:953 +#: nova/virt/libvirt/driver.py:945 msgid "During wait destroy, instance disappeared." msgstr "" -#: nova/virt/libvirt/driver.py:1035 +#: nova/virt/libvirt/driver.py:1029 msgid "Instance may be still running, destroy it again." msgstr "" -#: nova/virt/libvirt/driver.py:1088 +#: nova/virt/libvirt/driver.py:1082 #, python-format msgid "Ignoring Volume Error on vol %(vol_id)s during delete %(exc)s" msgstr "" -#: nova/virt/libvirt/driver.py:1141 +#: nova/virt/libvirt/driver.py:1132 #, python-format msgid "Volume %(disk)s possibly unsafe to remove, please clean up manually" msgstr "" -#: nova/virt/libvirt/driver.py:1415 nova/virt/libvirt/driver.py:1423 +#: nova/virt/libvirt/driver.py:1408 nova/virt/libvirt/driver.py:1416 msgid "During detach_volume, instance disappeared." msgstr "" -#: nova/virt/libvirt/driver.py:1466 +#: nova/virt/libvirt/driver.py:1461 msgid "During detach_interface, instance disappeared." msgstr "" -#: nova/virt/libvirt/driver.py:2053 +#: nova/virt/libvirt/driver.py:2051 msgid "Failed to soft reboot instance. Trying hard reboot." msgstr "" -#: nova/virt/libvirt/driver.py:2614 +#: nova/virt/libvirt/driver.py:2608 #, python-format msgid "Image %s not found on disk storage. 
Continue without injecting data" msgstr "" -#: nova/virt/libvirt/driver.py:2777 +#: nova/virt/libvirt/driver.py:2795 msgid "File injection into a boot from volume instance is not supported" msgstr "" -#: nova/virt/libvirt/driver.py:2852 +#: nova/virt/libvirt/driver.py:2870 msgid "Instance disappeared while detaching a PCI device from it." msgstr "" -#: nova/virt/libvirt/driver.py:2907 +#: nova/virt/libvirt/driver.py:2925 #, python-format msgid "Cannot update service status on host: %s,since it is not registered." msgstr "" -#: nova/virt/libvirt/driver.py:2910 +#: nova/virt/libvirt/driver.py:2928 #, python-format msgid "Cannot update service status on host: %s,due to an unexpected exception." msgstr "" -#: nova/virt/libvirt/driver.py:2938 +#: nova/virt/libvirt/driver.py:2956 #, python-format msgid "URI %(uri)s does not support full set of host capabilities: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:3763 +#: nova/virt/libvirt/driver.py:3785 #, python-format msgid "Timeout waiting for vif plugging callback for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3784 +#: nova/virt/libvirt/driver.py:3806 #, python-format msgid "couldn't obtain the XML from domain: %(uuid)s, exception: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3841 +#: nova/virt/libvirt/driver.py:3863 msgid "" "Cannot get the number of cpu, because this function is not implemented " "for this platform. 
" msgstr "" -#: nova/virt/libvirt/driver.py:3901 +#: nova/virt/libvirt/driver.py:3925 #, python-format msgid "couldn't obtain the vpu count from domain id: %(uuid)s, exception: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3932 +#: nova/virt/libvirt/driver.py:3956 #, python-format msgid "couldn't obtain the memory from domain: %(uuid)s, exception: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:4134 +#: nova/virt/libvirt/driver.py:4158 #, python-format msgid "URI %(uri)s does not support listDevices: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:4789 +#: nova/virt/libvirt/driver.py:4813 #, python-format msgid "plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d." msgstr "" -#: nova/virt/libvirt/driver.py:4990 +#: nova/virt/libvirt/driver.py:5023 #, python-format msgid "" "Error from libvirt while getting description of %(instance_name)s: [Error" " Code %(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:4998 +#: nova/virt/libvirt/driver.py:5031 #, python-format msgid "" "Periodic task is updating the host stat, it is trying to get disk " @@ -231,7 +389,7 @@ msgid "" "resize." msgstr "" -#: nova/virt/libvirt/driver.py:5004 +#: nova/virt/libvirt/driver.py:5037 #, python-format msgid "" "Periodic task is updating the host stat, it is trying to get disk " @@ -239,7 +397,7 @@ msgid "" "exists on the compute node but is not managed by Nova." msgstr "" -#: nova/virt/libvirt/firewall.py:49 +#: nova/virt/libvirt/firewall.py:50 msgid "" "Libvirt module could not be loaded. NWFilterFirewall will not work " "correctly." @@ -272,20 +430,13 @@ msgid "" "%(free_space)db." 
msgstr "" -#: nova/virt/libvirt/utils.py:69 nova/virt/libvirt/utils.py:75 -msgid "systool is not installed" -msgstr "" - -#: nova/virt/libvirt/utils.py:248 +#: nova/virt/libvirt/rbd.py:268 #, python-format -msgid "rbd remove %(name)s in pool %(pool)s failed" +msgid "rbd remove %(volume)s in pool %(pool)s failed" msgstr "" -#: nova/virt/libvirt/vif.py:767 -#, python-format -msgid "" -"VIF driver \"%s\" is marked as deprecated and will be removed in the Juno" -" release." +#: nova/virt/libvirt/utils.py:69 nova/virt/libvirt/utils.py:75 +msgid "systool is not installed" msgstr "" #: nova/virt/libvirt/volume.py:132 @@ -346,3 +497,25 @@ msgstr "" msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" +#: nova/virt/vmwareapi/driver.py:95 +msgid "" +"The VMware ESX driver is now deprecated and has been removed in the Juno " +"release. The VC driver will remain and continue to be supported." +msgstr "" + +#: nova/virt/vmwareapi/driver.py:150 +#, python-format +msgid "The following clusters could not be found in the vCenter %s" +msgstr "" + +#: nova/virt/vmwareapi/driver.py:179 +msgid "Instance cannot be found in host, or in an unknownstate." +msgstr "" + +#: nova/volume/cinder.py:249 +msgid "" +"Cinder V1 API is deprecated as of the Juno release, and Nova is still " +"configured to use it. Enable the V2 API in Cinder and set " +"cinder_catalog_info in nova.conf to use it." 
+msgstr "" + diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot index 54341e37c1..b431f5ef8d 100644 --- a/nova/locale/nova.pot +++ b/nova/locale/nova.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev763.g740fa02\n" +"Project-Id-Version: nova 2014.2.dev425.g05dbf0d\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,39 +17,39 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: nova/block_device.py:100 +#: nova/block_device.py:102 msgid "Some fields are invalid." msgstr "" -#: nova/block_device.py:110 +#: nova/block_device.py:112 msgid "Some required fields are missing" msgstr "" -#: nova/block_device.py:126 +#: nova/block_device.py:128 msgid "Boot index is invalid." msgstr "" -#: nova/block_device.py:169 +#: nova/block_device.py:171 msgid "Unrecognized legacy format." msgstr "" -#: nova/block_device.py:186 +#: nova/block_device.py:188 msgid "Invalid source_type field." msgstr "" -#: nova/block_device.py:190 +#: nova/block_device.py:192 msgid "Missing device UUID." msgstr "" -#: nova/block_device.py:369 +#: nova/block_device.py:371 msgid "Device name empty or too long." msgstr "" -#: nova/block_device.py:373 +#: nova/block_device.py:375 msgid "Device name contains spaces." msgstr "" -#: nova/block_device.py:383 +#: nova/block_device.py:385 msgid "Invalid volume_size." msgstr "" @@ -328,7 +328,7 @@ msgstr "" msgid "Group not valid. Reason: %(reason)s" msgstr "" -#: nova/exception.py:345 nova/openstack/common/db/sqlalchemy/utils.py:58 +#: nova/exception.py:345 nova/openstack/common/db/sqlalchemy/utils.py:57 msgid "Sort key supplied was not valid." msgstr "" @@ -688,1085 +688,1108 @@ msgid "" msgstr "" #: nova/exception.py:654 -msgid "Could not find the datastore reference(s) which the VM uses." 
+#, python-format +msgid "Physical network is missing for network %(network_uuid)s" msgstr "" #: nova/exception.py:658 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: nova/exception.py:662 #, python-format msgid "Port %(port_id)s is still in use." msgstr "" -#: nova/exception.py:662 +#: nova/exception.py:666 #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "" -#: nova/exception.py:666 +#: nova/exception.py:670 #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "" -#: nova/exception.py:670 +#: nova/exception.py:674 #, python-format msgid "No free port available for instance %(instance)s." msgstr "" -#: nova/exception.py:674 +#: nova/exception.py:678 #, python-format msgid "Fixed ip %(address)s already exists." msgstr "" -#: nova/exception.py:678 +#: nova/exception.py:682 #, python-format msgid "No fixed IP associated with id %(id)s." msgstr "" -#: nova/exception.py:682 +#: nova/exception.py:686 #, python-format msgid "Fixed ip not found for address %(address)s." msgstr "" -#: nova/exception.py:686 +#: nova/exception.py:690 #, python-format msgid "Instance %(instance_uuid)s has zero fixed ips." msgstr "" -#: nova/exception.py:690 +#: nova/exception.py:694 #, python-format msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." msgstr "" -#: nova/exception.py:695 +#: nova/exception.py:699 #, python-format msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'." msgstr "" -#: nova/exception.py:699 +#: nova/exception.py:703 #, python-format msgid "" "Fixed IP address (%(address)s) does not exist in network " "(%(network_uuid)s)." msgstr "" -#: nova/exception.py:704 +#: nova/exception.py:708 #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance " "%(instance_uuid)s." 
msgstr "" -#: nova/exception.py:709 +#: nova/exception.py:713 #, python-format msgid "More than one instance is associated with fixed ip address '%(address)s'." msgstr "" -#: nova/exception.py:714 +#: nova/exception.py:718 #, python-format msgid "Fixed IP address %(address)s is invalid." msgstr "" -#: nova/exception.py:719 +#: nova/exception.py:723 msgid "Zero fixed ips available." msgstr "" -#: nova/exception.py:723 +#: nova/exception.py:727 msgid "Zero fixed ips could be found." msgstr "" -#: nova/exception.py:727 +#: nova/exception.py:731 #, python-format msgid "Floating ip %(address)s already exists." msgstr "" -#: nova/exception.py:732 +#: nova/exception.py:736 #, python-format msgid "Floating ip not found for id %(id)s." msgstr "" -#: nova/exception.py:736 +#: nova/exception.py:740 #, python-format msgid "The DNS entry %(name)s already exists in domain %(domain)s." msgstr "" -#: nova/exception.py:740 +#: nova/exception.py:744 #, python-format msgid "Floating ip not found for address %(address)s." msgstr "" -#: nova/exception.py:744 +#: nova/exception.py:748 #, python-format msgid "Floating ip not found for host %(host)s." msgstr "" -#: nova/exception.py:748 +#: nova/exception.py:752 #, python-format msgid "Multiple floating ips are found for address %(address)s." msgstr "" -#: nova/exception.py:752 +#: nova/exception.py:756 msgid "Floating ip pool not found." msgstr "" -#: nova/exception.py:757 +#: nova/exception.py:761 msgid "Zero floating ips available." msgstr "" -#: nova/exception.py:763 +#: nova/exception.py:767 #, python-format msgid "Floating ip %(address)s is associated." msgstr "" -#: nova/exception.py:767 +#: nova/exception.py:771 #, python-format msgid "Floating ip %(address)s is not associated." msgstr "" -#: nova/exception.py:771 +#: nova/exception.py:775 msgid "Zero floating ips exist." msgstr "" -#: nova/exception.py:776 +#: nova/exception.py:780 #, python-format msgid "Interface %(interface)s not found." 
msgstr "" -#: nova/exception.py:781 nova/api/openstack/compute/contrib/floating_ips.py:97 +#: nova/exception.py:785 nova/api/openstack/compute/contrib/floating_ips.py:98 msgid "Cannot disassociate auto assigned floating ip" msgstr "" -#: nova/exception.py:786 +#: nova/exception.py:790 #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "" -#: nova/exception.py:790 +#: nova/exception.py:794 #, python-format msgid "Service %(service_id)s could not be found." msgstr "" -#: nova/exception.py:794 +#: nova/exception.py:798 #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "" -#: nova/exception.py:798 +#: nova/exception.py:802 #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "" -#: nova/exception.py:802 +#: nova/exception.py:806 #, python-format msgid "Host %(host)s could not be found." msgstr "" -#: nova/exception.py:806 +#: nova/exception.py:810 #, python-format msgid "Compute host %(host)s could not be found." msgstr "" -#: nova/exception.py:810 +#: nova/exception.py:814 #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" -#: nova/exception.py:814 +#: nova/exception.py:818 #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "" -#: nova/exception.py:818 +#: nova/exception.py:822 #, python-format msgid "" "Change would make usage less than 0 for the following resources: " "%(unders)s" msgstr "" -#: nova/exception.py:823 +#: nova/exception.py:827 +#, python-format +msgid "Wrong quota method %(method)s used on resource %(res)s" +msgstr "" + +#: nova/exception.py:831 msgid "Quota could not be found" msgstr "" -#: nova/exception.py:827 +#: nova/exception.py:835 #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "" -#: nova/exception.py:832 +#: nova/exception.py:840 #, python-format msgid "Unknown quota resources %(unknown)s." 
msgstr "" -#: nova/exception.py:836 +#: nova/exception.py:844 #, python-format msgid "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" -#: nova/exception.py:841 +#: nova/exception.py:849 #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "" -#: nova/exception.py:845 +#: nova/exception.py:853 #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "" -#: nova/exception.py:849 +#: nova/exception.py:857 #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "" -#: nova/exception.py:853 +#: nova/exception.py:861 #, python-format msgid "Quota reservation %(uuid)s could not be found." msgstr "" -#: nova/exception.py:857 +#: nova/exception.py:865 #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: nova/exception.py:861 +#: nova/exception.py:869 #, python-format msgid "Security group %(security_group_id)s not found." msgstr "" -#: nova/exception.py:865 +#: nova/exception.py:873 #, python-format msgid "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "" -#: nova/exception.py:870 +#: nova/exception.py:878 #, python-format msgid "Security group with rule %(rule_id)s not found." msgstr "" -#: nova/exception.py:875 +#: nova/exception.py:883 #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "" -#: nova/exception.py:880 +#: nova/exception.py:888 #, python-format msgid "" "Security group %(security_group_id)s is already associated with the " "instance %(instance_id)s" msgstr "" -#: nova/exception.py:885 +#: nova/exception.py:893 #, python-format msgid "" "Security group %(security_group_id)s is not associated with the instance " "%(instance_id)s" msgstr "" -#: nova/exception.py:890 +#: nova/exception.py:898 #, python-format msgid "Security group default rule (%rule_id)s not found." 
msgstr "" -#: nova/exception.py:894 +#: nova/exception.py:902 msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "" -#: nova/exception.py:900 +#: nova/exception.py:908 #, python-format msgid "Rule already exists in group: %(rule)s" msgstr "" -#: nova/exception.py:904 +#: nova/exception.py:912 msgid "No Unique Match Found." msgstr "" -#: nova/exception.py:909 +#: nova/exception.py:917 #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "" -#: nova/exception.py:913 +#: nova/exception.py:921 #, python-format msgid "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "" -#: nova/exception.py:918 +#: nova/exception.py:926 #, python-format msgid "Console pool %(pool_id)s could not be found." msgstr "" -#: nova/exception.py:922 +#: nova/exception.py:930 #, python-format msgid "" "Console pool with host %(host)s, console_type %(console_type)s and " "compute_host %(compute_host)s already exists." msgstr "" -#: nova/exception.py:928 +#: nova/exception.py:936 #, python-format msgid "" "Console pool of type %(console_type)s for compute host %(compute_host)s " "on proxy host %(host)s not found." msgstr "" -#: nova/exception.py:934 +#: nova/exception.py:942 #, python-format msgid "Console %(console_id)s could not be found." msgstr "" -#: nova/exception.py:938 +#: nova/exception.py:946 #, python-format msgid "Console for instance %(instance_uuid)s could not be found." msgstr "" -#: nova/exception.py:942 +#: nova/exception.py:950 #, python-format msgid "" "Console for instance %(instance_uuid)s in pool %(pool_id)s could not be " "found." msgstr "" -#: nova/exception.py:947 +#: nova/exception.py:955 #, python-format msgid "Invalid console type %(console_type)s" msgstr "" -#: nova/exception.py:951 +#: nova/exception.py:959 #, python-format msgid "Unavailable console type %(console_type)s." 
msgstr "" -#: nova/exception.py:955 +#: nova/exception.py:963 #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "" -#: nova/exception.py:960 +#: nova/exception.py:968 #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "" -#: nova/exception.py:964 +#: nova/exception.py:972 #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "" -#: nova/exception.py:968 +#: nova/exception.py:976 #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" -#: nova/exception.py:973 +#: nova/exception.py:981 #, python-format msgid "" "Flavor %(id)d extra spec cannot be updated or created after %(retries)d " "retries." msgstr "" -#: nova/exception.py:978 +#: nova/exception.py:986 #, python-format msgid "Cell %(cell_name)s doesn't exist." msgstr "" -#: nova/exception.py:982 +#: nova/exception.py:990 #, python-format msgid "Cell with name %(name)s already exists." msgstr "" -#: nova/exception.py:986 +#: nova/exception.py:994 #, python-format msgid "Inconsistency in cell routing: %(reason)s" msgstr "" -#: nova/exception.py:990 +#: nova/exception.py:998 #, python-format msgid "Service API method not found: %(detail)s" msgstr "" -#: nova/exception.py:994 +#: nova/exception.py:1002 msgid "Timeout waiting for response from cell" msgstr "" -#: nova/exception.py:998 +#: nova/exception.py:1006 #, python-format msgid "Cell message has reached maximum hop count: %(hop_count)s" msgstr "" -#: nova/exception.py:1002 +#: nova/exception.py:1010 msgid "No cells available matching scheduling criteria." msgstr "" -#: nova/exception.py:1006 +#: nova/exception.py:1014 msgid "Cannot update cells configuration file." 
msgstr "" -#: nova/exception.py:1010 +#: nova/exception.py:1018 #, python-format msgid "Cell is not known for instance %(instance_uuid)s" msgstr "" -#: nova/exception.py:1014 +#: nova/exception.py:1022 #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" -#: nova/exception.py:1018 +#: nova/exception.py:1026 #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" -#: nova/exception.py:1023 +#: nova/exception.py:1031 #, python-format msgid "" "Metric %(name)s could not be found on the compute host node " "%(host)s.%(node)s." msgstr "" -#: nova/exception.py:1028 +#: nova/exception.py:1036 #, python-format msgid "File %(file_path)s could not be found." msgstr "" -#: nova/exception.py:1032 +#: nova/exception.py:1040 msgid "Zero files could be found." msgstr "" -#: nova/exception.py:1036 +#: nova/exception.py:1044 #, python-format msgid "Virtual switch associated with the network adapter %(adapter)s not found." msgstr "" -#: nova/exception.py:1041 +#: nova/exception.py:1049 #, python-format msgid "Network adapter %(adapter)s could not be found." msgstr "" -#: nova/exception.py:1045 +#: nova/exception.py:1053 #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "" -#: nova/exception.py:1049 +#: nova/exception.py:1057 msgid "Action not allowed." msgstr "" -#: nova/exception.py:1053 +#: nova/exception.py:1061 msgid "Rotation is not allowed for snapshots" msgstr "" -#: nova/exception.py:1057 +#: nova/exception.py:1065 msgid "Rotation param is required for backup image_type" msgstr "" -#: nova/exception.py:1062 nova/tests/compute/test_keypairs.py:144 +#: nova/exception.py:1070 nova/tests/compute/test_keypairs.py:146 #, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "" -#: nova/exception.py:1066 +#: nova/exception.py:1074 #, python-format msgid "Instance %(name)s already exists." 
msgstr "" -#: nova/exception.py:1070 +#: nova/exception.py:1078 #, python-format msgid "Flavor with name %(name)s already exists." msgstr "" -#: nova/exception.py:1074 +#: nova/exception.py:1082 #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "" -#: nova/exception.py:1078 +#: nova/exception.py:1086 #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." msgstr "" -#: nova/exception.py:1083 +#: nova/exception.py:1091 #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "" -#: nova/exception.py:1087 +#: nova/exception.py:1095 #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "" -#: nova/exception.py:1091 +#: nova/exception.py:1099 #, python-format msgid "Storage error: %(reason)s" msgstr "" -#: nova/exception.py:1095 +#: nova/exception.py:1103 #, python-format msgid "Migration error: %(reason)s" msgstr "" -#: nova/exception.py:1099 +#: nova/exception.py:1107 #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "" -#: nova/exception.py:1103 +#: nova/exception.py:1111 #, python-format msgid "Malformed message body: %(reason)s" msgstr "" -#: nova/exception.py:1109 +#: nova/exception.py:1117 #, python-format msgid "Could not find config at %(path)s" msgstr "" -#: nova/exception.py:1113 +#: nova/exception.py:1121 #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" -#: nova/exception.py:1117 +#: nova/exception.py:1125 msgid "When resizing, instances must change flavor!" msgstr "" -#: nova/exception.py:1121 +#: nova/exception.py:1129 #, python-format msgid "Resize error: %(reason)s" msgstr "" -#: nova/exception.py:1125 +#: nova/exception.py:1133 #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "" -#: nova/exception.py:1129 +#: nova/exception.py:1137 msgid "Flavor's memory is too small for requested image." 
msgstr "" -#: nova/exception.py:1133 +#: nova/exception.py:1141 msgid "Flavor's disk is too small for requested image." msgstr "" -#: nova/exception.py:1137 +#: nova/exception.py:1145 #, python-format msgid "Insufficient free memory on compute node to start %(uuid)s." msgstr "" -#: nova/exception.py:1141 +#: nova/exception.py:1149 #, python-format msgid "No valid host was found. %(reason)s" msgstr "" -#: nova/exception.py:1146 +#: nova/exception.py:1154 #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "" -#: nova/exception.py:1153 +#: nova/exception.py:1161 #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used " "%(used)d of %(allowed)d %(resource)s" msgstr "" -#: nova/exception.py:1158 +#: nova/exception.py:1166 msgid "Maximum number of floating ips exceeded" msgstr "" -#: nova/exception.py:1162 +#: nova/exception.py:1170 msgid "Maximum number of fixed ips exceeded" msgstr "" -#: nova/exception.py:1166 +#: nova/exception.py:1174 #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "" -#: nova/exception.py:1170 +#: nova/exception.py:1178 msgid "Personality file limit exceeded" msgstr "" -#: nova/exception.py:1174 +#: nova/exception.py:1182 msgid "Personality file path too long" msgstr "" -#: nova/exception.py:1178 +#: nova/exception.py:1186 msgid "Personality file content too long" msgstr "" -#: nova/exception.py:1182 nova/tests/compute/test_keypairs.py:155 +#: nova/exception.py:1190 nova/tests/compute/test_keypairs.py:157 msgid "Maximum number of key pairs exceeded" msgstr "" -#: nova/exception.py:1187 +#: nova/exception.py:1195 msgid "Maximum number of security groups or rules exceeded" msgstr "" -#: nova/exception.py:1191 +#: nova/exception.py:1199 msgid "Maximum number of ports exceeded" msgstr "" -#: nova/exception.py:1195 +#: nova/exception.py:1203 #, python-format msgid "" "Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " "%(reason)s." 
msgstr "" -#: nova/exception.py:1200 +#: nova/exception.py:1208 #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "" -#: nova/exception.py:1204 +#: nova/exception.py:1212 #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "" -#: nova/exception.py:1208 +#: nova/exception.py:1216 #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "" -#: nova/exception.py:1212 +#: nova/exception.py:1220 #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" -#: nova/exception.py:1217 +#: nova/exception.py:1225 #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "" -#: nova/exception.py:1221 +#: nova/exception.py:1229 msgid "Unable to create flavor" msgstr "" -#: nova/exception.py:1225 +#: nova/exception.py:1233 #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "" -#: nova/exception.py:1231 +#: nova/exception.py:1239 #, python-format msgid "Detected existing vlan with id %(vlan)d" msgstr "" -#: nova/exception.py:1235 +#: nova/exception.py:1243 msgid "There was a conflict when trying to complete your request." msgstr "" -#: nova/exception.py:1241 +#: nova/exception.py:1249 #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "" -#: nova/exception.py:1245 +#: nova/exception.py:1253 #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "" -#: nova/exception.py:1250 +#: nova/exception.py:1258 #, python-format msgid "Node %(node_id)s could not be found." msgstr "" -#: nova/exception.py:1254 +#: nova/exception.py:1262 #, python-format msgid "Node with UUID %(node_uuid)s could not be found." msgstr "" -#: nova/exception.py:1258 +#: nova/exception.py:1266 #, python-format msgid "Marker %(marker)s could not be found." 
msgstr "" -#: nova/exception.py:1263 +#: nova/exception.py:1271 #, python-format msgid "Invalid id: %(val)s (expecting \"i-...\")." msgstr "" -#: nova/exception.py:1267 +#: nova/exception.py:1275 #, python-format msgid "Could not fetch image %(image_id)s" msgstr "" -#: nova/exception.py:1271 +#: nova/exception.py:1279 #, python-format msgid "Could not upload image %(image_id)s" msgstr "" -#: nova/exception.py:1275 +#: nova/exception.py:1283 #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "" -#: nova/exception.py:1279 +#: nova/exception.py:1287 #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "" -#: nova/exception.py:1283 +#: nova/exception.py:1291 #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "" -#: nova/exception.py:1287 +#: nova/exception.py:1295 #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "" -#: nova/exception.py:1291 +#: nova/exception.py:1299 #, python-format msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "" -#: nova/exception.py:1296 +#: nova/exception.py:1304 #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "" -#: nova/exception.py:1301 +#: nova/exception.py:1309 #, python-format -msgid "Failed to attach network adapter device to %(instance)s" +msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "" -#: nova/exception.py:1305 +#: nova/exception.py:1314 #, python-format -msgid "Failed to detach network adapter device from %(instance)s" +msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "" -#: nova/exception.py:1309 +#: nova/exception.py:1319 #, python-format msgid "" "User data too large. User data must be no larger than %(maxsize)s bytes " "once base64 encoded. 
Your data is %(length)d bytes" msgstr "" -#: nova/exception.py:1315 +#: nova/exception.py:1325 msgid "User data needs to be valid base 64." msgstr "" -#: nova/exception.py:1319 +#: nova/exception.py:1329 #, python-format msgid "" "Unexpected task state: expecting %(expected)s but the actual state is " "%(actual)s" msgstr "" -#: nova/exception.py:1328 +#: nova/exception.py:1338 #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not " "found" msgstr "" -#: nova/exception.py:1333 +#: nova/exception.py:1343 #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "" -#: nova/exception.py:1337 +#: nova/exception.py:1347 #, python-format msgid "" "Unexpected VM state: expecting %(expected)s but the actual state is " "%(actual)s" msgstr "" -#: nova/exception.py:1342 +#: nova/exception.py:1352 #, python-format msgid "The CA file for %(project)s could not be found" msgstr "" -#: nova/exception.py:1346 +#: nova/exception.py:1356 #, python-format msgid "The CRL file for %(project)s could not be found" msgstr "" -#: nova/exception.py:1350 +#: nova/exception.py:1360 msgid "Instance recreate is not supported." msgstr "" -#: nova/exception.py:1354 +#: nova/exception.py:1364 #, python-format msgid "" "The service from servicegroup driver %(driver)s is temporarily " "unavailable." 
msgstr "" -#: nova/exception.py:1359 +#: nova/exception.py:1369 #, python-format msgid "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" -#: nova/exception.py:1364 +#: nova/exception.py:1374 #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" -#: nova/exception.py:1369 +#: nova/exception.py:1379 #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt " "driver" msgstr "" -#: nova/exception.py:1374 +#: nova/exception.py:1384 #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "" -#: nova/exception.py:1378 +#: nova/exception.py:1388 #, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "" -#: nova/exception.py:1382 +#: nova/exception.py:1392 #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "" -#: nova/exception.py:1387 +#: nova/exception.py:1397 #, python-format msgid "Shadow table with name %(name)s already exists." 
msgstr "" -#: nova/exception.py:1392 +#: nova/exception.py:1402 #, python-format msgid "Instance rollback performed due to: %s" msgstr "" -#: nova/exception.py:1398 +#: nova/exception.py:1408 #, python-format msgid "Unsupported object type %(objtype)s" msgstr "" -#: nova/exception.py:1402 +#: nova/exception.py:1412 #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "" -#: nova/exception.py:1406 +#: nova/exception.py:1416 #, python-format msgid "Version %(objver)s of %(objname)s is not supported" msgstr "" -#: nova/exception.py:1410 +#: nova/exception.py:1420 #, python-format msgid "Cannot modify readonly field %(field)s" msgstr "" -#: nova/exception.py:1414 +#: nova/exception.py:1424 #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "" -#: nova/exception.py:1418 +#: nova/exception.py:1428 #, python-format msgid "Field %(field)s of %(objname)s is not an instance of Field" msgstr "" -#: nova/exception.py:1422 +#: nova/exception.py:1432 #, python-format msgid "Core API extensions are missing: %(missing_apis)s" msgstr "" -#: nova/exception.py:1426 +#: nova/exception.py:1436 #, python-format msgid "Error during following call to agent: %(method)s" msgstr "" -#: nova/exception.py:1430 +#: nova/exception.py:1440 #, python-format msgid "Unable to contact guest agent. The following call timed out: %(method)s" msgstr "" -#: nova/exception.py:1435 +#: nova/exception.py:1445 #, python-format msgid "Agent does not support the call: %(method)s" msgstr "" -#: nova/exception.py:1439 +#: nova/exception.py:1449 #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "" -#: nova/exception.py:1443 +#: nova/exception.py:1453 #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "" -#: nova/exception.py:1447 +#: nova/exception.py:1457 #, python-format msgid "Instance group %(group_uuid)s has no metadata with key %(metadata_key)s." 
msgstr "" -#: nova/exception.py:1452 +#: nova/exception.py:1462 #, python-format msgid "Instance group %(group_uuid)s has no member with id %(instance_id)s." msgstr "" -#: nova/exception.py:1457 +#: nova/exception.py:1467 #, python-format msgid "Instance group %(group_uuid)s has no policy %(policy)s." msgstr "" -#: nova/exception.py:1461 +#: nova/exception.py:1471 #, python-format msgid "Number of retries to plugin (%(num_retries)d) exceeded." msgstr "" -#: nova/exception.py:1465 +#: nova/exception.py:1475 #, python-format msgid "There was an error with the download module %(module)s. %(reason)s" msgstr "" -#: nova/exception.py:1470 +#: nova/exception.py:1480 #, python-format msgid "" "The metadata for this location will not work with this module %(module)s." " %(reason)s." msgstr "" -#: nova/exception.py:1475 +#: nova/exception.py:1485 #, python-format msgid "The method %(method_name)s is not implemented." msgstr "" -#: nova/exception.py:1479 +#: nova/exception.py:1489 #, python-format msgid "The module %(module)s is misconfigured: %(reason)s." msgstr "" -#: nova/exception.py:1483 +#: nova/exception.py:1493 #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "" -#: nova/exception.py:1487 +#: nova/exception.py:1497 #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "" -#: nova/exception.py:1491 +#: nova/exception.py:1501 +#, python-format +msgid "" +"Invalid PCI Whitelist: The PCI address %(address)s has an invalid " +"%(field)s." +msgstr "" + +#: nova/exception.py:1506 +msgid "" +"Invalid PCI Whitelist: The PCI whitelist can specify devname or address, " +"but not both" +msgstr "" + +#: nova/exception.py:1512 #, python-format msgid "PCI device %(id)s not found" msgstr "" -#: nova/exception.py:1495 +#: nova/exception.py:1516 #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." 
msgstr "" -#: nova/exception.py:1499 +#: nova/exception.py:1520 #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" -#: nova/exception.py:1505 +#: nova/exception.py:1526 #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead " "of %(hopeowner)s" msgstr "" -#: nova/exception.py:1511 +#: nova/exception.py:1532 #, python-format msgid "PCI device request (%requests)s failed" msgstr "" -#: nova/exception.py:1516 +#: nova/exception.py:1537 #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty " "pool" msgstr "" -#: nova/exception.py:1522 +#: nova/exception.py:1543 #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "" -#: nova/exception.py:1526 +#: nova/exception.py:1547 #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "" -#: nova/exception.py:1531 +#: nova/exception.py:1552 #, python-format msgid "Not enough parameters: %(reason)s" msgstr "" -#: nova/exception.py:1536 +#: nova/exception.py:1557 #, python-format msgid "Invalid PCI devices Whitelist config %(reason)s" msgstr "" -#: nova/exception.py:1540 +#: nova/exception.py:1561 #, python-format msgid "Cannot change %(node_id)s to %(new_node_id)s" msgstr "" -#: nova/exception.py:1550 +#: nova/exception.py:1571 #, python-format msgid "" "Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: " "%(reason)s" msgstr "" -#: nova/exception.py:1555 +#: nova/exception.py:1576 #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "" -#: nova/exception.py:1559 +#: nova/exception.py:1580 #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "" -#: nova/exception.py:1563 +#: nova/exception.py:1584 #, python-format msgid "Key manager error: %(reason)s" msgstr "" -#: nova/exception.py:1567 +#: nova/exception.py:1588 #, python-format msgid "Failed to remove volume(s): 
(%(reason)s)" msgstr "" -#: nova/exception.py:1571 +#: nova/exception.py:1592 #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "" -#: nova/exception.py:1575 +#: nova/exception.py:1596 #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "" -#: nova/exception.py:1580 +#: nova/exception.py:1601 #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the " "maximum allowed by flavor %(max_vram)d." msgstr "" -#: nova/exception.py:1585 +#: nova/exception.py:1606 #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "" -#: nova/exception.py:1589 +#: nova/exception.py:1610 msgid "" "Live migration of instances with config drives is not supported in " "libvirt unless libvirt instance path and drive data is shared across " "compute nodes." msgstr "" -#: nova/exception.py:1595 +#: nova/exception.py:1616 #, python-format msgid "" "Host %(server)s is running an old version of Nova, live migrations " @@ -1774,32 +1797,37 @@ msgid "" "and try again." 
msgstr "" -#: nova/exception.py:1601 +#: nova/exception.py:1622 #, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "" -#: nova/exception.py:1605 +#: nova/exception.py:1626 #, python-format msgid "" "Image vCPU limits %(sockets)d:%(cores)d:%(threads)d exceeds permitted " "%(maxsockets)d:%(maxcores)d:%(maxthreads)d" msgstr "" -#: nova/exception.py:1610 +#: nova/exception.py:1631 #, python-format msgid "" "Image vCPU topology %(sockets)d:%(cores)d:%(threads)d exceeds permitted " "%(maxsockets)d:%(maxcores)d:%(maxthreads)d" msgstr "" -#: nova/exception.py:1615 +#: nova/exception.py:1636 #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to" " satisfy for vcpus count %(vcpus)d" msgstr "" +#: nova/exception.py:1641 +#, python-format +msgid "Architecture name '%(arch)s' is not recognised" +msgstr "" + #: nova/filters.py:84 #, python-format msgid "Filter %s returned 0 hosts" @@ -1813,12 +1841,12 @@ msgstr "" msgid "Failed to get nw_info" msgstr "" -#: nova/quota.py:1326 +#: nova/quota.py:1332 #, python-format msgid "Failed to commit reservations %s" msgstr "" -#: nova/quota.py:1349 +#: nova/quota.py:1355 #, python-format msgid "Failed to roll back reservations %s" msgstr "" @@ -1900,37 +1928,41 @@ msgstr "" msgid "Could not remove tmpdir: %s" msgstr "" -#: nova/utils.py:963 +#: nova/utils.py:964 +msgid "The input is not a string or unicode" +msgstr "" + +#: nova/utils.py:966 #, python-format msgid "%s is not a string or unicode" msgstr "" -#: nova/utils.py:967 +#: nova/utils.py:973 #, python-format msgid "%(name)s has a minimum character requirement of %(min_length)s." msgstr "" -#: nova/utils.py:972 +#: nova/utils.py:978 #, python-format msgid "%(name)s has more than %(max_length)s characters." 
msgstr "" -#: nova/utils.py:982 +#: nova/utils.py:988 #, python-format msgid "%(value_name)s must be an integer" msgstr "" -#: nova/utils.py:988 +#: nova/utils.py:994 #, python-format msgid "%(value_name)s must be >= %(min_value)d" msgstr "" -#: nova/utils.py:994 +#: nova/utils.py:1000 #, python-format msgid "%(value_name)s must be <= %(max_value)d" msgstr "" -#: nova/utils.py:1028 +#: nova/utils.py:1034 #, python-format msgid "Hypervisor version %s is invalid." msgstr "" @@ -1950,22 +1982,22 @@ msgstr "" msgid "%(name)s listening on %(host)s:%(port)s" msgstr "" -#: nova/wsgi.py:159 nova/openstack/common/sslutils.py:50 +#: nova/wsgi.py:159 nova/openstack/common/sslutils.py:47 #, python-format msgid "Unable to find cert_file : %s" msgstr "" -#: nova/wsgi.py:163 nova/openstack/common/sslutils.py:53 +#: nova/wsgi.py:163 nova/openstack/common/sslutils.py:50 #, python-format msgid "Unable to find ca_file : %s" msgstr "" -#: nova/wsgi.py:167 nova/openstack/common/sslutils.py:56 +#: nova/wsgi.py:167 nova/openstack/common/sslutils.py:53 #, python-format msgid "Unable to find key_file : %s" msgstr "" -#: nova/wsgi.py:171 nova/openstack/common/sslutils.py:59 +#: nova/wsgi.py:171 nova/openstack/common/sslutils.py:56 msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" @@ -1988,274 +2020,238 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: nova/api/auth.py:72 -msgid "ratelimit_v3 is removed from v3 api." -msgstr "" - -#: nova/api/auth.py:135 +#: nova/api/auth.py:136 msgid "Invalid service catalog json." msgstr "" -#: nova/api/auth.py:159 -msgid "Sourcing roles from deprecated X-Role HTTP header" -msgstr "" - #: nova/api/sizelimit.py:53 nova/api/sizelimit.py:62 nova/api/sizelimit.py:76 #: nova/api/metadata/password.py:62 msgid "Request is too large." 
msgstr "" -#: nova/api/ec2/__init__.py:88 +#: nova/api/ec2/__init__.py:89 #, python-format msgid "FaultWrapper: %s" msgstr "" -#: nova/api/ec2/__init__.py:159 +#: nova/api/ec2/__init__.py:160 msgid "Too many failed authentications." msgstr "" -#: nova/api/ec2/__init__.py:168 -#, python-format -msgid "" -"Access key %(access_key)s has had %(failures)d failed authentications and" -" will be locked out for %(lock_mins)d minutes." -msgstr "" - -#: nova/api/ec2/__init__.py:187 +#: nova/api/ec2/__init__.py:188 msgid "Signature not provided" msgstr "" -#: nova/api/ec2/__init__.py:192 +#: nova/api/ec2/__init__.py:193 msgid "Access key not provided" msgstr "" -#: nova/api/ec2/__init__.py:228 nova/api/ec2/__init__.py:244 +#: nova/api/ec2/__init__.py:229 nova/api/ec2/__init__.py:245 msgid "Failure communicating with keystone" msgstr "" -#: nova/api/ec2/__init__.py:304 +#: nova/api/ec2/__init__.py:305 msgid "Timestamp failed validation." msgstr "" -#: nova/api/ec2/__init__.py:402 +#: nova/api/ec2/__init__.py:403 #, python-format msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" msgstr "" -#: nova/api/ec2/__init__.py:492 -#, python-format -msgid "Unexpected %(ex_name)s raised: %(ex_str)s" -msgstr "" - -#: nova/api/ec2/__init__.py:495 -#, python-format -msgid "%(ex_name)s raised: %(ex_str)s" -msgstr "" - -#: nova/api/ec2/__init__.py:519 -#, python-format -msgid "Environment: %s" -msgstr "" - -#: nova/api/ec2/__init__.py:521 +#: nova/api/ec2/__init__.py:522 msgid "Unknown error occurred." 
msgstr "" -#: nova/api/ec2/cloud.py:391 +#: nova/api/ec2/cloud.py:392 #, python-format msgid "Create snapshot of volume %s" msgstr "" -#: nova/api/ec2/cloud.py:416 +#: nova/api/ec2/cloud.py:417 #, python-format msgid "Could not find key pair(s): %s" msgstr "" -#: nova/api/ec2/cloud.py:432 +#: nova/api/ec2/cloud.py:433 #, python-format msgid "Create key pair %s" msgstr "" -#: nova/api/ec2/cloud.py:444 +#: nova/api/ec2/cloud.py:445 #, python-format msgid "Import key %s" msgstr "" -#: nova/api/ec2/cloud.py:457 +#: nova/api/ec2/cloud.py:458 #, python-format msgid "Delete key pair %s" msgstr "" -#: nova/api/ec2/cloud.py:599 nova/api/ec2/cloud.py:729 +#: nova/api/ec2/cloud.py:600 nova/api/ec2/cloud.py:730 msgid "need group_name or group_id" msgstr "" -#: nova/api/ec2/cloud.py:604 +#: nova/api/ec2/cloud.py:605 msgid "can't build a valid rule" msgstr "" -#: nova/api/ec2/cloud.py:612 +#: nova/api/ec2/cloud.py:613 #, python-format msgid "Invalid IP protocol %(protocol)s" msgstr "" -#: nova/api/ec2/cloud.py:646 nova/api/ec2/cloud.py:682 +#: nova/api/ec2/cloud.py:647 nova/api/ec2/cloud.py:683 msgid "No rule for the specified parameters." 
msgstr "" -#: nova/api/ec2/cloud.py:760 +#: nova/api/ec2/cloud.py:761 #, python-format msgid "Get console output for instance %s" msgstr "" -#: nova/api/ec2/cloud.py:832 +#: nova/api/ec2/cloud.py:833 #, python-format msgid "Create volume from snapshot %s" msgstr "" -#: nova/api/ec2/cloud.py:836 nova/api/openstack/compute/contrib/volumes.py:243 +#: nova/api/ec2/cloud.py:837 nova/api/openstack/compute/contrib/volumes.py:243 #, python-format msgid "Create volume of %s GB" msgstr "" -#: nova/api/ec2/cloud.py:876 +#: nova/api/ec2/cloud.py:877 #, python-format msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: nova/api/ec2/cloud.py:906 nova/api/openstack/compute/contrib/volumes.py:506 +#: nova/api/ec2/cloud.py:907 nova/api/openstack/compute/contrib/volumes.py:506 #, python-format msgid "Detach volume %s" msgstr "" -#: nova/api/ec2/cloud.py:1238 +#: nova/api/ec2/cloud.py:1261 msgid "Allocate address" msgstr "" -#: nova/api/ec2/cloud.py:1243 +#: nova/api/ec2/cloud.py:1266 #, python-format msgid "Release address %s" msgstr "" -#: nova/api/ec2/cloud.py:1248 +#: nova/api/ec2/cloud.py:1271 #, python-format msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "" -#: nova/api/ec2/cloud.py:1258 +#: nova/api/ec2/cloud.py:1281 msgid "Unable to associate IP Address, no fixed_ips." 
msgstr "" -#: nova/api/ec2/cloud.py:1266 -#: nova/api/openstack/compute/contrib/floating_ips.py:251 -#, python-format -msgid "multiple fixed_ips exist, using the first: %s" -msgstr "" - -#: nova/api/ec2/cloud.py:1279 +#: nova/api/ec2/cloud.py:1302 #, python-format msgid "Disassociate address %s" msgstr "" -#: nova/api/ec2/cloud.py:1296 nova/api/openstack/compute/servers.py:918 +#: nova/api/ec2/cloud.py:1319 nova/api/openstack/compute/servers.py:920 #: nova/api/openstack/compute/plugins/v3/multiple_create.py:64 msgid "min_count must be <= max_count" msgstr "" -#: nova/api/ec2/cloud.py:1328 +#: nova/api/ec2/cloud.py:1351 msgid "Image must be available" msgstr "" -#: nova/api/ec2/cloud.py:1424 +#: nova/api/ec2/cloud.py:1451 #, python-format msgid "Reboot instance %r" msgstr "" -#: nova/api/ec2/cloud.py:1537 +#: nova/api/ec2/cloud.py:1566 #, python-format msgid "De-registering image %s" msgstr "" -#: nova/api/ec2/cloud.py:1553 +#: nova/api/ec2/cloud.py:1582 msgid "imageLocation is required" msgstr "" -#: nova/api/ec2/cloud.py:1573 +#: nova/api/ec2/cloud.py:1602 #, python-format msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: nova/api/ec2/cloud.py:1634 +#: nova/api/ec2/cloud.py:1663 msgid "user or group not specified" msgstr "" -#: nova/api/ec2/cloud.py:1637 +#: nova/api/ec2/cloud.py:1666 msgid "only group \"all\" is supported" msgstr "" -#: nova/api/ec2/cloud.py:1640 +#: nova/api/ec2/cloud.py:1669 msgid "operation_type must be add or remove" msgstr "" -#: nova/api/ec2/cloud.py:1642 +#: nova/api/ec2/cloud.py:1671 #, python-format msgid "Updating image %s publicity" msgstr "" -#: nova/api/ec2/cloud.py:1655 +#: nova/api/ec2/cloud.py:1684 #, python-format msgid "Not allowed to modify attributes for image %s" msgstr "" -#: nova/api/ec2/cloud.py:1685 +#: nova/api/ec2/cloud.py:1714 #, python-format msgid "" "Invalid value '%(ec2_instance_id)s' for instanceId. 
Instance does not " "have a volume attached at root (%(root)s)" msgstr "" -#: nova/api/ec2/cloud.py:1718 +#: nova/api/ec2/cloud.py:1747 #, python-format msgid "" "Couldn't stop instance %(instance)s within 1 hour. Current vm_state: " "%(vm_state)s, current task_state: %(task_state)s" msgstr "" -#: nova/api/ec2/cloud.py:1742 +#: nova/api/ec2/cloud.py:1771 #, python-format msgid "image of %(instance)s at %(now)s" msgstr "" -#: nova/api/ec2/cloud.py:1767 nova/api/ec2/cloud.py:1817 +#: nova/api/ec2/cloud.py:1796 nova/api/ec2/cloud.py:1846 msgid "resource_id and tag are required" msgstr "" -#: nova/api/ec2/cloud.py:1771 nova/api/ec2/cloud.py:1821 +#: nova/api/ec2/cloud.py:1800 nova/api/ec2/cloud.py:1850 msgid "Expecting a list of resources" msgstr "" -#: nova/api/ec2/cloud.py:1776 nova/api/ec2/cloud.py:1826 -#: nova/api/ec2/cloud.py:1884 +#: nova/api/ec2/cloud.py:1805 nova/api/ec2/cloud.py:1855 +#: nova/api/ec2/cloud.py:1913 msgid "Only instances implemented" msgstr "" -#: nova/api/ec2/cloud.py:1780 nova/api/ec2/cloud.py:1830 +#: nova/api/ec2/cloud.py:1809 nova/api/ec2/cloud.py:1859 msgid "Expecting a list of tagSets" msgstr "" -#: nova/api/ec2/cloud.py:1786 nova/api/ec2/cloud.py:1839 +#: nova/api/ec2/cloud.py:1815 nova/api/ec2/cloud.py:1868 msgid "Expecting tagSet to be key/value pairs" msgstr "" -#: nova/api/ec2/cloud.py:1793 +#: nova/api/ec2/cloud.py:1822 msgid "Expecting both key and value to be set" msgstr "" -#: nova/api/ec2/cloud.py:1844 +#: nova/api/ec2/cloud.py:1873 msgid "Expecting key to be set" msgstr "" -#: nova/api/ec2/cloud.py:1918 +#: nova/api/ec2/cloud.py:1947 msgid "Invalid CIDR" msgstr "" @@ -2272,233 +2268,145 @@ msgstr "" msgid "Timestamp is invalid." msgstr "" -#: nova/api/metadata/handler.py:112 -msgid "" -"X-Instance-ID present in request headers. The " -"'service_neutron_metadata_proxy' option must be enabled to process this " -"header." 
-msgstr "" - -#: nova/api/metadata/handler.py:141 nova/api/metadata/handler.py:148 +#: nova/api/metadata/handler.py:148 #, python-format msgid "Failed to get metadata for ip: %s" msgstr "" -#: nova/api/metadata/handler.py:143 nova/api/metadata/handler.py:199 +#: nova/api/metadata/handler.py:150 nova/api/metadata/handler.py:207 msgid "An unknown error has occurred. Please try your request again." msgstr "" -#: nova/api/metadata/handler.py:161 +#: nova/api/metadata/handler.py:169 msgid "X-Instance-ID header is missing from request." msgstr "" -#: nova/api/metadata/handler.py:163 +#: nova/api/metadata/handler.py:171 msgid "X-Tenant-ID header is missing from request." msgstr "" -#: nova/api/metadata/handler.py:165 +#: nova/api/metadata/handler.py:173 msgid "Multiple X-Instance-ID headers found within request." msgstr "" -#: nova/api/metadata/handler.py:167 +#: nova/api/metadata/handler.py:175 msgid "Multiple X-Tenant-ID headers found within request." msgstr "" -#: nova/api/metadata/handler.py:181 -#, python-format -msgid "" -"X-Instance-ID-Signature: %(signature)s does not match the expected value:" -" %(expected_signature)s for id: %(instance_id)s. Request From: " -"%(remote_address)s" -msgstr "" - -#: nova/api/metadata/handler.py:190 +#: nova/api/metadata/handler.py:198 msgid "Invalid proxy request signature." msgstr "" -#: nova/api/metadata/handler.py:197 nova/api/metadata/handler.py:204 +#: nova/api/metadata/handler.py:205 #, python-format msgid "Failed to get metadata for instance id: %s" msgstr "" -#: nova/api/metadata/handler.py:208 -#, python-format -msgid "" -"Tenant_id %(tenant_id)s does not match tenant_id of instance " -"%(instance_id)s." 
-msgstr "" - -#: nova/api/metadata/vendordata_json.py:47 -msgid "file does not exist" -msgstr "" - -#: nova/api/metadata/vendordata_json.py:49 -msgid "Unexpected IOError when reading" -msgstr "" - -#: nova/api/metadata/vendordata_json.py:52 -msgid "failed to load json" -msgstr "" - -#: nova/api/openstack/__init__.py:89 +#: nova/api/openstack/__init__.py:92 #, python-format msgid "Caught error: %s" msgstr "" -#: nova/api/openstack/__init__.py:98 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "" - -#: nova/api/openstack/__init__.py:186 +#: nova/api/openstack/__init__.py:189 msgid "Must specify an ExtensionManager class" msgstr "" -#: nova/api/openstack/__init__.py:232 nova/api/openstack/__init__.py:406 -#, python-format -msgid "" -"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " -"resource" -msgstr "" - -#: nova/api/openstack/__init__.py:279 -#: nova/api/openstack/compute/plugins/v3/servers.py:99 -#, python-format -msgid "Not loading %s because it is in the blacklist" -msgstr "" - -#: nova/api/openstack/__init__.py:284 -#: nova/api/openstack/compute/plugins/v3/servers.py:104 -#, python-format -msgid "Not loading %s because it is not in the whitelist" -msgstr "" - -#: nova/api/openstack/__init__.py:291 -msgid "V3 API has been disabled by configuration" -msgstr "" - -#: nova/api/openstack/__init__.py:304 -#, python-format -msgid "Extensions in both blacklist and whitelist: %s" -msgstr "" - -#: nova/api/openstack/__init__.py:328 -#, python-format -msgid "Missing core API extensions: %s" -msgstr "" - -#: nova/api/openstack/common.py:132 -#, python-format -msgid "" -"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. " -"Bad upgrade or db corrupted?" 
-msgstr "" - -#: nova/api/openstack/common.py:182 +#: nova/api/openstack/common.py:185 #, python-format msgid "%s param must be an integer" msgstr "" -#: nova/api/openstack/common.py:185 +#: nova/api/openstack/common.py:188 #, python-format msgid "%s param must be positive" msgstr "" -#: nova/api/openstack/common.py:210 +#: nova/api/openstack/common.py:213 msgid "offset param must be an integer" msgstr "" -#: nova/api/openstack/common.py:216 +#: nova/api/openstack/common.py:219 msgid "limit param must be an integer" msgstr "" -#: nova/api/openstack/common.py:220 +#: nova/api/openstack/common.py:223 msgid "limit param must be positive" msgstr "" -#: nova/api/openstack/common.py:224 +#: nova/api/openstack/common.py:227 msgid "offset param must be positive" msgstr "" -#: nova/api/openstack/common.py:276 +#: nova/api/openstack/common.py:280 #, python-format msgid "href %s does not contain version" msgstr "" -#: nova/api/openstack/common.py:291 +#: nova/api/openstack/common.py:293 msgid "Image metadata limit exceeded" msgstr "" -#: nova/api/openstack/common.py:299 +#: nova/api/openstack/common.py:301 msgid "Image metadata key cannot be blank" msgstr "" -#: nova/api/openstack/common.py:302 +#: nova/api/openstack/common.py:304 msgid "Image metadata key too long" msgstr "" -#: nova/api/openstack/common.py:305 +#: nova/api/openstack/common.py:307 msgid "Invalid image metadata" msgstr "" -#: nova/api/openstack/common.py:368 +#: nova/api/openstack/common.py:370 #, python-format msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" msgstr "" -#: nova/api/openstack/common.py:371 +#: nova/api/openstack/common.py:373 #, python-format msgid "Cannot '%s' an instance which has never been active" msgstr "" -#: nova/api/openstack/common.py:374 +#: nova/api/openstack/common.py:376 #, python-format msgid "Instance is in an invalid state for '%s'" msgstr "" -#: nova/api/openstack/common.py:454 -msgid "Rejecting snapshot request, snapshots currently disabled" -msgstr "" - 
-#: nova/api/openstack/common.py:456 +#: nova/api/openstack/common.py:458 msgid "Instance snapshots are not permitted at this time." msgstr "" -#: nova/api/openstack/common.py:577 +#: nova/api/openstack/common.py:579 msgid "Cells is not enabled." msgstr "" -#: nova/api/openstack/extensions.py:197 +#: nova/api/openstack/extensions.py:198 #, python-format msgid "Loaded extension: %s" msgstr "" -#: nova/api/openstack/extensions.py:243 +#: nova/api/openstack/extensions.py:244 #: nova/api/openstack/compute/plugins/__init__.py:51 #, python-format msgid "Exception loading extension: %s" msgstr "" -#: nova/api/openstack/extensions.py:278 -#, python-format -msgid "Failed to load extension %(ext_factory)s: %(exc)s" -msgstr "" - -#: nova/api/openstack/extensions.py:349 +#: nova/api/openstack/extensions.py:350 #, python-format msgid "Failed to load extension %(classpath)s: %(exc)s" msgstr "" -#: nova/api/openstack/extensions.py:372 +#: nova/api/openstack/extensions.py:373 #, python-format msgid "Failed to load extension %(ext_name)s:%(exc)s" msgstr "" -#: nova/api/openstack/extensions.py:494 +#: nova/api/openstack/extensions.py:495 msgid "Unexpected exception in API method" msgstr "" -#: nova/api/openstack/extensions.py:495 +#: nova/api/openstack/extensions.py:496 #, python-format msgid "" "Unexpected API Error. 
Please report this at " @@ -2507,56 +2415,41 @@ msgid "" "%s" msgstr "" -#: nova/api/openstack/wsgi.py:228 nova/api/openstack/wsgi.py:633 +#: nova/api/openstack/wsgi.py:230 nova/api/openstack/wsgi.py:635 msgid "cannot understand JSON" msgstr "" -#: nova/api/openstack/wsgi.py:638 +#: nova/api/openstack/wsgi.py:640 msgid "too many body keys" msgstr "" -#: nova/api/openstack/wsgi.py:682 -#, python-format -msgid "Exception handling resource: %s" -msgstr "" - -#: nova/api/openstack/wsgi.py:686 -#, python-format -msgid "Fault thrown: %s" -msgstr "" - -#: nova/api/openstack/wsgi.py:689 -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "" - -#: nova/api/openstack/wsgi.py:919 +#: nova/api/openstack/wsgi.py:921 #, python-format msgid "There is no such action: %s" msgstr "" -#: nova/api/openstack/wsgi.py:922 nova/api/openstack/wsgi.py:949 +#: nova/api/openstack/wsgi.py:924 nova/api/openstack/wsgi.py:951 #: nova/api/openstack/compute/server_metadata.py:57 #: nova/api/openstack/compute/server_metadata.py:75 #: nova/api/openstack/compute/server_metadata.py:100 #: nova/api/openstack/compute/server_metadata.py:126 -#: nova/api/openstack/compute/contrib/evacuate.py:45 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:58 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:73 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:95 +#: nova/api/openstack/compute/contrib/evacuate.py:47 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:60 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:75 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:97 msgid "Malformed request body" msgstr "" -#: nova/api/openstack/wsgi.py:926 +#: nova/api/openstack/wsgi.py:928 #, python-format msgid "Action: '%(action)s', body: %(body)s" msgstr "" -#: nova/api/openstack/wsgi.py:946 +#: nova/api/openstack/wsgi.py:948 msgid "Unsupported Content-Type" msgstr "" -#: nova/api/openstack/wsgi.py:958 +#: nova/api/openstack/wsgi.py:960 #, 
python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " @@ -2585,7 +2478,7 @@ msgid "Initializing extension manager." msgstr "" #: nova/api/openstack/compute/flavors.py:107 -#: nova/api/openstack/compute/plugins/v3/flavors.py:70 +#: nova/api/openstack/compute/plugins/v3/flavors.py:72 #, python-format msgid "Invalid is_public filter [%s]" msgstr "" @@ -2601,269 +2494,256 @@ msgid "Invalid minDisk filter [%s]" msgstr "" #: nova/api/openstack/compute/flavors.py:146 -#: nova/api/openstack/compute/servers.py:603 -#: nova/api/openstack/compute/plugins/v3/flavors.py:110 -#: nova/api/openstack/compute/plugins/v3/servers.py:280 +#: nova/api/openstack/compute/servers.py:606 +#: nova/api/openstack/compute/plugins/v3/flavors.py:112 +#: nova/api/openstack/compute/plugins/v3/servers.py:303 #, python-format msgid "marker [%s] not found" msgstr "" -#: nova/api/openstack/compute/image_metadata.py:35 -#: nova/api/openstack/compute/images.py:141 -#: nova/api/openstack/compute/images.py:157 +#: nova/api/openstack/compute/image_metadata.py:37 +#: nova/api/openstack/compute/images.py:135 +#: nova/api/openstack/compute/images.py:151 msgid "Image not found." 
msgstr "" -#: nova/api/openstack/compute/image_metadata.py:78 +#: nova/api/openstack/compute/image_metadata.py:81 msgid "Incorrect request body format" msgstr "" -#: nova/api/openstack/compute/image_metadata.py:82 +#: nova/api/openstack/compute/image_metadata.py:85 #: nova/api/openstack/compute/server_metadata.py:79 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:108 #: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:72 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:77 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:79 msgid "Request body and URI mismatch" msgstr "" -#: nova/api/openstack/compute/image_metadata.py:85 +#: nova/api/openstack/compute/image_metadata.py:88 #: nova/api/openstack/compute/server_metadata.py:83 #: nova/api/openstack/compute/contrib/flavorextraspecs.py:111 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:81 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:83 msgid "Request body contains too many items" msgstr "" -#: nova/api/openstack/compute/image_metadata.py:117 +#: nova/api/openstack/compute/image_metadata.py:122 msgid "Invalid metadata key" msgstr "" -#: nova/api/openstack/compute/images.py:162 +#: nova/api/openstack/compute/images.py:156 msgid "You are not allowed to delete the image." msgstr "" #: nova/api/openstack/compute/ips.py:67 -#: nova/api/openstack/compute/plugins/v3/ips.py:39 +#: nova/api/openstack/compute/plugins/v3/ips.py:41 msgid "Instance does not exist" msgstr "" #: nova/api/openstack/compute/ips.py:90 -#: nova/api/openstack/compute/plugins/v3/ips.py:60 +#: nova/api/openstack/compute/plugins/v3/ips.py:62 msgid "Instance is not a member of specified network" msgstr "" -#: nova/api/openstack/compute/limits.py:161 +#: nova/api/openstack/compute/limits.py:162 #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." 
msgstr "" -#: nova/api/openstack/compute/limits.py:287 +#: nova/api/openstack/compute/limits.py:288 msgid "This request was rate-limited." msgstr "" #: nova/api/openstack/compute/server_metadata.py:37 #: nova/api/openstack/compute/server_metadata.py:122 #: nova/api/openstack/compute/server_metadata.py:177 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:41 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:43 msgid "Server does not exist" msgstr "" #: nova/api/openstack/compute/server_metadata.py:157 #: nova/api/openstack/compute/server_metadata.py:168 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:144 -#: nova/api/openstack/compute/plugins/v3/server_metadata.py:156 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:146 +#: nova/api/openstack/compute/plugins/v3/server_metadata.py:158 msgid "Metadata item was not found" msgstr "" -#: nova/api/openstack/compute/servers.py:81 -msgid "" -"XML support has been deprecated and may be removed as early as the Juno " -"release." 
-msgstr "" - -#: nova/api/openstack/compute/servers.py:551 -#: nova/api/openstack/compute/contrib/cells.py:423 -#: nova/api/openstack/compute/plugins/v3/cells.py:331 +#: nova/api/openstack/compute/servers.py:554 +#: nova/api/openstack/compute/contrib/cells.py:427 msgid "Invalid changes-since value" msgstr "" -#: nova/api/openstack/compute/servers.py:570 -#: nova/api/openstack/compute/plugins/v3/servers.py:234 +#: nova/api/openstack/compute/servers.py:573 +#: nova/api/openstack/compute/plugins/v3/servers.py:257 msgid "Only administrators may list deleted instances" msgstr "" -#: nova/api/openstack/compute/servers.py:606 -#: nova/api/openstack/compute/plugins/v3/servers.py:283 -#, python-format -msgid "Flavor '%s' could not be found " -msgstr "" - -#: nova/api/openstack/compute/servers.py:625 -#: nova/api/openstack/compute/servers.py:772 -#: nova/api/openstack/compute/servers.py:1081 +#: nova/api/openstack/compute/servers.py:627 +#: nova/api/openstack/compute/servers.py:774 +#: nova/api/openstack/compute/servers.py:1078 #: nova/api/openstack/compute/servers.py:1203 #: nova/api/openstack/compute/servers.py:1388 -#: nova/api/openstack/compute/plugins/v3/servers.py:617 -#: nova/api/openstack/compute/plugins/v3/servers.py:729 -#: nova/api/openstack/compute/plugins/v3/servers.py:848 +#: nova/api/openstack/compute/plugins/v3/servers.py:650 +#: nova/api/openstack/compute/plugins/v3/servers.py:768 +#: nova/api/openstack/compute/plugins/v3/servers.py:889 msgid "Instance could not be found" msgstr "" -#: nova/api/openstack/compute/servers.py:656 +#: nova/api/openstack/compute/servers.py:658 #, python-format msgid "Bad personality format: missing %s" msgstr "" -#: nova/api/openstack/compute/servers.py:659 +#: nova/api/openstack/compute/servers.py:661 msgid "Bad personality format" msgstr "" -#: nova/api/openstack/compute/servers.py:662 +#: nova/api/openstack/compute/servers.py:664 #, python-format msgid "Personality content for %s cannot be decoded" msgstr "" -#: 
nova/api/openstack/compute/servers.py:677 +#: nova/api/openstack/compute/servers.py:679 msgid "Unknown argument : port" msgstr "" -#: nova/api/openstack/compute/servers.py:680 -#: nova/api/openstack/compute/plugins/v3/servers.py:338 +#: nova/api/openstack/compute/servers.py:682 +#: nova/api/openstack/compute/plugins/v3/servers.py:361 #, python-format msgid "Bad port format: port uuid is not in proper format (%s)" msgstr "" -#: nova/api/openstack/compute/servers.py:690 -#: nova/api/openstack/compute/plugins/v3/servers.py:354 +#: nova/api/openstack/compute/servers.py:692 +#: nova/api/openstack/compute/plugins/v3/servers.py:377 #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "" -#: nova/api/openstack/compute/servers.py:701 -#: nova/api/openstack/compute/plugins/v3/servers.py:327 +#: nova/api/openstack/compute/servers.py:703 +#: nova/api/openstack/compute/plugins/v3/servers.py:350 #, python-format msgid "Invalid fixed IP address (%s)" msgstr "" -#: nova/api/openstack/compute/servers.py:714 -#: nova/api/openstack/compute/plugins/v3/servers.py:369 +#: nova/api/openstack/compute/servers.py:716 +#: nova/api/openstack/compute/plugins/v3/servers.py:392 #, python-format msgid "Duplicate networks (%s) are not allowed" msgstr "" -#: nova/api/openstack/compute/servers.py:720 -#: nova/api/openstack/compute/plugins/v3/servers.py:375 +#: nova/api/openstack/compute/servers.py:722 +#: nova/api/openstack/compute/plugins/v3/servers.py:398 #, python-format msgid "Bad network format: missing %s" msgstr "" -#: nova/api/openstack/compute/servers.py:723 -#: nova/api/openstack/compute/servers.py:824 -#: nova/api/openstack/compute/plugins/v3/servers.py:378 +#: nova/api/openstack/compute/servers.py:725 +#: nova/api/openstack/compute/servers.py:826 +#: nova/api/openstack/compute/plugins/v3/servers.py:401 msgid "Bad networks format" msgstr "" -#: nova/api/openstack/compute/servers.py:749 +#: nova/api/openstack/compute/servers.py:751 msgid "Userdata 
content cannot be decoded" msgstr "" -#: nova/api/openstack/compute/servers.py:754 +#: nova/api/openstack/compute/servers.py:756 msgid "accessIPv4 is not proper IPv4 format" msgstr "" -#: nova/api/openstack/compute/servers.py:759 +#: nova/api/openstack/compute/servers.py:761 msgid "accessIPv6 is not proper IPv6 format" msgstr "" -#: nova/api/openstack/compute/servers.py:788 -#: nova/api/openstack/compute/plugins/v3/servers.py:419 +#: nova/api/openstack/compute/servers.py:790 +#: nova/api/openstack/compute/plugins/v3/servers.py:443 msgid "Server name is not defined" msgstr "" -#: nova/api/openstack/compute/servers.py:840 -#: nova/api/openstack/compute/servers.py:968 +#: nova/api/openstack/compute/servers.py:842 +#: nova/api/openstack/compute/servers.py:970 msgid "Invalid flavorRef provided." msgstr "" -#: nova/api/openstack/compute/servers.py:880 +#: nova/api/openstack/compute/servers.py:882 msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." msgstr "" -#: nova/api/openstack/compute/servers.py:965 -#: nova/api/openstack/compute/plugins/v3/servers.py:495 +#: nova/api/openstack/compute/servers.py:967 +#: nova/api/openstack/compute/plugins/v3/servers.py:519 msgid "Can not find requested image" msgstr "" -#: nova/api/openstack/compute/servers.py:971 -#: nova/api/openstack/compute/plugins/v3/servers.py:501 +#: nova/api/openstack/compute/servers.py:973 +#: nova/api/openstack/compute/plugins/v3/servers.py:525 msgid "Invalid key_name provided." msgstr "" -#: nova/api/openstack/compute/servers.py:974 -#: nova/api/openstack/compute/plugins/v3/servers.py:504 +#: nova/api/openstack/compute/servers.py:976 +#: nova/api/openstack/compute/plugins/v3/servers.py:528 msgid "Invalid config_drive provided." msgstr "" -#: nova/api/openstack/compute/servers.py:1066 +#: nova/api/openstack/compute/servers.py:1063 msgid "HostId cannot be updated." 
msgstr "" -#: nova/api/openstack/compute/servers.py:1070 +#: nova/api/openstack/compute/servers.py:1067 msgid "Personality cannot be updated." msgstr "" -#: nova/api/openstack/compute/servers.py:1096 -#: nova/api/openstack/compute/servers.py:1115 -#: nova/api/openstack/compute/plugins/v3/servers.py:628 -#: nova/api/openstack/compute/plugins/v3/servers.py:644 +#: nova/api/openstack/compute/servers.py:1093 +#: nova/api/openstack/compute/servers.py:1112 +#: nova/api/openstack/compute/plugins/v3/servers.py:662 +#: nova/api/openstack/compute/plugins/v3/servers.py:679 msgid "Instance has not been resized." msgstr "" -#: nova/api/openstack/compute/servers.py:1118 -#: nova/api/openstack/compute/plugins/v3/servers.py:647 +#: nova/api/openstack/compute/servers.py:1115 +#: nova/api/openstack/compute/plugins/v3/servers.py:682 msgid "Flavor used by the instance could not be found." msgstr "" -#: nova/api/openstack/compute/servers.py:1134 -#: nova/api/openstack/compute/plugins/v3/servers.py:661 +#: nova/api/openstack/compute/servers.py:1131 +#: nova/api/openstack/compute/plugins/v3/servers.py:697 msgid "Argument 'type' for reboot must be a string" msgstr "" -#: nova/api/openstack/compute/servers.py:1140 -#: nova/api/openstack/compute/plugins/v3/servers.py:667 +#: nova/api/openstack/compute/servers.py:1137 +#: nova/api/openstack/compute/plugins/v3/servers.py:703 msgid "Argument 'type' for reboot is not HARD or SOFT" msgstr "" -#: nova/api/openstack/compute/servers.py:1144 -#: nova/api/openstack/compute/plugins/v3/servers.py:671 +#: nova/api/openstack/compute/servers.py:1141 +#: nova/api/openstack/compute/plugins/v3/servers.py:707 msgid "Missing argument 'type' for reboot" msgstr "" -#: nova/api/openstack/compute/servers.py:1171 -#: nova/api/openstack/compute/plugins/v3/servers.py:699 +#: nova/api/openstack/compute/servers.py:1168 +#: nova/api/openstack/compute/plugins/v3/servers.py:735 msgid "Unable to locate requested flavor." 
msgstr "" -#: nova/api/openstack/compute/servers.py:1174 -#: nova/api/openstack/compute/plugins/v3/servers.py:702 +#: nova/api/openstack/compute/servers.py:1171 +#: nova/api/openstack/compute/plugins/v3/servers.py:738 msgid "Resize requires a flavor change." msgstr "" -#: nova/api/openstack/compute/servers.py:1182 -#: nova/api/openstack/compute/plugins/v3/servers.py:710 +#: nova/api/openstack/compute/servers.py:1181 +#: nova/api/openstack/compute/plugins/v3/servers.py:748 msgid "You are not authorized to access the image the instance was started with." msgstr "" -#: nova/api/openstack/compute/servers.py:1186 -#: nova/api/openstack/compute/plugins/v3/servers.py:714 +#: nova/api/openstack/compute/servers.py:1185 +#: nova/api/openstack/compute/plugins/v3/servers.py:752 msgid "Image that the instance was started with could not be found." msgstr "" -#: nova/api/openstack/compute/servers.py:1190 -#: nova/api/openstack/compute/plugins/v3/servers.py:718 +#: nova/api/openstack/compute/servers.py:1189 +#: nova/api/openstack/compute/plugins/v3/servers.py:756 msgid "Invalid instance image." msgstr "" @@ -2906,176 +2786,131 @@ msgid "Could not parse imageRef from request." 
msgstr "" #: nova/api/openstack/compute/servers.py:1394 -#: nova/api/openstack/compute/plugins/v3/servers.py:854 +#: nova/api/openstack/compute/plugins/v3/servers.py:895 msgid "Cannot find image for rebuild" msgstr "" -#: nova/api/openstack/compute/servers.py:1427 +#: nova/api/openstack/compute/servers.py:1428 msgid "createImage entity requires name attribute" msgstr "" -#: nova/api/openstack/compute/servers.py:1436 -#: nova/api/openstack/compute/contrib/admin_actions.py:288 -#: nova/api/openstack/compute/plugins/v3/servers.py:894 +#: nova/api/openstack/compute/servers.py:1437 +#: nova/api/openstack/compute/contrib/admin_actions.py:283 +#: nova/api/openstack/compute/plugins/v3/servers.py:936 msgid "Invalid metadata" msgstr "" -#: nova/api/openstack/compute/servers.py:1494 +#: nova/api/openstack/compute/servers.py:1495 msgid "Invalid adminPass" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:63 -#: nova/api/openstack/compute/contrib/admin_actions.py:88 -#: nova/api/openstack/compute/contrib/admin_actions.py:113 -#: nova/api/openstack/compute/contrib/admin_actions.py:135 -#: nova/api/openstack/compute/contrib/admin_actions.py:178 -#: nova/api/openstack/compute/contrib/admin_actions.py:197 -#: nova/api/openstack/compute/contrib/admin_actions.py:216 -#: nova/api/openstack/compute/contrib/admin_actions.py:235 -#: nova/api/openstack/compute/contrib/admin_actions.py:393 -#: nova/api/openstack/compute/contrib/multinic.py:43 +#: nova/api/openstack/compute/contrib/admin_actions.py:64 +#: nova/api/openstack/compute/contrib/admin_actions.py:86 +#: nova/api/openstack/compute/contrib/admin_actions.py:108 +#: nova/api/openstack/compute/contrib/admin_actions.py:130 +#: nova/api/openstack/compute/contrib/admin_actions.py:173 +#: nova/api/openstack/compute/contrib/admin_actions.py:192 +#: nova/api/openstack/compute/contrib/admin_actions.py:211 +#: nova/api/openstack/compute/contrib/admin_actions.py:230 +#: nova/api/openstack/compute/contrib/admin_actions.py:388 +#: 
nova/api/openstack/compute/contrib/multinic.py:44 #: nova/api/openstack/compute/contrib/rescue.py:45 #: nova/api/openstack/compute/contrib/shelve.py:43 msgid "Server not found" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:66 -#: nova/api/openstack/compute/plugins/v3/pause_server.py:59 -#: nova/api/openstack/compute/plugins/v3/pause_server.py:81 -msgid "Virt driver does not implement pause function." -msgstr "" - -#: nova/api/openstack/compute/contrib/admin_actions.py:70 -#, python-format -msgid "Compute.api::pause %s" -msgstr "" - -#: nova/api/openstack/compute/contrib/admin_actions.py:91 -msgid "Virt driver does not implement unpause function." -msgstr "" - -#: nova/api/openstack/compute/contrib/admin_actions.py:95 -#, python-format -msgid "Compute.api::unpause %s" -msgstr "" - -#: nova/api/openstack/compute/contrib/admin_actions.py:117 -#, python-format -msgid "compute.api::suspend %s" -msgstr "" - -#: nova/api/openstack/compute/contrib/admin_actions.py:139 -#, python-format -msgid "compute.api::resume %s" -msgstr "" - -#: nova/api/openstack/compute/contrib/admin_actions.py:165 -#, python-format -msgid "Error in migrate %s" -msgstr "" - -#: nova/api/openstack/compute/contrib/admin_actions.py:184 -#, python-format -msgid "Compute.api::reset_network %s" -msgstr "" - -#: nova/api/openstack/compute/contrib/admin_actions.py:203 -#, python-format -msgid "Compute.api::inject_network_info %s" -msgstr "" - -#: nova/api/openstack/compute/contrib/admin_actions.py:220 -#, python-format -msgid "Compute.api::lock %s" -msgstr "" - -#: nova/api/openstack/compute/contrib/admin_actions.py:239 -#, python-format -msgid "Compute.api::unlock %s" -msgstr "" - -#: nova/api/openstack/compute/contrib/admin_actions.py:265 +#: nova/api/openstack/compute/contrib/admin_actions.py:260 #, python-format msgid "createBackup entity requires %s attribute" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:269 +#: 
nova/api/openstack/compute/contrib/admin_actions.py:264 msgid "Malformed createBackup entity" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:275 +#: nova/api/openstack/compute/contrib/admin_actions.py:270 msgid "createBackup attribute 'rotation' must be an integer" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:278 +#: nova/api/openstack/compute/contrib/admin_actions.py:273 msgid "createBackup attribute 'rotation' must be greater than or equal to zero" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:294 +#: nova/api/openstack/compute/contrib/admin_actions.py:289 #: nova/api/openstack/compute/contrib/console_output.py:46 #: nova/api/openstack/compute/contrib/server_start_stop.py:40 msgid "Instance not found" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:325 +#: nova/api/openstack/compute/contrib/admin_actions.py:320 msgid "" "host, block_migration and disk_over_commit must be specified for live " "migration." msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:362 +#: nova/api/openstack/compute/contrib/admin_actions.py:357 #, python-format msgid "Live migration of instance %s to another host failed" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:365 +#: nova/api/openstack/compute/contrib/admin_actions.py:360 #, python-format msgid "Live migration of instance %(id)s to host %(host)s failed" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:383 -#: nova/api/openstack/compute/plugins/v3/admin_actions.py:83 +#: nova/api/openstack/compute/contrib/admin_actions.py:378 #, python-format msgid "Desired state must be specified. 
Valid states are: %s" msgstr "" -#: nova/api/openstack/compute/contrib/admin_actions.py:397 +#: nova/api/openstack/compute/contrib/agents.py:100 +#: nova/api/openstack/compute/contrib/agents.py:118 +#: nova/api/openstack/compute/contrib/agents.py:156 +#: nova/api/openstack/compute/contrib/cloudpipe_update.py:55 #, python-format -msgid "Compute.api::resetState %s" +msgid "Invalid request body: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/aggregates.py:39 +msgid "Only host parameter can be specified" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:161 +#: nova/api/openstack/compute/contrib/aggregates.py:42 +msgid "Host parameter must be specified" +msgstr "" + +#: nova/api/openstack/compute/contrib/aggregates.py:168 #, python-format msgid "Aggregates does not have %s action" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:165 +#: nova/api/openstack/compute/contrib/aggregates.py:172 #: nova/api/openstack/compute/contrib/flavormanage.py:55 #: nova/api/openstack/compute/contrib/keypairs.py:86 #: nova/api/openstack/compute/plugins/v3/aggregates.py:169 msgid "Invalid request body" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:175 -#: nova/api/openstack/compute/contrib/aggregates.py:180 +#: nova/api/openstack/compute/contrib/aggregates.py:182 +#: nova/api/openstack/compute/contrib/aggregates.py:187 #, python-format msgid "Cannot add host %(host)s in aggregate %(id)s" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:194 -#: nova/api/openstack/compute/contrib/aggregates.py:198 +#: nova/api/openstack/compute/contrib/aggregates.py:201 +#: nova/api/openstack/compute/contrib/aggregates.py:205 #: nova/api/openstack/compute/plugins/v3/aggregates.py:153 #: nova/api/openstack/compute/plugins/v3/aggregates.py:157 #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:217 +#: nova/api/openstack/compute/contrib/aggregates.py:224 #: 
nova/api/openstack/compute/plugins/v3/aggregates.py:177 msgid "The value of metadata must be a dict" msgstr "" -#: nova/api/openstack/compute/contrib/aggregates.py:229 +#: nova/api/openstack/compute/contrib/aggregates.py:237 #, python-format msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" msgstr "" @@ -3091,28 +2926,32 @@ msgstr "" msgid "Delete snapshot with id: %s" msgstr "" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:104 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:105 msgid "Attach interface" msgstr "" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:119 -#: nova/api/openstack/compute/contrib/attach_interfaces.py:154 -#: nova/api/openstack/compute/contrib/attach_interfaces.py:177 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:169 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:120 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:158 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:184 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:174 +#: nova/network/security_group/neutron_driver.py:510 +#: nova/network/security_group/neutron_driver.py:514 +#: nova/network/security_group/neutron_driver.py:518 +#: nova/network/security_group/neutron_driver.py:522 +#: nova/network/security_group/neutron_driver.py:526 msgid "Network driver does not support this function." 
msgstr "" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:123 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:124 msgid "Failed to attach interface" msgstr "" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:130 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:131 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:134 msgid "Attachments update is not supported" msgstr "" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:142 -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:142 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:146 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:144 #, python-format msgid "Detach interface %s" msgstr "" @@ -3126,40 +2965,33 @@ msgstr "" msgid "Must specify id or address" msgstr "" -#: nova/api/openstack/compute/contrib/cells.py:252 +#: nova/api/openstack/compute/contrib/cells.py:250 #, python-format msgid "Cell %(id)s not found." msgstr "" -#: nova/api/openstack/compute/contrib/cells.py:285 -#: nova/api/openstack/compute/plugins/v3/cells.py:192 +#: nova/api/openstack/compute/contrib/cells.py:286 msgid "Cell name cannot be empty" msgstr "" #: nova/api/openstack/compute/contrib/cells.py:289 -#: nova/api/openstack/compute/plugins/v3/cells.py:196 msgid "Cell name cannot contain '!' 
or '.'" msgstr "" -#: nova/api/openstack/compute/contrib/cells.py:296 -#: nova/api/openstack/compute/plugins/v3/cells.py:203 +#: nova/api/openstack/compute/contrib/cells.py:295 msgid "Cell type must be 'parent' or 'child'" msgstr "" -#: nova/api/openstack/compute/contrib/cells.py:352 -#: nova/api/openstack/compute/contrib/cells.py:376 -#: nova/api/openstack/compute/plugins/v3/cells.py:259 -#: nova/api/openstack/compute/plugins/v3/cells.py:282 +#: nova/api/openstack/compute/contrib/cells.py:353 +#: nova/api/openstack/compute/contrib/cells.py:378 msgid "No cell information in request" msgstr "" #: nova/api/openstack/compute/contrib/cells.py:357 -#: nova/api/openstack/compute/plugins/v3/cells.py:264 msgid "No cell name in request" msgstr "" -#: nova/api/openstack/compute/contrib/cells.py:411 -#: nova/api/openstack/compute/plugins/v3/cells.py:319 +#: nova/api/openstack/compute/contrib/cells.py:415 msgid "Only 'updated_since', 'project_id' and 'deleted' are understood." msgstr "" @@ -3230,23 +3062,27 @@ msgstr "" msgid "%s must be either 'MANUAL' or 'AUTO'." msgstr "" -#: nova/api/openstack/compute/contrib/evacuate.py:53 -msgid "host and onSharedStorage must be specified." +#: nova/api/openstack/compute/contrib/evacuate.py:54 +msgid "host must be specified." msgstr "" #: nova/api/openstack/compute/contrib/evacuate.py:61 +msgid "onSharedStorage must be specified." +msgstr "" + +#: nova/api/openstack/compute/contrib/evacuate.py:69 #: nova/api/openstack/compute/plugins/v3/evacuate.py:67 msgid "admin password can't be changed on existing disk" msgstr "" -#: nova/api/openstack/compute/contrib/evacuate.py:71 -#: nova/api/openstack/compute/plugins/v3/evacuate.py:77 +#: nova/api/openstack/compute/contrib/evacuate.py:80 +#: nova/api/openstack/compute/plugins/v3/evacuate.py:78 #, python-format msgid "Compute host %s not found." 
msgstr "" -#: nova/api/openstack/compute/contrib/evacuate.py:77 -#: nova/api/openstack/compute/plugins/v3/evacuate.py:83 +#: nova/api/openstack/compute/contrib/evacuate.py:86 +#: nova/api/openstack/compute/plugins/v3/evacuate.py:84 msgid "The target host can't be the same one." msgstr "" @@ -3298,86 +3134,86 @@ msgstr "" msgid "DNS entries not found." msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:129 -#: nova/api/openstack/compute/contrib/floating_ips.py:183 +#: nova/api/openstack/compute/contrib/floating_ips.py:130 +#: nova/api/openstack/compute/contrib/floating_ips.py:186 #, python-format msgid "Floating ip not found for id %s" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:162 +#: nova/api/openstack/compute/contrib/floating_ips.py:163 #, python-format msgid "No more floating ips in pool %s." msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:164 +#: nova/api/openstack/compute/contrib/floating_ips.py:165 msgid "No more floating ips available." msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:168 +#: nova/api/openstack/compute/contrib/floating_ips.py:169 #, python-format msgid "IP allocation over quota in pool %s." msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:170 +#: nova/api/openstack/compute/contrib/floating_ips.py:171 msgid "IP allocation over quota." 
msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:220 -#: nova/api/openstack/compute/contrib/floating_ips.py:285 -#: nova/api/openstack/compute/contrib/security_groups.py:482 +#: nova/api/openstack/compute/contrib/floating_ips.py:223 +#: nova/api/openstack/compute/contrib/floating_ips.py:288 +#: nova/api/openstack/compute/contrib/security_groups.py:488 msgid "Missing parameter dict" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:223 -#: nova/api/openstack/compute/contrib/floating_ips.py:288 +#: nova/api/openstack/compute/contrib/floating_ips.py:226 +#: nova/api/openstack/compute/contrib/floating_ips.py:291 msgid "Address not specified" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:229 +#: nova/api/openstack/compute/contrib/floating_ips.py:232 msgid "No nw_info cache associated with instance" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:234 +#: nova/api/openstack/compute/contrib/floating_ips.py:237 msgid "No fixed ips associated to instance" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:245 +#: nova/api/openstack/compute/contrib/floating_ips.py:248 msgid "Specified fixed address not assigned to instance" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:259 +#: nova/api/openstack/compute/contrib/floating_ips.py:262 msgid "floating ip is already associated" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:262 +#: nova/api/openstack/compute/contrib/floating_ips.py:265 msgid "l3driver call to add floating ip failed" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:265 -#: nova/api/openstack/compute/contrib/floating_ips.py:296 +#: nova/api/openstack/compute/contrib/floating_ips.py:268 +#: nova/api/openstack/compute/contrib/floating_ips.py:299 msgid "floating ip not found" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:270 +#: nova/api/openstack/compute/contrib/floating_ips.py:273 msgid "Error. 
Unable to associate floating ip" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:311 +#: nova/api/openstack/compute/contrib/floating_ips.py:314 msgid "Floating ip is not associated" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips.py:315 +#: nova/api/openstack/compute/contrib/floating_ips.py:318 #, python-format msgid "Floating ip %(address)s is not associated with instance %(id)s." msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:118 +#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:116 #: nova/api/openstack/compute/contrib/services.py:173 #: nova/api/openstack/compute/plugins/v3/services.py:124 msgid "Unknown action" msgstr "" -#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:146 +#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:144 #: nova/cmd/manage.py:417 #, python-format msgid "/%s should be specified as single address(es) not in cidr format" @@ -3387,79 +3223,79 @@ msgstr "" msgid "fping utility is not found." msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:183 +#: nova/api/openstack/compute/contrib/hosts.py:185 #, python-format msgid "Invalid update setting: '%s'" msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:186 +#: nova/api/openstack/compute/contrib/hosts.py:188 #, python-format msgid "Invalid status: '%s'" msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:188 +#: nova/api/openstack/compute/contrib/hosts.py:190 #, python-format msgid "Invalid mode: '%s'" msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:190 +#: nova/api/openstack/compute/contrib/hosts.py:192 msgid "'status' or 'maintenance_mode' needed for host update" msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:206 -#: nova/api/openstack/compute/plugins/v3/hosts.py:134 +#: nova/api/openstack/compute/contrib/hosts.py:208 +#: nova/api/openstack/compute/plugins/v3/hosts.py:135 #, python-format msgid "Putting host %(host_name)s in maintenance mode %(mode)s." 
msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:212 -#: nova/api/openstack/compute/plugins/v3/hosts.py:140 +#: nova/api/openstack/compute/contrib/hosts.py:214 +#: nova/api/openstack/compute/plugins/v3/hosts.py:141 msgid "Virt driver does not implement host maintenance mode." msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:227 -#: nova/api/openstack/compute/plugins/v3/hosts.py:156 +#: nova/api/openstack/compute/contrib/hosts.py:229 +#: nova/api/openstack/compute/plugins/v3/hosts.py:157 #, python-format msgid "Enabling host %s." msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:229 -#: nova/api/openstack/compute/plugins/v3/hosts.py:158 +#: nova/api/openstack/compute/contrib/hosts.py:231 +#: nova/api/openstack/compute/plugins/v3/hosts.py:159 #, python-format msgid "Disabling host %s." msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:234 -#: nova/api/openstack/compute/plugins/v3/hosts.py:163 +#: nova/api/openstack/compute/contrib/hosts.py:236 +#: nova/api/openstack/compute/plugins/v3/hosts.py:164 msgid "Virt driver does not implement host disabled status." msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:250 -#: nova/api/openstack/compute/plugins/v3/hosts.py:181 +#: nova/api/openstack/compute/contrib/hosts.py:252 +#: nova/api/openstack/compute/plugins/v3/hosts.py:182 msgid "Virt driver does not implement host power management." 
msgstr "" -#: nova/api/openstack/compute/contrib/hosts.py:336 -#: nova/api/openstack/compute/plugins/v3/hosts.py:274 +#: nova/api/openstack/compute/contrib/hosts.py:338 +#: nova/api/openstack/compute/plugins/v3/hosts.py:275 msgid "Describe-resource is admin only functionality" msgstr "" -#: nova/api/openstack/compute/contrib/hypervisors.py:193 -#: nova/api/openstack/compute/contrib/hypervisors.py:205 -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:93 -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:105 -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:140 +#: nova/api/openstack/compute/contrib/hypervisors.py:208 +#: nova/api/openstack/compute/contrib/hypervisors.py:220 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:100 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:112 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:147 #, python-format msgid "Hypervisor with ID '%s' could not be found." msgstr "" -#: nova/api/openstack/compute/contrib/hypervisors.py:213 -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:113 +#: nova/api/openstack/compute/contrib/hypervisors.py:228 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:120 msgid "Virt driver does not implement uptime function." msgstr "" -#: nova/api/openstack/compute/contrib/hypervisors.py:229 -#: nova/api/openstack/compute/contrib/hypervisors.py:239 +#: nova/api/openstack/compute/contrib/hypervisors.py:244 +#: nova/api/openstack/compute/contrib/hypervisors.py:254 #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "" @@ -3474,27 +3310,22 @@ msgstr "" msgid "Quota exceeded, too many key pairs." 
msgstr "" -#: nova/api/openstack/compute/contrib/multinic.py:54 +#: nova/api/openstack/compute/contrib/multinic.py:55 msgid "Missing 'networkId' argument for addFixedIp" msgstr "" -#: nova/api/openstack/compute/contrib/multinic.py:70 +#: nova/api/openstack/compute/contrib/multinic.py:75 msgid "Missing 'address' argument for removeFixedIp" msgstr "" -#: nova/api/openstack/compute/contrib/multinic.py:80 -#, python-format -msgid "Unable to find address %r" -msgstr "" - #: nova/api/openstack/compute/contrib/networks_associate.py:40 #: nova/api/openstack/compute/contrib/networks_associate.py:56 #: nova/api/openstack/compute/contrib/networks_associate.py:74 -#: nova/api/openstack/compute/contrib/os_networks.py:78 -#: nova/api/openstack/compute/contrib/os_networks.py:93 -#: nova/api/openstack/compute/contrib/os_networks.py:106 -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:110 -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:137 +#: nova/api/openstack/compute/contrib/os_networks.py:79 +#: nova/api/openstack/compute/contrib/os_networks.py:94 +#: nova/api/openstack/compute/contrib/os_networks.py:107 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:112 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:144 msgid "Network not found" msgstr "" @@ -3510,66 +3341,52 @@ msgstr "" msgid "Associate host is not implemented by the configured Network API" msgstr "" -#: nova/api/openstack/compute/contrib/os_networks.py:81 +#: nova/api/openstack/compute/contrib/os_networks.py:82 msgid "Disassociate network is not implemented by the configured Network API" msgstr "" -#: nova/api/openstack/compute/contrib/os_networks.py:100 -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125 -#, python-format -msgid "Deleting network with id %s" -msgstr "" - -#: nova/api/openstack/compute/contrib/os_networks.py:118 +#: nova/api/openstack/compute/contrib/os_networks.py:119 msgid "Missing network in body" msgstr "" -#: 
nova/api/openstack/compute/contrib/os_networks.py:122 +#: nova/api/openstack/compute/contrib/os_networks.py:123 msgid "Network label is required" msgstr "" -#: nova/api/openstack/compute/contrib/os_networks.py:126 +#: nova/api/openstack/compute/contrib/os_networks.py:127 msgid "Network cidr or cidr_v6 is required" msgstr "" -#: nova/api/openstack/compute/contrib/os_networks.py:152 +#: nova/api/openstack/compute/contrib/os_networks.py:153 msgid "VLAN support must be enabled" msgstr "" -#: nova/api/openstack/compute/contrib/os_networks.py:155 +#: nova/api/openstack/compute/contrib/os_networks.py:156 #, python-format msgid "Cannot associate network %(network)s with project %(project)s: %(message)s" msgstr "" -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:83 -msgid "Failed to get default networks" -msgstr "" - -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:122 -msgid "Failed to update usages deallocating network." -msgstr "" - -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:157 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:168 msgid "No CIDR requested" msgstr "" -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:163 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:174 msgid "Requested network does not contain enough (2+) usable hosts" msgstr "" -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:167 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:178 msgid "CIDR is malformed." msgstr "" -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:170 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:181 msgid "Address could not be converted." msgstr "" -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:178 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:189 msgid "Quota exceeded, too many networks." 
msgstr "" -#: nova/api/openstack/compute/contrib/os_tenant_networks.py:191 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:202 msgid "Create networks failed" msgstr "" @@ -3628,7 +3445,7 @@ msgid "Malformed scheduler_hints attribute" msgstr "" #: nova/api/openstack/compute/contrib/security_group_default_rules.py:127 -#: nova/api/openstack/compute/contrib/security_groups.py:387 +#: nova/api/openstack/compute/contrib/security_groups.py:394 msgid "Not enough parameters to build a valid rule." msgstr "" @@ -3640,81 +3457,80 @@ msgstr "" msgid "security group default rule not found" msgstr "" -#: nova/api/openstack/compute/contrib/security_groups.py:395 +#: nova/api/openstack/compute/contrib/security_groups.py:402 #, python-format msgid "Bad prefix for network in cidr %s" msgstr "" -#: nova/api/openstack/compute/contrib/security_groups.py:485 +#: nova/api/openstack/compute/contrib/security_groups.py:491 msgid "Security group not specified" msgstr "" -#: nova/api/openstack/compute/contrib/security_groups.py:489 +#: nova/api/openstack/compute/contrib/security_groups.py:495 msgid "Security group name cannot be empty" msgstr "" -#: nova/api/openstack/compute/contrib/server_external_events.py:92 +#: nova/api/openstack/compute/contrib/server_external_events.py:93 #: nova/api/openstack/compute/plugins/v3/server_external_events.py:65 #, python-format msgid "event entity requires key %(key)s" msgstr "" -#: nova/api/openstack/compute/contrib/server_external_events.py:96 +#: nova/api/openstack/compute/contrib/server_external_events.py:97 #: nova/api/openstack/compute/plugins/v3/server_external_events.py:69 #, python-format msgid "event entity contains unsupported items: %s" msgstr "" -#: nova/api/openstack/compute/contrib/server_external_events.py:102 +#: nova/api/openstack/compute/contrib/server_external_events.py:103 #: nova/api/openstack/compute/plugins/v3/server_external_events.py:75 #, python-format msgid "Invalid event status `%s'" msgstr "" -#: 
nova/api/openstack/compute/contrib/server_external_events.py:121 -#: nova/api/openstack/compute/plugins/v3/server_external_events.py:94 +#: nova/api/openstack/compute/contrib/server_external_events.py:126 #, python-format -msgid "Create event %(name)s:%(tag)s for instance %(instance_uuid)s" +msgid "Creating event %(name)s:%(tag)s for instance %(instance_uuid)s" msgstr "" -#: nova/api/openstack/compute/contrib/server_external_events.py:130 +#: nova/api/openstack/compute/contrib/server_external_events.py:148 #: nova/api/openstack/compute/plugins/v3/server_external_events.py:103 msgid "No instances found for any event" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:161 +#: nova/api/openstack/compute/contrib/server_groups.py:163 msgid "Conflicting policies configured!" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:166 +#: nova/api/openstack/compute/contrib/server_groups.py:168 #, python-format msgid "Invalid policies: %s" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:171 +#: nova/api/openstack/compute/contrib/server_groups.py:173 msgid "Duplicate policies configured!" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:176 +#: nova/api/openstack/compute/contrib/server_groups.py:178 msgid "the body is invalid." msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:185 +#: nova/api/openstack/compute/contrib/server_groups.py:187 #, python-format msgid "'%s' is either missing or empty." 
msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:191 +#: nova/api/openstack/compute/contrib/server_groups.py:193 #, python-format msgid "Invalid format for name: '%s'" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:199 +#: nova/api/openstack/compute/contrib/server_groups.py:201 #, python-format msgid "'%s' is not a list" msgstr "" -#: nova/api/openstack/compute/contrib/server_groups.py:203 +#: nova/api/openstack/compute/contrib/server_groups.py:205 #, python-format msgid "unsupported fields: %s" msgstr "" @@ -3741,11 +3557,11 @@ msgstr "" msgid "Missing disabled reason field" msgstr "" -#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:230 +#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:231 msgid "Datetime is in invalid format" msgstr "" -#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:249 +#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:250 msgid "Invalid start time. The start time cannot occur after the end time." msgstr "" @@ -3820,12 +3636,12 @@ msgstr "" msgid "Invalid request format for metadata" msgstr "" -#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:106 +#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:109 #, python-format msgid "Attach interface to %s" msgstr "" -#: nova/api/openstack/compute/plugins/v3/cells.py:187 +#: nova/api/openstack/compute/plugins/v3/cells.py:189 #, python-format msgid "Cell %s doesn't exist." 
msgstr "" @@ -3849,83 +3665,101 @@ msgstr "" msgid "Volume %(volume_id)s is not attached to the instance %(server_id)s" msgstr "" -#: nova/api/openstack/compute/plugins/v3/flavors.py:94 +#: nova/api/openstack/compute/plugins/v3/flavors.py:96 #, python-format msgid "Invalid min_ram filter [%s]" msgstr "" -#: nova/api/openstack/compute/plugins/v3/flavors.py:101 +#: nova/api/openstack/compute/plugins/v3/flavors.py:103 #, python-format msgid "Invalid min_disk filter [%s]" msgstr "" -#: nova/api/openstack/compute/plugins/v3/hypervisors.py:125 +#: nova/api/openstack/compute/plugins/v3/hypervisors.py:132 msgid "Need parameter 'query' to specify which hypervisor to filter on" msgstr "" +#: nova/api/openstack/compute/plugins/v3/pause_server.py:59 +#: nova/api/openstack/compute/plugins/v3/pause_server.py:81 +msgid "Virt driver does not implement pause function." +msgstr "" + #: nova/api/openstack/compute/plugins/v3/server_actions.py:76 #, python-format msgid "Action %s not found" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:212 +#: nova/api/openstack/compute/plugins/v3/server_diagnostics.py:46 +msgid "Unable to get diagnostics, functionality not implemented" +msgstr "" + +#: nova/api/openstack/compute/plugins/v3/server_external_events.py:94 +#, python-format +msgid "Create event %(name)s:%(tag)s for instance %(instance_uuid)s" +msgstr "" + +#: nova/api/openstack/compute/plugins/v3/servers.py:235 msgid "Invalid changes_since value" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:335 +#: nova/api/openstack/compute/plugins/v3/servers.py:306 +#, python-format +msgid "Flavor '%s' could not be found " +msgstr "" + +#: nova/api/openstack/compute/plugins/v3/servers.py:358 msgid "Unknown argument: port" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:343 +#: nova/api/openstack/compute/plugins/v3/servers.py:366 #, python-format msgid "" "Specified Fixed IP '%(addr)s' cannot be used with port '%(port)s': port " "already has a Fixed IP 
allocated." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:412 -#: nova/api/openstack/compute/plugins/v3/servers.py:587 -msgid "The request body is invalid" +#: nova/api/openstack/compute/plugins/v3/servers.py:494 +#: nova/api/openstack/compute/plugins/v3/servers.py:522 +msgid "Invalid flavor_ref provided." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:470 -#: nova/api/openstack/compute/plugins/v3/servers.py:498 -msgid "Invalid flavor_ref provided." +#: nova/api/openstack/compute/plugins/v3/servers.py:620 +msgid "The request body is invalid" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:598 +#: nova/api/openstack/compute/plugins/v3/servers.py:631 msgid "host_id cannot be updated." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:743 +#: nova/api/openstack/compute/plugins/v3/servers.py:782 msgid "Invalid image_ref provided." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:762 +#: nova/api/openstack/compute/plugins/v3/servers.py:801 msgid "Missing image_ref attribute" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:769 +#: nova/api/openstack/compute/plugins/v3/servers.py:808 msgid "Missing flavor_ref attribute" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:782 +#: nova/api/openstack/compute/plugins/v3/servers.py:822 msgid "Resize request has invalid 'flavor_ref' attribute." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:785 +#: nova/api/openstack/compute/plugins/v3/servers.py:825 msgid "Resize requests require 'flavor_ref' attribute." msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:801 +#: nova/api/openstack/compute/plugins/v3/servers.py:842 msgid "Could not parse image_ref from request." 
msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:885 +#: nova/api/openstack/compute/plugins/v3/servers.py:927 msgid "create_image entity requires name attribute" msgstr "" -#: nova/api/openstack/compute/plugins/v3/servers.py:947 +#: nova/api/openstack/compute/plugins/v3/servers.py:989 msgid "Invalid admin_password" msgstr "" @@ -3933,11 +3767,7 @@ msgstr "" msgid "Disabled reason contains invalid characters or is too long" msgstr "" -#: nova/api/openstack/compute/views/servers.py:197 -msgid "Instance has had its instance_type removed from the DB" -msgstr "" - -#: nova/api/validation/validators.py:62 +#: nova/api/validation/validators.py:73 #, python-format msgid "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" @@ -3950,78 +3780,78 @@ msgid "" " use of it in production right now may be risky." msgstr "" -#: nova/cells/messaging.py:205 +#: nova/cells/messaging.py:204 #, python-format msgid "Error processing message locally: %(exc)s" msgstr "" -#: nova/cells/messaging.py:366 nova/cells/messaging.py:374 +#: nova/cells/messaging.py:365 nova/cells/messaging.py:373 #, python-format msgid "destination is %(target_cell)s but routing_path is %(routing_path)s" msgstr "" -#: nova/cells/messaging.py:386 +#: nova/cells/messaging.py:385 #, python-format msgid "Unknown %(cell_type)s when routing to %(target_cell)s" msgstr "" -#: nova/cells/messaging.py:410 +#: nova/cells/messaging.py:409 #, python-format msgid "Error locating next hop for message: %(exc)s" msgstr "" -#: nova/cells/messaging.py:437 +#: nova/cells/messaging.py:436 #, python-format msgid "Failed to send message to cell: %(next_hop)s: %(exc)s" msgstr "" -#: nova/cells/messaging.py:516 +#: nova/cells/messaging.py:515 #, python-format msgid "Error locating next hops for message: %(exc)s" msgstr "" -#: nova/cells/messaging.py:536 +#: nova/cells/messaging.py:535 #, python-format msgid "Error sending message to next hops: %(exc)s" msgstr "" -#: nova/cells/messaging.py:554 
+#: nova/cells/messaging.py:553 #, python-format msgid "Error waiting for responses from neighbor cells: %(exc)s" msgstr "" -#: nova/cells/messaging.py:665 +#: nova/cells/messaging.py:664 #, python-format msgid "Unknown method '%(method)s' in compute API" msgstr "" -#: nova/cells/messaging.py:1103 +#: nova/cells/messaging.py:1106 #, python-format msgid "Got message to create instance fault: %(instance_fault)s" msgstr "" -#: nova/cells/messaging.py:1126 +#: nova/cells/messaging.py:1129 #, python-format msgid "" "Forcing a sync of instances, project_id=%(projid_str)s, " "updated_since=%(since_str)s" msgstr "" -#: nova/cells/messaging.py:1205 +#: nova/cells/messaging.py:1208 #, python-format msgid "No match when trying to update BDM: %(bdm)s" msgstr "" -#: nova/cells/messaging.py:1680 +#: nova/cells/messaging.py:1683 #, python-format msgid "No cell_name for %(method)s() from API" msgstr "" -#: nova/cells/messaging.py:1697 +#: nova/cells/messaging.py:1700 msgid "No cell_name for instance update from API" msgstr "" -#: nova/cells/messaging.py:1860 +#: nova/cells/messaging.py:1863 #, python-format msgid "Returning exception %s to caller" msgstr "" @@ -4034,33 +3864,38 @@ msgstr "" msgid "Failed to notify cells of BDM destroy." msgstr "" -#: nova/cells/scheduler.py:192 +#: nova/cells/scheduler.py:191 #, python-format msgid "Couldn't communicate with cell '%s'" msgstr "" -#: nova/cells/scheduler.py:196 +#: nova/cells/scheduler.py:195 msgid "Couldn't communicate with any cells" msgstr "" -#: nova/cells/scheduler.py:234 +#: nova/cells/scheduler.py:233 #, python-format msgid "" "No cells available when scheduling. 
Will retry in %(sleep_time)s " "second(s)" msgstr "" -#: nova/cells/scheduler.py:240 +#: nova/cells/scheduler.py:239 #, python-format msgid "Error scheduling instances %(instance_uuids)s" msgstr "" -#: nova/cells/state.py:352 +#: nova/cells/state.py:182 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: nova/cells/state.py:363 #, python-format msgid "Unknown cell '%(cell_name)s' when trying to update capabilities" msgstr "" -#: nova/cells/state.py:367 +#: nova/cells/state.py:378 #, python-format msgid "Unknown cell '%(cell_name)s' when trying to update capacities" msgstr "" @@ -4191,17 +4026,17 @@ msgstr "" msgid "No db access allowed in nova-compute: %s" msgstr "" -#: nova/cmd/dhcpbridge.py:109 +#: nova/cmd/dhcpbridge.py:108 #, python-format msgid "No db access allowed in nova-dhcpbridge: %s" msgstr "" -#: nova/cmd/dhcpbridge.py:132 +#: nova/cmd/dhcpbridge.py:131 #, python-format msgid "Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'" msgstr "" -#: nova/cmd/dhcpbridge.py:142 +#: nova/cmd/dhcpbridge.py:141 msgid "Environment variable 'NETWORK_ID' must be set." msgstr "" @@ -4525,1159 +4360,961 @@ msgstr "" msgid "No db access allowed in nova-network: %s" msgstr "" -#: nova/compute/api.py:353 +#: nova/compute/api.py:355 msgid "Cannot run any more instances of this type." msgstr "" -#: nova/compute/api.py:360 +#: nova/compute/api.py:362 #, python-format msgid "Can only run %s more instances of this type." msgstr "" -#: nova/compute/api.py:372 +#: nova/compute/api.py:374 #, python-format msgid "" "%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)d " "instances. %(msg)s" msgstr "" -#: nova/compute/api.py:376 +#: nova/compute/api.py:378 #, python-format msgid "" "%(overs)s quota exceeded for %(pid)s, tried to run between %(min_count)d " "and %(max_count)d instances. %(msg)s" msgstr "" -#: nova/compute/api.py:397 +#: nova/compute/api.py:399 msgid "Metadata type should be dict." 
msgstr "" -#: nova/compute/api.py:403 -#, python-format -msgid "" -"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " -"properties" -msgstr "" - -#: nova/compute/api.py:415 -#, python-format -msgid "Metadata property key '%s' is not a string." -msgstr "" - -#: nova/compute/api.py:418 -#, python-format -msgid "Metadata property value '%(v)s' for key '%(k)s' is not a string." -msgstr "" - -#: nova/compute/api.py:422 -msgid "Metadata property key blank" -msgstr "" - -#: nova/compute/api.py:425 +#: nova/compute/api.py:421 msgid "Metadata property key greater than 255 characters" msgstr "" -#: nova/compute/api.py:428 +#: nova/compute/api.py:424 msgid "Metadata property value greater than 255 characters" msgstr "" -#: nova/compute/api.py:565 -msgid "Failed to set instance name using multi_instance_display_name_template." -msgstr "" - -#: nova/compute/api.py:667 +#: nova/compute/api.py:663 msgid "Cannot attach one or more volumes to multiple instances" msgstr "" -#: nova/compute/api.py:709 +#: nova/compute/api.py:705 msgid "The requested availability zone is not available" msgstr "" -#: nova/compute/api.py:1110 +#: nova/compute/api.py:1107 msgid "" "Images with destination_type 'volume' need to have a non-zero size " "specified" msgstr "" -#: nova/compute/api.py:1141 +#: nova/compute/api.py:1138 msgid "More than one swap drive requested." msgstr "" -#: nova/compute/api.py:1290 -#: nova/tests/api/openstack/compute/test_servers.py:3145 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2484 +#: nova/compute/api.py:1277 +#: nova/tests/api/openstack/compute/test_servers.py:3199 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2488 msgid "" "Unable to launch multiple instances with a single configured port ID. " "Please launch your instance one by one with different ports." msgstr "" -#: nova/compute/api.py:1311 +#: nova/compute/api.py:1298 msgid "max_count cannot be greater than 1 if an fixed_ip is specified." 
msgstr "" -#: nova/compute/api.py:1415 +#: nova/compute/api.py:1404 msgid "instance termination disabled" msgstr "" -#: nova/compute/api.py:1430 +#: nova/compute/api.py:1418 #, python-format msgid "Working on deleting snapshot %s from shelved instance..." msgstr "" -#: nova/compute/api.py:1437 +#: nova/compute/api.py:1425 #, python-format msgid "Failed to delete snapshot from shelved instance (%s)." msgstr "" -#: nova/compute/api.py:1441 -msgid "" -"Something wrong happened when trying to delete snapshot from shelved " -"instance." -msgstr "" - -#: nova/compute/api.py:1506 +#: nova/compute/api.py:1486 msgid "Instance is already in deleting state, ignoring this request" msgstr "" -#: nova/compute/api.py:1553 +#: nova/compute/api.py:1521 #, python-format msgid "" "Found an unconfirmed migration during delete, id: %(id)s, status: " "%(status)s" msgstr "" -#: nova/compute/api.py:1563 +#: nova/compute/api.py:1531 msgid "Instance may have been confirmed during delete" msgstr "" -#: nova/compute/api.py:1580 +#: nova/compute/api.py:1548 #, python-format msgid "Migration %s may have been confirmed during delete" msgstr "" -#: nova/compute/api.py:1615 +#: nova/compute/api.py:1583 #, python-format msgid "Flavor %d not found" msgstr "" -#: nova/compute/api.py:1633 +#: nova/compute/api.py:1603 #, python-format msgid "instance's host %s is down, deleting from database" msgstr "" -#: nova/compute/api.py:1660 +#: nova/compute/api.py:1630 #, python-format msgid "Ignoring volume cleanup failure due to %s" msgstr "" -#: nova/compute/api.py:2061 +#: nova/compute/api.py:2030 #, python-format msgid "snapshot for %s" msgstr "" -#: nova/compute/api.py:2399 +#: nova/compute/api.py:2368 msgid "Resize to zero disk flavor is not allowed." msgstr "" -#: nova/compute/api.py:2438 +#: nova/compute/api.py:2407 #, python-format msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance." 
msgstr "" -#: nova/compute/api.py:2613 +#: nova/compute/api.py:2582 msgid "Cannot rescue a volume-backed instance" msgstr "" -#: nova/compute/api.py:2840 +#: nova/compute/api.py:2809 msgid "Volume must be attached in order to detach." msgstr "" -#: nova/compute/api.py:2860 +#: nova/compute/api.py:2829 msgid "Old volume is attached to a different instance." msgstr "" -#: nova/compute/api.py:2863 +#: nova/compute/api.py:2832 msgid "New volume must be detached in order to swap." msgstr "" -#: nova/compute/api.py:2866 +#: nova/compute/api.py:2835 msgid "New volume must be the same size or larger." msgstr "" -#: nova/compute/api.py:3067 +#: nova/compute/api.py:3042 #, python-format msgid "Instance compute service state on %s expected to be down, but it was up." msgstr "" -#: nova/compute/api.py:3369 +#: nova/compute/api.py:3347 msgid "Host aggregate is not empty" msgstr "" -#: nova/compute/api.py:3402 +#: nova/compute/api.py:3380 #, python-format msgid "More than 1 AZ for host %s" msgstr "" -#: nova/compute/api.py:3437 +#: nova/compute/api.py:3415 #, python-format msgid "Host already in availability zone %s" msgstr "" -#: nova/compute/api.py:3525 nova/tests/compute/test_keypairs.py:135 +#: nova/compute/api.py:3503 nova/tests/compute/test_keypairs.py:137 msgid "Keypair name contains unsafe characters" msgstr "" -#: nova/compute/api.py:3529 nova/tests/compute/test_keypairs.py:127 -#: nova/tests/compute/test_keypairs.py:131 -msgid "Keypair name must be between 1 and 255 characters long" +#: nova/compute/api.py:3509 nova/tests/compute/test_keypairs.py:127 +#: nova/tests/compute/test_keypairs.py:132 +msgid "Keypair name must be string and between 1 and 255 characters long" msgstr "" -#: nova/compute/api.py:3617 +#: nova/compute/api.py:3597 #, python-format msgid "Security group %s is not a string or unicode" msgstr "" -#: nova/compute/api.py:3620 -#, python-format -msgid "Security group %s cannot be empty." 
-msgstr "" - -#: nova/compute/api.py:3628 +#: nova/compute/api.py:3607 #, python-format msgid "" "Value (%(value)s) for parameter Group%(property)s is invalid. Content " "limited to '%(allowed)s'." msgstr "" -#: nova/compute/api.py:3634 -#, python-format -msgid "Security group %s should not be greater than 255 characters." -msgstr "" - -#: nova/compute/api.py:3652 +#: nova/compute/api.py:3627 msgid "Quota exceeded, too many security groups." msgstr "" -#: nova/compute/api.py:3655 +#: nova/compute/api.py:3630 #, python-format msgid "Create Security Group %s" msgstr "" -#: nova/compute/api.py:3667 +#: nova/compute/api.py:3642 #, python-format msgid "Security group %s already exists" msgstr "" -#: nova/compute/api.py:3680 +#: nova/compute/api.py:3655 #, python-format msgid "Unable to update system group '%s'" msgstr "" -#: nova/compute/api.py:3742 +#: nova/compute/api.py:3717 #, python-format msgid "Unable to delete system group '%s'" msgstr "" -#: nova/compute/api.py:3747 +#: nova/compute/api.py:3722 msgid "Security group is still in use" msgstr "" -#: nova/compute/api.py:3757 -msgid "Failed to update usages deallocating security group" -msgstr "" - -#: nova/compute/api.py:3760 +#: nova/compute/api.py:3735 #, python-format msgid "Delete security group %s" msgstr "" -#: nova/compute/api.py:3836 nova/compute/api.py:3919 +#: nova/compute/api.py:3811 nova/compute/api.py:3894 #, python-format msgid "Rule (%s) not found" msgstr "" -#: nova/compute/api.py:3852 +#: nova/compute/api.py:3827 msgid "Quota exceeded, too many security group rules." 
msgstr "" -#: nova/compute/api.py:3855 +#: nova/compute/api.py:3830 #, python-format msgid "" "Security group %(name)s added %(protocol)s ingress " "(%(from_port)s:%(to_port)s)" msgstr "" -#: nova/compute/api.py:3870 +#: nova/compute/api.py:3845 #, python-format msgid "" "Security group %(name)s removed %(protocol)s ingress " "(%(from_port)s:%(to_port)s)" msgstr "" -#: nova/compute/api.py:3926 +#: nova/compute/api.py:3901 msgid "Security group id should be integer" msgstr "" -#: nova/compute/claims.py:135 +#: nova/compute/claims.py:126 #, python-format -msgid "" -"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs " -"%(vcpus)d" +msgid "Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB" msgstr "" -#: nova/compute/claims.py:150 +#: nova/compute/claims.py:140 msgid "Claim successful" msgstr "" -#: nova/compute/claims.py:153 +#: nova/compute/claims.py:143 msgid "memory" msgstr "" -#: nova/compute/claims.py:162 +#: nova/compute/claims.py:152 msgid "disk" msgstr "" -#: nova/compute/claims.py:177 nova/compute/claims.py:249 +#: nova/compute/claims.py:167 nova/compute/claims.py:230 msgid "Claim pci failed." 
msgstr "" -#: nova/compute/claims.py:180 -msgid "CPUs" -msgstr "" - -#: nova/compute/claims.py:192 +#: nova/compute/claims.py:177 #, python-format msgid "Total %(type)s: %(total)d %(unit)s, used: %(used).02f %(unit)s" msgstr "" -#: nova/compute/claims.py:199 +#: nova/compute/claims.py:184 #, python-format msgid "%(type)s limit not specified, defaulting to unlimited" msgstr "" -#: nova/compute/claims.py:206 +#: nova/compute/claims.py:191 #, python-format msgid "%(type)s limit: %(limit).02f %(unit)s, free: %(free).02f %(unit)s" msgstr "" -#: nova/compute/claims.py:212 +#: nova/compute/claims.py:197 #, python-format msgid "Free %(type)s %(free).02f %(unit)s < requested %(requested)d %(unit)s" msgstr "" -#: nova/compute/flavors.py:109 +#: nova/compute/flavors.py:110 msgid "" "Flavor names can only contain alphanumeric characters, periods, dashes, " "underscores and spaces." msgstr "" -#: nova/compute/flavors.py:119 +#: nova/compute/flavors.py:120 msgid "id cannot contain leading and/or trailing whitespace(s)" msgstr "" -#: nova/compute/flavors.py:129 +#: nova/compute/flavors.py:130 msgid "" "Flavor id can only contain letters from A-Z (both cases), periods, " "dashes, underscores and spaces." msgstr "" -#: nova/compute/flavors.py:150 +#: nova/compute/flavors.py:151 #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "" -#: nova/compute/flavors.py:161 +#: nova/compute/flavors.py:162 msgid "is_public must be a boolean" msgstr "" -#: nova/compute/flavors.py:166 -#, python-format -msgid "DB error: %s" -msgstr "" - -#: nova/compute/flavors.py:177 -#, python-format -msgid "Instance type %s not found for deletion" -msgstr "" - -#: nova/compute/flavors.py:327 +#: nova/compute/flavors.py:328 msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." 
msgstr "" -#: nova/compute/manager.py:283 +#: nova/compute/manager.py:284 #, python-format msgid "Task possibly preempted: %s" msgstr "" -#: nova/compute/manager.py:365 nova/compute/manager.py:2885 -#, python-format -msgid "Error while trying to clean up image %s" -msgstr "" - -#: nova/compute/manager.py:506 +#: nova/compute/manager.py:508 msgid "Instance event failed" msgstr "" -#: nova/compute/manager.py:605 +#: nova/compute/manager.py:608 #, python-format msgid "%s is not a valid node managed by this compute host." msgstr "" -#: nova/compute/manager.py:704 +#: nova/compute/manager.py:714 #, python-format msgid "" "Deleting instance as its host (%(instance_host)s) is not equal to our " "host (%(our_host)s)." msgstr "" -#: nova/compute/manager.py:719 +#: nova/compute/manager.py:729 msgid "Instance has been marked deleted already, removing it from the hypervisor." msgstr "" -#: nova/compute/manager.py:739 +#: nova/compute/manager.py:749 msgid "" "Hypervisor driver does not support instance shared storage check, " "assuming it's not on shared storage" msgstr "" -#: nova/compute/manager.py:745 -msgid "Failed to check if instance shared" -msgstr "" - -#: nova/compute/manager.py:811 nova/compute/manager.py:862 -msgid "Failed to complete a deletion" -msgstr "" - -#: nova/compute/manager.py:844 +#: nova/compute/manager.py:854 msgid "" "Service started deleting the instance during the previous run, but did " "not finish. Restarting the deletion now." 
msgstr "" -#: nova/compute/manager.py:885 +#: nova/compute/manager.py:895 #, python-format msgid "" "Instance in transitional state (%(task_state)s) at start-up and power " "state is (%(power_state)s), clearing task state" msgstr "" -#: nova/compute/manager.py:903 -msgid "Failed to stop instance" -msgstr "" - -#: nova/compute/manager.py:915 -msgid "Failed to start instance" -msgstr "" - -#: nova/compute/manager.py:940 -msgid "Failed to revert crashed migration" -msgstr "" - -#: nova/compute/manager.py:943 +#: nova/compute/manager.py:953 msgid "Instance found in migrating state during startup. Resetting task_state" msgstr "" -#: nova/compute/manager.py:960 +#: nova/compute/manager.py:970 msgid "Rebooting instance after nova-compute restart." msgstr "" -#: nova/compute/manager.py:970 +#: nova/compute/manager.py:980 msgid "Hypervisor driver does not support resume guests" msgstr "" -#: nova/compute/manager.py:975 +#: nova/compute/manager.py:985 msgid "Failed to resume instance" msgstr "" -#: nova/compute/manager.py:984 +#: nova/compute/manager.py:994 msgid "Hypervisor driver does not support firewall rules" msgstr "" -#: nova/compute/manager.py:1009 +#: nova/compute/manager.py:1019 #, python-format msgid "VM %(state)s (Lifecycle Event)" msgstr "" -#: nova/compute/manager.py:1025 +#: nova/compute/manager.py:1035 #, python-format msgid "Unexpected power state %d" msgstr "" -#: nova/compute/manager.py:1130 +#: nova/compute/manager.py:1140 msgid "Hypervisor driver does not support security groups." 
msgstr "" -#: nova/compute/manager.py:1168 +#: nova/compute/manager.py:1178 #, python-format msgid "Volume id: %s finished being created but was not set as 'available'" msgstr "" -#: nova/compute/manager.py:1225 nova/compute/manager.py:1982 +#: nova/compute/manager.py:1235 nova/compute/manager.py:2057 msgid "Success" msgstr "" -#: nova/compute/manager.py:1249 +#: nova/compute/manager.py:1259 msgid "Instance disappeared before we could start it" msgstr "" -#: nova/compute/manager.py:1276 +#: nova/compute/manager.py:1286 msgid "Anti-affinity instance group policy was violated." msgstr "" -#: nova/compute/manager.py:1353 -msgid "Failed to dealloc network for deleted instance" -msgstr "" - -#: nova/compute/manager.py:1358 +#: nova/compute/manager.py:1369 msgid "Instance disappeared during build" msgstr "" -#: nova/compute/manager.py:1374 -msgid "Failed to dealloc network for failed instance" -msgstr "" - -#: nova/compute/manager.py:1401 +#: nova/compute/manager.py:1412 #, python-format msgid "Error: %s" msgstr "" -#: nova/compute/manager.py:1447 nova/compute/manager.py:3509 -msgid "Error trying to reschedule" -msgstr "" - -#: nova/compute/manager.py:1503 +#: nova/compute/manager.py:1514 msgid "Instance build timed out. Set to error state." msgstr "" -#: nova/compute/manager.py:1513 nova/compute/manager.py:1873 +#: nova/compute/manager.py:1524 nova/compute/manager.py:1888 msgid "Starting instance..." msgstr "" -#: nova/compute/manager.py:1531 +#: nova/compute/manager.py:1542 #, python-format msgid "" "Treating negative config value (%(retries)s) for " "'network_allocate_retries' as 0." 
msgstr "" -#: nova/compute/manager.py:1556 -#, python-format -msgid "Instance failed network setup after %(attempts)d attempt(s)" -msgstr "" - -#: nova/compute/manager.py:1560 +#: nova/compute/manager.py:1571 #, python-format msgid "Instance failed network setup (attempt %(attempt)d of %(attempts)d)" msgstr "" -#: nova/compute/manager.py:1741 -msgid "Instance failed block device setup" -msgstr "" - -#: nova/compute/manager.py:1761 nova/compute/manager.py:2098 -#: nova/compute/manager.py:4041 -msgid "Instance failed to spawn" -msgstr "" - -#: nova/compute/manager.py:1941 -msgid "Unexpected build failure, not rescheduling build." -msgstr "" - -#: nova/compute/manager.py:2006 +#: nova/compute/manager.py:2020 #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" -#: nova/compute/manager.py:2012 nova/compute/manager.py:2060 -msgid "Failed to allocate network(s)" -msgstr "" - -#: nova/compute/manager.py:2016 nova/compute/manager.py:2062 +#: nova/compute/manager.py:2030 nova/compute/manager.py:2080 msgid "Failed to allocate the network(s), not rescheduling." msgstr "" -#: nova/compute/manager.py:2086 -msgid "Failure prepping block device" -msgstr "" - -#: nova/compute/manager.py:2088 +#: nova/compute/manager.py:2106 msgid "Failure prepping block device." msgstr "" -#: nova/compute/manager.py:2111 +#: nova/compute/manager.py:2127 msgid "Could not clean up failed build, not rescheduling" msgstr "" -#: nova/compute/manager.py:2121 -msgid "Failed to deallocate networks" -msgstr "" - -#: nova/compute/manager.py:2142 -msgid "Failed to cleanup volumes for failed build, not rescheduling" -msgstr "" - -#: nova/compute/manager.py:2181 +#: nova/compute/manager.py:2185 msgid "Failed to deallocate network for instance." 
msgstr "" -#: nova/compute/manager.py:2202 +#: nova/compute/manager.py:2206 #, python-format msgid "%(action_str)s instance" msgstr "" -#: nova/compute/manager.py:2246 -#, python-format -msgid "Ignoring DiskNotFound: %s" -msgstr "" - -#: nova/compute/manager.py:2249 -#, python-format -msgid "Ignoring VolumeNotFound: %s" -msgstr "" - -#: nova/compute/manager.py:2353 +#: nova/compute/manager.py:2361 msgid "Instance disappeared during terminate" msgstr "" -#: nova/compute/manager.py:2359 nova/compute/manager.py:3689 -#: nova/compute/manager.py:5769 -msgid "Setting instance vm_state to ERROR" -msgstr "" - -#: nova/compute/manager.py:2539 +#: nova/compute/manager.py:2547 msgid "Rebuilding instance" msgstr "" -#: nova/compute/manager.py:2552 +#: nova/compute/manager.py:2560 msgid "Invalid state of instance files on shared storage" msgstr "" -#: nova/compute/manager.py:2556 +#: nova/compute/manager.py:2564 msgid "disk on shared storage, recreating using existing disk" msgstr "" -#: nova/compute/manager.py:2560 +#: nova/compute/manager.py:2568 #, python-format msgid "disk not on shared storage, rebuilding from: '%s'" msgstr "" -#: nova/compute/manager.py:2571 nova/compute/manager.py:4884 -#, python-format -msgid "Failed to get compute_info for %s" -msgstr "" - -#: nova/compute/manager.py:2647 +#: nova/compute/manager.py:2655 #, python-format msgid "bringing vm to original state: '%s'" msgstr "" -#: nova/compute/manager.py:2678 +#: nova/compute/manager.py:2686 #, python-format msgid "Detaching from volume api: %s" msgstr "" -#: nova/compute/manager.py:2705 +#: nova/compute/manager.py:2713 msgid "Rebooting instance" msgstr "" -#: nova/compute/manager.py:2722 +#: nova/compute/manager.py:2730 #, python-format msgid "" "trying to reboot a non-running instance: (state: %(state)s expected: " "%(running)s)" msgstr "" -#: nova/compute/manager.py:2758 +#: nova/compute/manager.py:2766 msgid "Reboot failed but instance is running" msgstr "" -#: nova/compute/manager.py:2766 +#: 
nova/compute/manager.py:2774 #, python-format msgid "Cannot reboot instance: %s" msgstr "" -#: nova/compute/manager.py:2778 +#: nova/compute/manager.py:2786 msgid "Instance disappeared during reboot" msgstr "" -#: nova/compute/manager.py:2846 +#: nova/compute/manager.py:2854 msgid "instance snapshotting" msgstr "" -#: nova/compute/manager.py:2852 +#: nova/compute/manager.py:2860 #, python-format msgid "" "trying to snapshot a non-running instance: (state: %(state)s expected: " "%(running)s)" msgstr "" -#: nova/compute/manager.py:2890 +#: nova/compute/manager.py:2893 +#, python-format +msgid "Error while trying to clean up image %s" +msgstr "" + +#: nova/compute/manager.py:2898 msgid "Image not found during snapshot" msgstr "" -#: nova/compute/manager.py:2972 +#: nova/compute/manager.py:2980 #, python-format msgid "Failed to set admin password. Instance %s is not running" msgstr "" -#: nova/compute/manager.py:2979 +#: nova/compute/manager.py:2987 msgid "Root password set" msgstr "" -#: nova/compute/manager.py:2984 +#: nova/compute/manager.py:2992 msgid "set_admin_password is not implemented by this driver or guest instance." 
msgstr "" -#: nova/compute/manager.py:2997 -#, python-format -msgid "set_admin_password failed: %s" -msgstr "" - -#: nova/compute/manager.py:3003 +#: nova/compute/manager.py:3011 msgid "error setting admin password" msgstr "" -#: nova/compute/manager.py:3019 +#: nova/compute/manager.py:3027 #, python-format msgid "" "trying to inject a file into a non-running (state: %(current_state)s " "expected: %(expected_state)s)" msgstr "" -#: nova/compute/manager.py:3024 +#: nova/compute/manager.py:3032 #, python-format msgid "injecting file to %s" msgstr "" -#: nova/compute/manager.py:3042 +#: nova/compute/manager.py:3050 msgid "" "Unable to find a different image to use for rescue VM, using instance's " "current image" msgstr "" -#: nova/compute/manager.py:3061 +#: nova/compute/manager.py:3069 msgid "Rescuing" msgstr "" -#: nova/compute/manager.py:3082 -msgid "Error trying to Rescue Instance" -msgstr "" - -#: nova/compute/manager.py:3086 +#: nova/compute/manager.py:3094 #, python-format msgid "Driver Error: %s" msgstr "" -#: nova/compute/manager.py:3109 +#: nova/compute/manager.py:3117 msgid "Unrescuing" msgstr "" -#: nova/compute/manager.py:3180 +#: nova/compute/manager.py:3188 #, python-format msgid "Migration %s is not found during confirmation" msgstr "" -#: nova/compute/manager.py:3185 +#: nova/compute/manager.py:3193 #, python-format msgid "Migration %s is already confirmed" msgstr "" -#: nova/compute/manager.py:3189 +#: nova/compute/manager.py:3197 #, python-format msgid "" "Unexpected confirmation status '%(status)s' of migration %(id)s, exit " "confirmation process" msgstr "" -#: nova/compute/manager.py:3203 +#: nova/compute/manager.py:3211 msgid "Instance is not found during confirmation" msgstr "" -#: nova/compute/manager.py:3384 +#: nova/compute/manager.py:3392 #, python-format msgid "Updating instance to original state: '%s'" msgstr "" -#: nova/compute/manager.py:3407 +#: nova/compute/manager.py:3415 msgid "Instance has no source host" msgstr "" -#: 
nova/compute/manager.py:3413 +#: nova/compute/manager.py:3421 msgid "destination same as source!" msgstr "" -#: nova/compute/manager.py:3431 +#: nova/compute/manager.py:3439 msgid "Migrating" msgstr "" -#: nova/compute/manager.py:3695 -#, python-format -msgid "Failed to rollback quota for failed finish_resize: %s" -msgstr "" - -#: nova/compute/manager.py:3755 +#: nova/compute/manager.py:3771 msgid "Pausing" msgstr "" -#: nova/compute/manager.py:3772 +#: nova/compute/manager.py:3788 msgid "Unpausing" msgstr "" -#: nova/compute/manager.py:3813 nova/compute/manager.py:3830 +#: nova/compute/manager.py:3829 nova/compute/manager.py:3846 msgid "Retrieving diagnostics" msgstr "" -#: nova/compute/manager.py:3866 +#: nova/compute/manager.py:3882 msgid "Resuming" msgstr "" -#: nova/compute/manager.py:4084 +#: nova/compute/manager.py:4102 msgid "Get console output" msgstr "" -#: nova/compute/manager.py:4283 +#: nova/compute/manager.py:4301 #, python-format msgid "Attaching volume %(volume_id)s to %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4292 -#, python-format -msgid "Failed to attach %(volume_id)s at %(mountpoint)s" -msgstr "" - -#: nova/compute/manager.py:4308 +#: nova/compute/manager.py:4326 #, python-format msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" msgstr "" -#: nova/compute/manager.py:4319 +#: nova/compute/manager.py:4337 msgid "Detaching volume from unknown instance" msgstr "" -#: nova/compute/manager.py:4331 -#, python-format -msgid "Failed to detach volume %(volume_id)s from %(mp)s" -msgstr "" - -#: nova/compute/manager.py:4404 -#, python-format -msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" -msgstr "" - -#: nova/compute/manager.py:4411 -#, python-format -msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" -msgstr "" - -#: nova/compute/manager.py:4504 +#: nova/compute/manager.py:4525 #, python-format msgid "allocate_port_for_instance returned %(ports)s ports" msgstr "" -#: 
nova/compute/manager.py:4524 +#: nova/compute/manager.py:4549 #, python-format msgid "Port %s is not attached" msgstr "" -#: nova/compute/manager.py:4536 nova/tests/compute/test_compute.py:10612 +#: nova/compute/manager.py:4561 nova/tests/compute/test_compute.py:10659 #, python-format msgid "Host %s not found" msgstr "" -#: nova/compute/manager.py:4690 -#, python-format -msgid "Pre live migration failed at %s" -msgstr "" - -#: nova/compute/manager.py:4753 +#: nova/compute/manager.py:4779 msgid "_post_live_migration() is started.." msgstr "" -#: nova/compute/manager.py:4825 +#: nova/compute/manager.py:4855 #, python-format msgid "Migrating instance to %s finished successfully." msgstr "" -#: nova/compute/manager.py:4827 +#: nova/compute/manager.py:4857 msgid "" "You may see the error \"libvirt: QEMU error: Domain not found: no domain " "with matching name.\" This error can be safely ignored." msgstr "" -#: nova/compute/manager.py:4852 +#: nova/compute/manager.py:4882 msgid "Post operation of migration started" msgstr "" -#: nova/compute/manager.py:5057 +#: nova/compute/manager.py:5087 msgid "An error occurred while refreshing the network cache." 
msgstr "" -#: nova/compute/manager.py:5110 +#: nova/compute/manager.py:5140 #, python-format msgid "" "Found %(migration_count)d unconfirmed migrations older than " "%(confirm_window)d seconds" msgstr "" -#: nova/compute/manager.py:5115 +#: nova/compute/manager.py:5145 #, python-format msgid "Setting migration %(migration_id)s to error: %(reason)s" msgstr "" -#: nova/compute/manager.py:5124 +#: nova/compute/manager.py:5154 #, python-format msgid "" "Automatically confirming migration %(migration_id)s for instance " "%(instance_uuid)s" msgstr "" -#: nova/compute/manager.py:5134 +#: nova/compute/manager.py:5164 #, python-format msgid "Instance %s not found" msgstr "" -#: nova/compute/manager.py:5139 +#: nova/compute/manager.py:5169 msgid "In ERROR state" msgstr "" -#: nova/compute/manager.py:5146 +#: nova/compute/manager.py:5176 #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "" -#: nova/compute/manager.py:5157 +#: nova/compute/manager.py:5187 #, python-format msgid "Error auto-confirming resize: %s. Will retry later." msgstr "" -#: nova/compute/manager.py:5186 -msgid "Periodic task failed to offload instance." -msgstr "" - -#: nova/compute/manager.py:5206 +#: nova/compute/manager.py:5236 #, python-format msgid "" "Running instance usage audit for host %(host)s from %(begin_time)s to " "%(end_time)s. %(number_instances)s instances." msgstr "" -#: nova/compute/manager.py:5226 -#, python-format -msgid "Failed to generate usage audit for instance on host %s" -msgstr "" - -#: nova/compute/manager.py:5255 +#: nova/compute/manager.py:5285 msgid "Updating bandwidth usage cache" msgstr "" -#: nova/compute/manager.py:5277 +#: nova/compute/manager.py:5307 msgid "Bandwidth usage not supported by hypervisor." msgstr "" -#: nova/compute/manager.py:5400 +#: nova/compute/manager.py:5430 #, python-format msgid "" "Found %(num_db_instances)s in the database and %(num_vm_instances)s on " "the hypervisor." 
msgstr "" -#: nova/compute/manager.py:5466 +#: nova/compute/manager.py:5496 #, python-format msgid "" "During the sync_power process the instance has moved from host %(src)s to" " host %(dst)s" msgstr "" -#: nova/compute/manager.py:5479 +#: nova/compute/manager.py:5509 #, python-format msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." msgstr "" -#: nova/compute/manager.py:5504 +#: nova/compute/manager.py:5534 msgid "Instance shutdown by itself. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5516 nova/compute/manager.py:5525 -#: nova/compute/manager.py:5556 nova/compute/manager.py:5567 -msgid "error during stop() in sync_power_state." -msgstr "" - -#: nova/compute/manager.py:5520 +#: nova/compute/manager.py:5553 msgid "Instance is suspended unexpectedly. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5536 +#: nova/compute/manager.py:5569 msgid "Instance is paused unexpectedly. Ignore." msgstr "" -#: nova/compute/manager.py:5542 +#: nova/compute/manager.py:5575 msgid "Instance is unexpectedly not found. Ignore." msgstr "" -#: nova/compute/manager.py:5548 +#: nova/compute/manager.py:5581 msgid "Instance is not stopped. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5562 +#: nova/compute/manager.py:5595 msgid "Paused instance shutdown by itself. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5576 +#: nova/compute/manager.py:5609 msgid "Instance is not (soft-)deleted." 
msgstr "" -#: nova/compute/manager.py:5605 +#: nova/compute/manager.py:5639 msgid "Reclaiming deleted instance" msgstr "" -#: nova/compute/manager.py:5609 +#: nova/compute/manager.py:5643 #, python-format msgid "Periodic reclaim failed to delete instance: %s" msgstr "" -#: nova/compute/manager.py:5634 +#: nova/compute/manager.py:5668 #, python-format msgid "Deleting orphan compute node %s" msgstr "" -#: nova/compute/manager.py:5642 nova/compute/resource_tracker.py:391 +#: nova/compute/manager.py:5676 nova/compute/resource_tracker.py:406 #, python-format msgid "No service record for host %s" msgstr "" -#: nova/compute/manager.py:5682 +#: nova/compute/manager.py:5716 #, python-format msgid "" "Detected instance with name label '%s' which is marked as DELETED but " "still present on host." msgstr "" -#: nova/compute/manager.py:5688 +#: nova/compute/manager.py:5722 #, python-format msgid "" "Powering off instance with name label '%s' which is marked as DELETED but" " still present on host." msgstr "" -#: nova/compute/manager.py:5697 +#: nova/compute/manager.py:5731 msgid "set_bootable is not implemented for the current driver" msgstr "" -#: nova/compute/manager.py:5702 +#: nova/compute/manager.py:5736 msgid "Failed to power off instance" msgstr "" -#: nova/compute/manager.py:5706 +#: nova/compute/manager.py:5740 #, python-format msgid "" "Destroying instance with name label '%s' which is marked as DELETED but " "still present on host." 
msgstr "" -#: nova/compute/manager.py:5716 +#: nova/compute/manager.py:5750 #, python-format msgid "Periodic cleanup failed to delete instance: %s" msgstr "" -#: nova/compute/manager.py:5720 +#: nova/compute/manager.py:5754 #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "" -#: nova/compute/manager.py:5752 +#: nova/compute/manager.py:5786 #, python-format msgid "Setting instance back to %(state)s after: %(error)s" msgstr "" -#: nova/compute/manager.py:5762 +#: nova/compute/manager.py:5796 #, python-format msgid "Setting instance back to ACTIVE after: %s" msgstr "" -#: nova/compute/resource_tracker.py:105 +#: nova/compute/resource_tracker.py:111 msgid "" "Host field should not be set on the instance until resources have been " "claimed." msgstr "" -#: nova/compute/resource_tracker.py:110 +#: nova/compute/resource_tracker.py:116 msgid "" "Node field should not be set on the instance until resources have been " "claimed." msgstr "" -#: nova/compute/resource_tracker.py:272 +#: nova/compute/resource_tracker.py:276 #, python-format msgid "Cannot get the metrics from %s." msgstr "" -#: nova/compute/resource_tracker.py:291 +#: nova/compute/resource_tracker.py:295 msgid "Auditing locally available compute resources" msgstr "" -#: nova/compute/resource_tracker.py:296 +#: nova/compute/resource_tracker.py:300 msgid "" "Virt driver does not support 'get_available_resource' Compute tracking " "is disabled." 
msgstr "" -#: nova/compute/resource_tracker.py:371 +#: nova/compute/resource_tracker.py:375 #, python-format msgid "Compute_service record created for %(host)s:%(node)s" msgstr "" -#: nova/compute/resource_tracker.py:377 +#: nova/compute/resource_tracker.py:381 #, python-format msgid "Compute_service record updated for %(host)s:%(node)s" msgstr "" -#: nova/compute/resource_tracker.py:430 +#: nova/compute/resource_tracker.py:446 #, python-format -msgid "Free ram (MB): %s" +msgid "" +"Total physical ram (MB): %(pram)s, total allocated virtual ram (MB): " +"%(vram)s" msgstr "" -#: nova/compute/resource_tracker.py:431 +#: nova/compute/resource_tracker.py:450 #, python-format msgid "Free disk (GB): %s" msgstr "" -#: nova/compute/resource_tracker.py:436 +#: nova/compute/resource_tracker.py:454 #, python-format -msgid "Free VCPUS: %s" +msgid "Total usable vcpus: %(tcpu)s, total allocated vcpus: %(ucpu)s" msgstr "" -#: nova/compute/resource_tracker.py:438 +#: nova/compute/resource_tracker.py:458 msgid "Free VCPU information unavailable" msgstr "" -#: nova/compute/resource_tracker.py:441 +#: nova/compute/resource_tracker.py:461 #, python-format msgid "PCI stats: %s" msgstr "" -#: nova/compute/resource_tracker.py:486 +#: nova/compute/resource_tracker.py:512 #, python-format msgid "Updating from migration %s" msgstr "" -#: nova/compute/resource_tracker.py:553 +#: nova/compute/resource_tracker.py:577 msgid "Instance not resizing, skipping migration." msgstr "" -#: nova/compute/resource_tracker.py:568 +#: nova/compute/resource_tracker.py:592 msgid "Flavor could not be found, skipping migration." 
msgstr "" -#: nova/compute/resource_tracker.py:658 +#: nova/compute/resource_tracker.py:682 #, python-format msgid "" "Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB " "memory)" msgstr "" -#: nova/compute/resource_tracker.py:672 +#: nova/compute/resource_tracker.py:696 #, python-format msgid "Missing keys: %s" msgstr "" @@ -5691,24 +5328,8 @@ msgstr "" msgid "Unable to find host for Instance %s" msgstr "" -#: nova/compute/utils.py:204 -#, python-format -msgid "Can't access image %(image_id)s: %(error)s" -msgstr "" - -#: nova/compute/utils.py:328 -#, python-format -msgid "" -"No host name specified for the notification of HostAPI.%s and it will be " -"ignored" -msgstr "" - -#: nova/compute/utils.py:456 -#, python-format -msgid "" -"Value of 0 or None specified for %s. This behaviour will change in " -"meaning in the K release, to mean 'call at the default rate' rather than " -"'do not call'. To keep the 'do not call' behaviour, use a negative value." +#: nova/compute/stats.py:49 +msgid "Unexpected type adding stats" msgstr "" #: nova/compute/monitors/__init__.py:176 @@ -5733,47 +5354,47 @@ msgstr "" msgid "Not all properties needed are implemented in the compute driver: %s" msgstr "" -#: nova/conductor/api.py:318 +#: nova/conductor/api.py:315 msgid "nova-conductor connection established successfully" msgstr "" -#: nova/conductor/api.py:323 +#: nova/conductor/api.py:320 msgid "" "Timed out waiting for nova-conductor. Is it running? Or did this service" " start before nova-conductor? Reattempting establishment of nova-" "conductor connection..." 
msgstr "" -#: nova/conductor/manager.py:124 +#: nova/conductor/manager.py:123 #, python-format msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s" msgstr "" -#: nova/conductor/manager.py:523 +#: nova/conductor/manager.py:519 msgid "No valid host found for cold migrate" msgstr "" -#: nova/conductor/manager.py:586 +#: nova/conductor/manager.py:582 #, python-format msgid "" "Migration of instance %(instance_id)s to host %(dest)s unexpectedly " "failed." msgstr "" -#: nova/conductor/manager.py:673 +#: nova/conductor/manager.py:669 #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "" -#: nova/conductor/manager.py:696 +#: nova/conductor/manager.py:692 msgid "No valid host found for unshelve instance" msgstr "" -#: nova/conductor/manager.py:700 +#: nova/conductor/manager.py:696 msgid "Unshelve attempted but vm_state not SHELVED or SHELVED_OFFLOADED" msgstr "" -#: nova/conductor/manager.py:737 +#: nova/conductor/manager.py:733 msgid "No valid host found for rebuild" msgstr "" @@ -5845,85 +5466,85 @@ msgstr "" msgid "Failed to notify cells of instance update" msgstr "" -#: nova/db/api.py:1685 +#: nova/db/api.py:1683 msgid "Failed to notify cells of bw_usage update" msgstr "" -#: nova/db/sqlalchemy/api.py:204 +#: nova/db/sqlalchemy/api.py:207 #, python-format msgid "Deadlock detected when running '%(func_name)s': Retrying..." 
msgstr "" -#: nova/db/sqlalchemy/api.py:245 +#: nova/db/sqlalchemy/api.py:248 msgid "model or base_model parameter should be subclass of NovaBase" msgstr "" -#: nova/db/sqlalchemy/api.py:258 -#: nova/openstack/common/db/sqlalchemy/utils.py:174 -#: nova/virt/baremetal/db/sqlalchemy/api.py:60 +#: nova/db/sqlalchemy/api.py:261 +#: nova/openstack/common/db/sqlalchemy/utils.py:173 +#: nova/virt/baremetal/db/sqlalchemy/api.py:61 #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:750 +#: nova/db/sqlalchemy/api.py:753 #, python-format msgid "Invalid floating ip id %s in request" msgstr "" -#: nova/db/sqlalchemy/api.py:855 +#: nova/db/sqlalchemy/api.py:858 msgid "Failed to update usages bulk deallocating floating IP" msgstr "" -#: nova/db/sqlalchemy/api.py:1011 +#: nova/db/sqlalchemy/api.py:1007 #, python-format msgid "Invalid floating IP %s in request" msgstr "" -#: nova/db/sqlalchemy/api.py:1313 nova/db/sqlalchemy/api.py:1352 +#: nova/db/sqlalchemy/api.py:1310 nova/db/sqlalchemy/api.py:1349 #, python-format msgid "Invalid fixed IP Address %s in request" msgstr "" -#: nova/db/sqlalchemy/api.py:1487 +#: nova/db/sqlalchemy/api.py:1484 #, python-format msgid "Invalid virtual interface address %s in request" msgstr "" -#: nova/db/sqlalchemy/api.py:1581 +#: nova/db/sqlalchemy/api.py:1578 #, python-format msgid "" "Unknown osapi_compute_unique_server_name_scope value: %s Flag must be " "empty, \"global\" or \"project\"" msgstr "" -#: nova/db/sqlalchemy/api.py:1741 +#: nova/db/sqlalchemy/api.py:1738 #, python-format msgid "Invalid instance id %s in request" msgstr "" -#: nova/db/sqlalchemy/api.py:2019 +#: nova/db/sqlalchemy/api.py:2017 #, python-format msgid "Invalid field name: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:3248 +#: nova/db/sqlalchemy/api.py:3246 #, python-format msgid "Change will make usage less than 0 for the following resources: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:4899 +#: nova/db/sqlalchemy/api.py:4898 
#, python-format msgid "" "Volume(%s) has lower stats then what is in the database. Instance must " "have been rebooted or crashed. Updating totals." msgstr "" -#: nova/db/sqlalchemy/api.py:5256 +#: nova/db/sqlalchemy/api.py:5262 #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" -#: nova/db/sqlalchemy/api.py:5646 +#: nova/db/sqlalchemy/api.py:5652 #, python-format msgid "IntegrityError detected when archiving table %s" msgstr "" @@ -5956,15 +5577,15 @@ msgstr "" msgid "Extra column %(table)s.%(column)s in shadow table" msgstr "" -#: nova/db/sqlalchemy/utils.py:105 +#: nova/db/sqlalchemy/utils.py:103 msgid "Specify `table_name` or `table` param" msgstr "" -#: nova/db/sqlalchemy/utils.py:108 +#: nova/db/sqlalchemy/utils.py:106 msgid "Specify only one param `table_name` `table`" msgstr "" -#: nova/db/sqlalchemy/utils.py:131 nova/db/sqlalchemy/utils.py:135 +#: nova/db/sqlalchemy/utils.py:129 nova/db/sqlalchemy/utils.py:133 #: nova/db/sqlalchemy/migrate_repo/versions/216_havana.py:84 #: nova/db/sqlalchemy/migrate_repo/versions/216_havana.py:1103 msgid "Exception while creating table." @@ -5988,12 +5609,12 @@ msgid "" "%(ex)s" msgstr "" -#: nova/image/glance.py:306 +#: nova/image/glance.py:327 #, python-format msgid "Failed to instantiate the download handler for %(scheme)s" msgstr "" -#: nova/image/glance.py:322 +#: nova/image/glance.py:343 #, python-format msgid "Successfully transferred using %s" msgstr "" @@ -6139,7 +5760,7 @@ msgstr "" msgid "Not deleting key %s" msgstr "" -#: nova/network/api.py:195 nova/network/neutronv2/api.py:797 +#: nova/network/api.py:196 nova/network/neutronv2/api.py:812 #, python-format msgid "re-assign floating IP %(address)s from instance %(instance_id)s" msgstr "" @@ -6166,65 +5787,65 @@ msgstr "" msgid "Fixed ip %s not found" msgstr "" -#: nova/network/floating_ips.py:175 +#: nova/network/floating_ips.py:176 #, python-format msgid "Floating IP %s is not associated. Ignore." 
msgstr "" -#: nova/network/floating_ips.py:194 +#: nova/network/floating_ips.py:195 #, python-format msgid "Address |%(address)s| is not allocated" msgstr "" -#: nova/network/floating_ips.py:198 +#: nova/network/floating_ips.py:199 #, python-format msgid "Address |%(address)s| is not allocated to your project |%(project)s|" msgstr "" -#: nova/network/floating_ips.py:218 +#: nova/network/floating_ips.py:219 #, python-format msgid "Quota exceeded for %s, tried to allocate floating IP" msgstr "" -#: nova/network/floating_ips.py:277 +#: nova/network/floating_ips.py:278 msgid "Failed to update usages deallocating floating IP" msgstr "" -#: nova/network/floating_ips.py:375 +#: nova/network/floating_ips.py:376 #, python-format msgid "Failed to disassociated floating address: %s" msgstr "" -#: nova/network/floating_ips.py:380 +#: nova/network/floating_ips.py:381 #, python-format msgid "Interface %s not found" msgstr "" -#: nova/network/floating_ips.py:539 +#: nova/network/floating_ips.py:540 #, python-format msgid "Starting migration network for instance %s" msgstr "" -#: nova/network/floating_ips.py:545 +#: nova/network/floating_ips.py:546 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " "%(instance_uuid)s. Will not migrate it " msgstr "" -#: nova/network/floating_ips.py:574 +#: nova/network/floating_ips.py:575 #, python-format msgid "Finishing migration network for instance %s" msgstr "" -#: nova/network/floating_ips.py:581 +#: nova/network/floating_ips.py:582 #, python-format msgid "" "Floating ip address |%(address)s| no longer belongs to instance " "%(instance_uuid)s. Will notsetup it." msgstr "" -#: nova/network/floating_ips.py:624 +#: nova/network/floating_ips.py:625 #, python-format msgid "" "Database inconsistency: DNS domain |%s| is registered in the Nova db but " @@ -6232,12 +5853,12 @@ msgid "" "ignored." 
msgstr "" -#: nova/network/floating_ips.py:664 +#: nova/network/floating_ips.py:665 #, python-format msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." msgstr "" -#: nova/network/floating_ips.py:673 +#: nova/network/floating_ips.py:674 #, python-format msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." msgstr "" @@ -6283,52 +5904,52 @@ msgid "" "%(top)r" msgstr "" -#: nova/network/linux_net.py:769 +#: nova/network/linux_net.py:777 #, python-format msgid "Removed %(num)d duplicate rules for floating ip %(float)s" msgstr "" -#: nova/network/linux_net.py:817 +#: nova/network/linux_net.py:825 #, python-format msgid "Error deleting conntrack entries for %s" msgstr "" -#: nova/network/linux_net.py:1072 +#: nova/network/linux_net.py:1091 #, python-format msgid "Hupping dnsmasq threw %s" msgstr "" -#: nova/network/linux_net.py:1154 +#: nova/network/linux_net.py:1172 #, python-format msgid "killing radvd threw %s" msgstr "" -#: nova/network/linux_net.py:1308 +#: nova/network/linux_net.py:1333 #, python-format msgid "Unable to execute %(cmd)s. Exception: %(exception)s" msgstr "" -#: nova/network/linux_net.py:1366 +#: nova/network/linux_net.py:1391 #, python-format msgid "Failed removing net device: '%s'" msgstr "" -#: nova/network/linux_net.py:1543 +#: nova/network/linux_net.py:1568 #, python-format msgid "Adding interface %(interface)s to bridge %(bridge)s" msgstr "" -#: nova/network/linux_net.py:1549 +#: nova/network/linux_net.py:1574 #, python-format msgid "Failed to add interface: %s" msgstr "" -#: nova/network/manager.py:828 +#: nova/network/manager.py:813 #, python-format msgid "instance-dns-zone not found |%s|." msgstr "" -#: nova/network/manager.py:835 +#: nova/network/manager.py:820 #, python-format msgid "" "instance-dns-zone is |%(domain)s|, which is in availability zone " @@ -6336,88 +5957,83 @@ msgid "" "created." 
msgstr "" -#: nova/network/manager.py:874 -#, python-format -msgid "Quota exceeded for %s, tried to allocate fixed IP" -msgstr "" - -#: nova/network/manager.py:934 +#: nova/network/manager.py:943 msgid "Error cleaning up fixed ip allocation. Manual cleanup may be required." msgstr "" -#: nova/network/manager.py:964 +#: nova/network/manager.py:973 msgid "Failed to update usages deallocating fixed IP" msgstr "" -#: nova/network/manager.py:988 +#: nova/network/manager.py:997 #, python-format msgid "Unable to release %s because vif doesn't exist." msgstr "" -#: nova/network/manager.py:1029 +#: nova/network/manager.py:1038 #, python-format msgid "IP %s leased that is not associated" msgstr "" -#: nova/network/manager.py:1035 +#: nova/network/manager.py:1044 #, python-format msgid "IP |%s| leased that isn't allocated" msgstr "" -#: nova/network/manager.py:1044 +#: nova/network/manager.py:1053 #, python-format msgid "IP %s released that is not associated" msgstr "" -#: nova/network/manager.py:1048 +#: nova/network/manager.py:1057 #, python-format msgid "IP %s released that was not leased" msgstr "" -#: nova/network/manager.py:1066 +#: nova/network/manager.py:1075 #, python-format msgid "%s must be an integer" msgstr "" -#: nova/network/manager.py:1098 +#: nova/network/manager.py:1107 msgid "Maximum allowed length for 'label' is 255." msgstr "" -#: nova/network/manager.py:1118 +#: nova/network/manager.py:1127 #, python-format msgid "" "Subnet(s) too large, defaulting to /%s. To override, specify " "network_size flag." 
msgstr "" -#: nova/network/manager.py:1203 +#: nova/network/manager.py:1212 msgid "cidr already in use" msgstr "" -#: nova/network/manager.py:1206 +#: nova/network/manager.py:1215 #, python-format msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" msgstr "" -#: nova/network/manager.py:1217 +#: nova/network/manager.py:1226 #, python-format msgid "" "requested cidr (%(cidr)s) conflicts with existing smaller cidr " "(%(smaller)s)" msgstr "" -#: nova/network/manager.py:1311 +#: nova/network/manager.py:1320 #, python-format msgid "Network must be disassociated from project %s before delete" msgstr "" -#: nova/network/manager.py:1937 +#: nova/network/manager.py:1955 msgid "" "The sum between the number of networks and the vlan start cannot be " "greater than 4094" msgstr "" -#: nova/network/manager.py:1944 +#: nova/network/manager.py:1962 #, python-format msgid "" "The network range is not big enough to fit %(num_networks)s networks. " @@ -6447,103 +6063,87 @@ msgstr "" msgid "Cannot delete domain |%s|" msgstr "" -#: nova/network/model.py:94 +#: nova/network/model.py:96 #, python-format msgid "Invalid IP format %s" msgstr "" -#: nova/network/neutronv2/api.py:212 -msgid "Neutron error: quota exceeded" -msgstr "" - -#: nova/network/neutronv2/api.py:215 +#: nova/network/neutronv2/api.py:230 #, python-format msgid "Neutron error creating port on network %s" msgstr "" -#: nova/network/neutronv2/api.py:248 +#: nova/network/neutronv2/api.py:263 #, python-format msgid "empty project id for instance %s" msgstr "" -#: nova/network/neutronv2/api.py:283 +#: nova/network/neutronv2/api.py:298 msgid "No network configured!" msgstr "" -#: nova/network/neutronv2/api.py:303 +#: nova/network/neutronv2/api.py:318 #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more " "specific." 
msgstr "" -#: nova/network/neutronv2/api.py:373 +#: nova/network/neutronv2/api.py:388 #, python-format msgid "Failed to update port %s" msgstr "" -#: nova/network/neutronv2/api.py:380 +#: nova/network/neutronv2/api.py:395 #, python-format msgid "Failed to delete port %s" msgstr "" -#: nova/network/neutronv2/api.py:443 +#: nova/network/neutronv2/api.py:458 #, python-format msgid "Unable to reset device ID for port %s" msgstr "" -#: nova/network/neutronv2/api.py:451 +#: nova/network/neutronv2/api.py:466 #, python-format msgid "Port %s does not exist" msgstr "" -#: nova/network/neutronv2/api.py:454 nova/network/neutronv2/api.py:478 +#: nova/network/neutronv2/api.py:469 nova/network/neutronv2/api.py:493 #, python-format msgid "Failed to delete neutron port %s" msgstr "" -#: nova/network/neutronv2/api.py:576 -#, python-format -msgid "" -"Unable to update port %(portid)s on subnet %(subnet_id)s with failure: " -"%(exception)s" -msgstr "" - -#: nova/network/neutronv2/api.py:605 -#, python-format -msgid "Unable to update port %(portid)s with failure: %(exception)s" -msgstr "" - -#: nova/network/neutronv2/api.py:632 +#: nova/network/neutronv2/api.py:647 msgid "Multiple possible networks found, use a Network ID to be more specific." 
msgstr "" -#: nova/network/neutronv2/api.py:651 +#: nova/network/neutronv2/api.py:666 #, python-format msgid "Failed to access port %s" msgstr "" -#: nova/network/neutronv2/api.py:880 +#: nova/network/neutronv2/api.py:898 #, python-format msgid "Unable to access floating IP %s" msgstr "" -#: nova/network/neutronv2/api.py:968 +#: nova/network/neutronv2/api.py:986 #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "" -#: nova/network/neutronv2/api.py:1012 +#: nova/network/neutronv2/api.py:1030 #, python-format msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" msgstr "" -#: nova/network/neutronv2/api.py:1071 +#: nova/network/neutronv2/api.py:1089 #, python-format msgid "Unable to update host of port %s" msgstr "" -#: nova/network/neutronv2/api.py:1107 +#: nova/network/neutronv2/api.py:1125 #, python-format msgid "" "Network %(id)s not matched with the tenants network! The ports tenant " @@ -6708,7 +6308,7 @@ msgstr "" msgid "A NetworkModel is required here" msgstr "" -#: nova/objects/instance.py:431 +#: nova/objects/instance.py:433 #, python-format msgid "No save handler for %s" msgstr "" @@ -6717,11 +6317,11 @@ msgstr "" msgid "Failed to notify cells of instance info cache update" msgstr "" -#: nova/openstack/common/gettextutils.py:320 +#: nova/openstack/common/gettextutils.py:301 msgid "Message objects do not support addition." msgstr "" -#: nova/openstack/common/gettextutils.py:330 +#: nova/openstack/common/gettextutils.py:311 msgid "" "Message objects do not support str() because they may contain non-ascii " "characters. Please use unicode() or translate() instead." 
@@ -6741,22 +6341,22 @@ msgstr "" msgid "Unable to acquire lock on `%(filename)s` due to %(exception)s" msgstr "" -#: nova/openstack/common/log.py:327 +#: nova/openstack/common/log.py:276 #, python-format msgid "Deprecated: %s" msgstr "" -#: nova/openstack/common/log.py:436 +#: nova/openstack/common/log.py:385 #, python-format msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: nova/openstack/common/log.py:486 +#: nova/openstack/common/log.py:446 #, python-format msgid "syslog facility must be one of: %s" msgstr "" -#: nova/openstack/common/log.py:729 +#: nova/openstack/common/log.py:689 #, python-format msgid "Fatal call to deprecated config: %(msg)s" msgstr "" @@ -6815,40 +6415,50 @@ msgstr "" msgid "process_input not supported over SSH" msgstr "" -#: nova/openstack/common/sslutils.py:98 +#: nova/openstack/common/sslutils.py:95 #, python-format msgid "Invalid SSL version : %s" msgstr "" -#: nova/openstack/common/strutils.py:92 +#: nova/openstack/common/strutils.py:114 #, python-format msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" msgstr "" -#: nova/openstack/common/strutils.py:197 +#: nova/openstack/common/strutils.py:219 #, python-format msgid "Invalid unit system: \"%s\"" msgstr "" -#: nova/openstack/common/strutils.py:206 +#: nova/openstack/common/strutils.py:228 #, python-format msgid "Invalid string format: %s" msgstr "" -#: nova/openstack/common/versionutils.py:69 +#: nova/openstack/common/versionutils.py:86 #, python-format msgid "" "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " "may be removed in %(remove_in)s." msgstr "" -#: nova/openstack/common/versionutils.py:73 +#: nova/openstack/common/versionutils.py:90 #, python-format msgid "" "%(what)s is deprecated as of %(as_of)s and may be removed in " "%(remove_in)s. It will not be superseded." 
msgstr "" +#: nova/openstack/common/versionutils.py:94 +#, python-format +msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s." +msgstr "" + +#: nova/openstack/common/versionutils.py:97 +#, python-format +msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded." +msgstr "" + #: nova/openstack/common/db/sqlalchemy/migration.py:226 #, python-format msgid "" @@ -6862,18 +6472,18 @@ msgid "" "the current version of the schema manually." msgstr "" -#: nova/openstack/common/db/sqlalchemy/utils.py:119 +#: nova/openstack/common/db/sqlalchemy/utils.py:118 msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" -#: nova/openstack/common/db/sqlalchemy/utils.py:162 +#: nova/openstack/common/db/sqlalchemy/utils.py:161 #, python-format msgid "" "There is no `deleted` column in `%s` table. Project doesn't use soft-" "deleted feature." msgstr "" -#: nova/openstack/common/db/sqlalchemy/utils.py:181 +#: nova/openstack/common/db/sqlalchemy/utils.py:180 #, python-format msgid "There is no `project_id` column in `%s` table." 
msgstr "" @@ -6900,7 +6510,7 @@ msgstr "" msgid "Unsupported id columns type" msgstr "" -#: nova/pci/pci_manager.py:156 +#: nova/pci/pci_manager.py:113 #, python-format msgid "" "Trying to remove device with %(status)s ownership %(instance_uuid)s " @@ -6932,22 +6542,30 @@ msgstr "" msgid "Driver must implement select_destinations" msgstr "" -#: nova/scheduler/filter_scheduler.py:80 +#: nova/scheduler/filter_scheduler.py:84 #, python-format msgid "" "Attempting to build %(num_instances)d instance(s) uuids: " "%(instance_uuids)s" msgstr "" -#: nova/scheduler/filter_scheduler.py:109 +#: nova/scheduler/filter_scheduler.py:113 #, python-format msgid "Choosing host %(weighed_host)s for instance %(instance_uuid)s" msgstr "" -#: nova/scheduler/filter_scheduler.py:169 +#: nova/scheduler/filter_scheduler.py:173 msgid "Instance disappeared during scheduling" msgstr "" +#: nova/scheduler/filter_scheduler.py:219 +msgid "ServerGroupAffinityFilter not configured" +msgstr "" + +#: nova/scheduler/filter_scheduler.py:224 +msgid "ServerGroupAntiAffinityFilter not configured" +msgstr "" + #: nova/scheduler/host_manager.py:169 #, python-format msgid "Metric name unknown of %r" @@ -6986,7 +6604,6 @@ msgid "No nodes matched due to not matching 'force_nodes' value of '%s'" msgstr "" #: nova/scheduler/host_manager.py:390 -#: nova/scheduler/filters/trusted_filter.py:208 #, python-format msgid "No service for compute ID %s" msgstr "" @@ -7027,7 +6644,7 @@ msgstr "" msgid "Invalid value for 'scheduler_max_attempts', must be >= 1" msgstr "" -#: nova/scheduler/utils.py:233 +#: nova/scheduler/utils.py:231 #, python-format msgid "Ignoring the invalid elements of the option %(name)s: %(options)s" msgstr "" @@ -7037,6 +6654,10 @@ msgstr "" msgid "%(host_state)s has not been heard from in a while" msgstr "" +#: nova/scheduler/filters/exact_core_filter.py:36 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + #: nova/servicegroup/api.py:70 #, python-format msgid "unknown ServiceGroup 
driver name: %s" @@ -7134,15 +6755,15 @@ msgstr "" msgid "status must be available" msgstr "" -#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:245 +#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:290 msgid "already attached" msgstr "" -#: nova/tests/fake_volume.py:195 nova/volume/cinder.py:256 +#: nova/tests/fake_volume.py:195 nova/volume/cinder.py:301 msgid "Instance and volume not in same availability_zone" msgstr "" -#: nova/tests/fake_volume.py:200 nova/volume/cinder.py:262 +#: nova/tests/fake_volume.py:200 nova/volume/cinder.py:307 msgid "already detached" msgstr "" @@ -7150,53 +6771,53 @@ msgstr "" msgid "unexpected role header" msgstr "" -#: nova/tests/api/openstack/test_faults.py:46 +#: nova/tests/api/openstack/test_faults.py:47 msgid "Should be translated." msgstr "" -#: nova/tests/api/openstack/compute/test_servers.py:3225 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2434 +#: nova/tests/api/openstack/compute/test_servers.py:3279 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2438 msgid "" "Quota exceeded for instances: Requested 1, but already used 10 of 10 " "instances" msgstr "" -#: nova/tests/api/openstack/compute/test_servers.py:3230 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2439 +#: nova/tests/api/openstack/compute/test_servers.py:3284 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2443 msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram" msgstr "" -#: nova/tests/api/openstack/compute/test_servers.py:3235 -#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2444 +#: nova/tests/api/openstack/compute/test_servers.py:3289 +#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2448 msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores" msgstr "" -#: nova/tests/compute/test_compute.py:1680 -#: nova/tests/compute/test_compute.py:1707 -#: nova/tests/compute/test_compute.py:1785 -#: 
nova/tests/compute/test_compute.py:1825 -#: nova/tests/compute/test_compute.py:5603 +#: nova/tests/compute/test_compute.py:1696 +#: nova/tests/compute/test_compute.py:1723 +#: nova/tests/compute/test_compute.py:1801 +#: nova/tests/compute/test_compute.py:1841 +#: nova/tests/compute/test_compute.py:5644 #, python-format msgid "Running instances: %s" msgstr "" -#: nova/tests/compute/test_compute.py:1687 -#: nova/tests/compute/test_compute.py:1755 -#: nova/tests/compute/test_compute.py:1793 +#: nova/tests/compute/test_compute.py:1703 +#: nova/tests/compute/test_compute.py:1771 +#: nova/tests/compute/test_compute.py:1809 #, python-format msgid "After terminating instances: %s" msgstr "" -#: nova/tests/compute/test_compute.py:5614 +#: nova/tests/compute/test_compute.py:5655 #, python-format msgid "After force-killing instances: %s" msgstr "" -#: nova/tests/compute/test_compute.py:6229 +#: nova/tests/compute/test_compute.py:6271 msgid "wrong host/node" msgstr "" -#: nova/tests/compute/test_compute.py:10820 +#: nova/tests/compute/test_compute.py:10867 msgid "spawn error" msgstr "" @@ -7204,7 +6825,16 @@ msgstr "" msgid "Keypair data is invalid" msgstr "" -#: nova/tests/db/test_migrations.py:866 +#: nova/tests/compute/test_resources.py:78 +#, python-format +msgid "Free %(free)d < requested %(requested)d " +msgstr "" + +#: nova/tests/compute/test_resources.py:329 +msgid "Free CPUs 2.00 VCPUs < requested 5 VCPUs" +msgstr "" + +#: nova/tests/db/test_migrations.py:923 #, python-format msgid "" "The following migrations are missing a downgrade:\n" @@ -7283,56 +6913,56 @@ msgstr "" msgid "Unexpected status code" msgstr "" -#: nova/tests/virt/hyperv/test_hypervapi.py:517 +#: nova/tests/virt/hyperv/test_hypervapi.py:513 msgid "fake vswitch not found" msgstr "" -#: nova/tests/virt/hyperv/test_hypervapi.py:970 +#: nova/tests/virt/hyperv/test_hypervapi.py:966 msgid "Simulated failure" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1041 +#: 
nova/tests/virt/libvirt/fakelibvirt.py:1048 msgid "Expected a list for 'auth' parameter" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1045 +#: nova/tests/virt/libvirt/fakelibvirt.py:1052 msgid "Expected a function in 'auth[0]' parameter" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1049 +#: nova/tests/virt/libvirt/fakelibvirt.py:1056 msgid "Expected a function in 'auth[1]' parameter" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1060 +#: nova/tests/virt/libvirt/fakelibvirt.py:1067 msgid "" "virEventRegisterDefaultImpl() must be called before " "connection is used." msgstr "" -#: nova/tests/virt/vmwareapi/fake.py:244 +#: nova/tests/virt/vmwareapi/fake.py:241 #, python-format msgid "Property %(attr)s not set for the managed object %(name)s" msgstr "" -#: nova/tests/virt/vmwareapi/fake.py:969 +#: nova/tests/virt/vmwareapi/fake.py:985 msgid "There is no VM registered" msgstr "" -#: nova/tests/virt/vmwareapi/fake.py:971 nova/tests/virt/vmwareapi/fake.py:1307 +#: nova/tests/virt/vmwareapi/fake.py:987 nova/tests/virt/vmwareapi/fake.py:1323 #, python-format msgid "Virtual Machine with ref %s is not there" msgstr "" -#: nova/tests/virt/vmwareapi/fake.py:1096 +#: nova/tests/virt/vmwareapi/fake.py:1112 msgid "Session Invalid" msgstr "" -#: nova/tests/virt/vmwareapi/fake.py:1304 +#: nova/tests/virt/vmwareapi/fake.py:1320 msgid "No Virtual Machine has been registered yet" msgstr "" #: nova/tests/virt/vmwareapi/test_ds_util.py:221 -#: nova/virt/vmwareapi/ds_util.py:265 +#: nova/virt/vmwareapi/ds_util.py:267 #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" @@ -7356,35 +6986,49 @@ msgstr "" msgid "Multiple torrent URL fetcher extensions found. Failing." 
msgstr "" -#: nova/virt/block_device.py:243 +#: nova/virt/block_device.py:241 #, python-format msgid "Driver failed to attach volume %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/virt/block_device.py:362 +#: nova/virt/block_device.py:363 #, python-format msgid "Booting with volume %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/virt/driver.py:1242 +#: nova/virt/diagnostics.py:143 +#, python-format +msgid "Invalid type for %s" +msgstr "" + +#: nova/virt/diagnostics.py:147 +#, python-format +msgid "Invalid type for %s entry" +msgstr "" + +#: nova/virt/driver.py:705 +msgid "Hypervisor driver does not support post_live_migration_at_source method" +msgstr "" + +#: nova/virt/driver.py:1261 msgid "Event must be an instance of nova.virt.event.Event" msgstr "" -#: nova/virt/driver.py:1248 +#: nova/virt/driver.py:1267 #, python-format msgid "Exception dispatching event %(event)s: %(ex)s" msgstr "" -#: nova/virt/driver.py:1330 +#: nova/virt/driver.py:1361 msgid "Compute driver option required, but not specified" msgstr "" -#: nova/virt/driver.py:1333 +#: nova/virt/driver.py:1364 #, python-format msgid "Loading compute driver '%s'" msgstr "" -#: nova/virt/driver.py:1340 +#: nova/virt/driver.py:1371 msgid "Unable to load the virtualization driver" msgstr "" @@ -7413,7 +7057,7 @@ msgstr "" msgid "Key '%(key)s' not in instances '%(inst)s'" msgstr "" -#: nova/virt/firewall.py:176 +#: nova/virt/firewall.py:174 msgid "Attempted to unfilter instance which is not filtered" msgstr "" @@ -7491,37 +7135,37 @@ msgstr "" msgid "Baremetal node id not supplied to driver for %r" msgstr "" -#: nova/virt/baremetal/driver.py:289 +#: nova/virt/baremetal/driver.py:292 #, python-format msgid "Error deploying instance %(instance)s on baremetal node %(node)s." 
msgstr "" -#: nova/virt/baremetal/driver.py:364 +#: nova/virt/baremetal/driver.py:367 #, python-format msgid "Baremetal power manager failed to restart node for instance %r" msgstr "" -#: nova/virt/baremetal/driver.py:376 +#: nova/virt/baremetal/driver.py:379 #, python-format msgid "Destroy called on non-existing instance %s" msgstr "" -#: nova/virt/baremetal/driver.py:394 +#: nova/virt/baremetal/driver.py:397 #, python-format msgid "Error from baremetal driver during destroy: %s" msgstr "" -#: nova/virt/baremetal/driver.py:399 +#: nova/virt/baremetal/driver.py:402 #, python-format msgid "Error while recording destroy failure in baremetal database: %s" msgstr "" -#: nova/virt/baremetal/driver.py:414 +#: nova/virt/baremetal/driver.py:417 #, python-format msgid "Baremetal power manager failed to stop node for instance %r" msgstr "" -#: nova/virt/baremetal/driver.py:427 +#: nova/virt/baremetal/driver.py:430 #, python-format msgid "Baremetal power manager failed to start node for instance %r" msgstr "" @@ -7606,7 +7250,7 @@ msgid "" "passed to baremetal driver: %s" msgstr "" -#: nova/virt/baremetal/pxe.py:465 nova/virt/baremetal/tilera.py:317 +#: nova/virt/baremetal/pxe.py:465 nova/virt/baremetal/tilera.py:318 #, python-format msgid "Node associated with another instance while waiting for deploy of %s" msgstr "" @@ -7626,7 +7270,7 @@ msgstr "" msgid "PXE deploy failed for instance %s" msgstr "" -#: nova/virt/baremetal/pxe.py:483 nova/virt/baremetal/tilera.py:342 +#: nova/virt/baremetal/pxe.py:483 nova/virt/baremetal/tilera.py:343 #, python-format msgid "Baremetal node deleted while waiting for deployment of instance %s" msgstr "" @@ -7643,21 +7287,21 @@ msgid "" "not passed to baremetal driver: %s" msgstr "" -#: nova/virt/baremetal/tilera.py:323 +#: nova/virt/baremetal/tilera.py:324 #, python-format msgid "Tilera deploy started for instance %s" msgstr "" -#: nova/virt/baremetal/tilera.py:329 +#: nova/virt/baremetal/tilera.py:330 #, python-format msgid "Tilera deploy 
completed for instance %s" msgstr "" -#: nova/virt/baremetal/tilera.py:337 +#: nova/virt/baremetal/tilera.py:338 msgid "Node is unknown error state." msgstr "" -#: nova/virt/baremetal/tilera.py:340 +#: nova/virt/baremetal/tilera.py:341 #, python-format msgid "Tilera deploy failed for instance %s" msgstr "" @@ -7773,72 +7417,55 @@ msgstr "" msgid "detach volume could not find tid for %s" msgstr "" -#: nova/virt/baremetal/db/sqlalchemy/api.py:198 +#: nova/virt/baremetal/db/sqlalchemy/api.py:199 msgid "instance_uuid must be supplied to bm_node_associate_and_update" msgstr "" -#: nova/virt/baremetal/db/sqlalchemy/api.py:210 +#: nova/virt/baremetal/db/sqlalchemy/api.py:211 #, python-format msgid "Failed to associate instance %(i_uuid)s to baremetal node %(n_uuid)s." msgstr "" -#: nova/virt/baremetal/db/sqlalchemy/api.py:245 -#: nova/virt/baremetal/db/sqlalchemy/api.py:287 +#: nova/virt/baremetal/db/sqlalchemy/api.py:246 +#: nova/virt/baremetal/db/sqlalchemy/api.py:288 #, python-format msgid "Baremetal interface %s not found" msgstr "" -#: nova/virt/baremetal/db/sqlalchemy/api.py:297 +#: nova/virt/baremetal/db/sqlalchemy/api.py:298 #, python-format msgid "Baremetal interface %s already in use" msgstr "" -#: nova/virt/baremetal/db/sqlalchemy/api.py:310 +#: nova/virt/baremetal/db/sqlalchemy/api.py:311 #, python-format msgid "Baremetal virtual interface %s not found" msgstr "" -#: nova/virt/disk/api.py:280 +#: nova/virt/disk/api.py:292 msgid "image already mounted" msgstr "" -#: nova/virt/disk/api.py:354 -#, python-format -msgid "Ignoring error injecting data into image (%(e)s)" -msgstr "" - -#: nova/virt/disk/api.py:376 -#, python-format -msgid "" -"Failed to mount container filesystem '%(image)s' on '%(target)s': " -"%(errors)s" -msgstr "" - -#: nova/virt/disk/api.py:406 +#: nova/virt/disk/api.py:418 #, python-format msgid "Failed to teardown container filesystem: %s" msgstr "" -#: nova/virt/disk/api.py:419 +#: nova/virt/disk/api.py:431 #, python-format msgid "Failed to 
umount container filesystem: %s" msgstr "" -#: nova/virt/disk/api.py:444 -#, python-format -msgid "Ignoring error injecting %(inject)s into image (%(e)s)" -msgstr "" - -#: nova/virt/disk/api.py:604 +#: nova/virt/disk/api.py:616 msgid "Not implemented on Windows" msgstr "" -#: nova/virt/disk/api.py:631 +#: nova/virt/disk/api.py:643 #, python-format msgid "User %(username)s not found in password file." msgstr "" -#: nova/virt/disk/api.py:647 +#: nova/virt/disk/api.py:659 #, python-format msgid "User %(username)s not found in shadow file." msgstr "" @@ -7918,44 +7545,44 @@ msgstr "" msgid "Detaching from erroneous nbd device returned error: %s" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:64 +#: nova/virt/disk/vfs/guestfs.py:77 #, python-format msgid "No operating system found in %s" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:70 +#: nova/virt/disk/vfs/guestfs.py:83 #, python-format msgid "Multi-boot operating system found in %s" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:81 +#: nova/virt/disk/vfs/guestfs.py:94 #, python-format msgid "No mount points found in %(root)s of %(imgfile)s" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:95 +#: nova/virt/disk/vfs/guestfs.py:108 #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(imgfile)s with libguestfs" " (%(e)s)" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:131 +#: nova/virt/disk/vfs/guestfs.py:154 #, python-format msgid "Error mounting %(imgfile)s with libguestfs (%(e)s)" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:147 +#: nova/virt/disk/vfs/guestfs.py:170 #, python-format msgid "Failed to close augeas %s" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:155 +#: nova/virt/disk/vfs/guestfs.py:178 #, python-format msgid "Failed to shutdown appliance %s" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:163 +#: nova/virt/disk/vfs/guestfs.py:186 #, python-format msgid "Failed to close guest handle %s" msgstr "" @@ -8021,22 +7648,27 @@ msgstr "" msgid "Duplicate VM name found: %s" msgstr "" -#: 
nova/virt/hyperv/migrationops.py:97 +#: nova/virt/hyperv/migrationops.py:98 msgid "Cannot cleanup migration files" msgstr "" -#: nova/virt/hyperv/migrationops.py:105 +#: nova/virt/hyperv/migrationops.py:106 #, python-format msgid "" "Cannot resize the root disk to a smaller size. Current size: " "%(curr_root_gb)s GB. Requested size: %(new_root_gb)s GB" msgstr "" -#: nova/virt/hyperv/migrationops.py:200 +#: nova/virt/hyperv/migrationops.py:155 +#, python-format +msgid "Config drive is required by instance: %s, but it does not exist." +msgstr "" + +#: nova/virt/hyperv/migrationops.py:214 msgid "Cannot resize a VHD to a smaller size" msgstr "" -#: nova/virt/hyperv/migrationops.py:245 +#: nova/virt/hyperv/migrationops.py:259 #, python-format msgid "Cannot find boot VHD file for instance: %s" msgstr "" @@ -8055,7 +7687,7 @@ msgstr "" msgid "No external vswitch found" msgstr "" -#: nova/virt/hyperv/pathutils.py:72 +#: nova/virt/hyperv/pathutils.py:73 #, python-format msgid "The file copy from %(src)s to %(dest)s failed" msgstr "" @@ -8070,25 +7702,20 @@ msgstr "" msgid "Unsupported disk format: %s" msgstr "" -#: nova/virt/hyperv/vhdutils.py:151 -#, python-format -msgid "The %(vhd_type)s type VHD is not supported" +#: nova/virt/hyperv/vhdutils.py:77 +msgid "VHD differencing disks cannot be resized" msgstr "" -#: nova/virt/hyperv/vhdutils.py:162 +#: nova/virt/hyperv/vhdutils.py:165 #, python-format msgid "Unable to obtain block size from VHD %(vhd_path)s" msgstr "" -#: nova/virt/hyperv/vhdutils.py:209 +#: nova/virt/hyperv/vhdutils.py:212 msgid "Unsupported virtual disk format" msgstr "" -#: nova/virt/hyperv/vhdutilsv2.py:135 -msgid "Differencing VHDX images are not supported" -msgstr "" - -#: nova/virt/hyperv/vhdutilsv2.py:158 +#: nova/virt/hyperv/vhdutilsv2.py:160 #, python-format msgid "Unable to obtain internal size from VHDX: %(vhd_path)s. 
Exception: %(ex)s" msgstr "" @@ -8098,46 +7725,46 @@ msgstr "" msgid "VIF driver not found for network_api_class: %s" msgstr "" -#: nova/virt/hyperv/vmops.py:169 +#: nova/virt/hyperv/vmops.py:198 #, python-format msgid "" -"Cannot resize a VHD to a smaller size, the original size is " -"%(base_vhd_size)s, the newer size is %(root_vhd_size)s" +"Cannot resize a VHD to a smaller size, the original size is %(old_size)s," +" the newer size is %(new_size)s" msgstr "" -#: nova/virt/hyperv/vmops.py:206 +#: nova/virt/hyperv/vmops.py:228 msgid "Spawning new instance" msgstr "" -#: nova/virt/hyperv/vmops.py:280 nova/virt/vmwareapi/vmops.py:567 +#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:576 #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "" -#: nova/virt/hyperv/vmops.py:283 nova/virt/vmwareapi/vmops.py:571 +#: nova/virt/hyperv/vmops.py:307 nova/virt/vmwareapi/vmops.py:580 msgid "Using config drive for instance" msgstr "" -#: nova/virt/hyperv/vmops.py:296 +#: nova/virt/hyperv/vmops.py:320 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:596 +#: nova/virt/hyperv/vmops.py:328 nova/virt/vmwareapi/vmops.py:605 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/hyperv/vmops.py:340 +#: nova/virt/hyperv/vmops.py:371 msgid "Got request to destroy instance" msgstr "" -#: nova/virt/hyperv/vmops.py:359 +#: nova/virt/hyperv/vmops.py:390 #, python-format msgid "Failed to destroy instance: %s" msgstr "" -#: nova/virt/hyperv/vmops.py:412 +#: nova/virt/hyperv/vmops.py:443 #, python-format msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" msgstr "" @@ -8214,136 +7841,135 @@ msgstr "" msgid "Unable to determine disk bus for '%s'" msgstr "" -#: nova/virt/libvirt/driver.py:556 +#: nova/virt/libvirt/driver.py:552 #, python-format msgid "Connection to libvirt lost: %s" msgstr "" -#: nova/virt/libvirt/driver.py:739 +#: 
nova/virt/libvirt/driver.py:741 #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "" -#: nova/virt/libvirt/driver.py:932 +#: nova/virt/libvirt/driver.py:924 msgid "operation time out" msgstr "" -#: nova/virt/libvirt/driver.py:1257 +#: nova/virt/libvirt/driver.py:1248 #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" -#: nova/virt/libvirt/driver.py:1264 +#: nova/virt/libvirt/driver.py:1255 #, python-format msgid "Volume sets block size, but libvirt '%s' or later is required." msgstr "" -#: nova/virt/libvirt/driver.py:1352 +#: nova/virt/libvirt/driver.py:1345 msgid "Swap only supports host devices" msgstr "" -#: nova/virt/libvirt/driver.py:1635 +#: nova/virt/libvirt/driver.py:1631 msgid "libvirt error while requesting blockjob info." msgstr "" -#: nova/virt/libvirt/driver.py:1776 +#: nova/virt/libvirt/driver.py:1774 msgid "Found no disk to snapshot." msgstr "" -#: nova/virt/libvirt/driver.py:1868 +#: nova/virt/libvirt/driver.py:1866 #, python-format msgid "Unknown type: %s" msgstr "" -#: nova/virt/libvirt/driver.py:1873 +#: nova/virt/libvirt/driver.py:1871 msgid "snapshot_id required in create_info" msgstr "" -#: nova/virt/libvirt/driver.py:1931 +#: nova/virt/libvirt/driver.py:1929 #, python-format msgid "Libvirt '%s' or later is required for online deletion of volume snapshots." msgstr "" -#: nova/virt/libvirt/driver.py:1938 +#: nova/virt/libvirt/driver.py:1936 #, python-format msgid "Unknown delete_info type %s" msgstr "" -#: nova/virt/libvirt/driver.py:1966 +#: nova/virt/libvirt/driver.py:1964 #, python-format msgid "Disk with id: %s not found attached to instance." 
msgstr "" -#: nova/virt/libvirt/driver.py:2407 nova/virt/xenapi/vmops.py:1552 +#: nova/virt/libvirt/driver.py:2406 nova/virt/xenapi/vmops.py:1561 msgid "Guest does not have a console available" msgstr "" -#: nova/virt/libvirt/driver.py:2823 +#: nova/virt/libvirt/driver.py:2735 +#, python-format +msgid "%s format is not supported" +msgstr "" + +#: nova/virt/libvirt/driver.py:2841 #, python-format msgid "Detaching PCI devices with libvirt < %(ver)s is not permitted" msgstr "" -#: nova/virt/libvirt/driver.py:2989 +#: nova/virt/libvirt/driver.py:2984 #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt " "hypervisor '%s' does not support selecting CPU models" msgstr "" -#: nova/virt/libvirt/driver.py:2995 +#: nova/virt/libvirt/driver.py:2990 msgid "Config requested a custom CPU model, but no model name was provided" msgstr "" -#: nova/virt/libvirt/driver.py:2999 +#: nova/virt/libvirt/driver.py:2994 msgid "A CPU model name should not be set when a host CPU model is requested" msgstr "" -#: nova/virt/libvirt/driver.py:3019 -msgid "" -"Passthrough of the host CPU was requested but this libvirt version does " -"not support this feature" -msgstr "" - -#: nova/virt/libvirt/driver.py:3567 +#: nova/virt/libvirt/driver.py:3586 #, python-format msgid "" "Error from libvirt while looking up %(instance_id)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3588 +#: nova/virt/libvirt/driver.py:3607 #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3851 +#: nova/virt/libvirt/driver.py:3873 msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range." 
msgstr "" -#: nova/virt/libvirt/driver.py:3974 +#: nova/virt/libvirt/driver.py:3998 msgid "libvirt version is too old (does not support getVersion)" msgstr "" -#: nova/virt/libvirt/driver.py:4335 +#: nova/virt/libvirt/driver.py:4359 msgid "Block migration can not be used with shared storage." msgstr "" -#: nova/virt/libvirt/driver.py:4344 +#: nova/virt/libvirt/driver.py:4368 msgid "Live migration can not be used without shared storage." msgstr "" -#: nova/virt/libvirt/driver.py:4414 +#: nova/virt/libvirt/driver.py:4438 #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too " "large(available on destination host:%(available)s < need:%(necessary)s)" msgstr "" -#: nova/virt/libvirt/driver.py:4453 +#: nova/virt/libvirt/driver.py:4477 #, python-format msgid "" "CPU doesn't have compatibility.\n" @@ -8353,12 +7979,12 @@ msgid "" "Refer to %(u)s" msgstr "" -#: nova/virt/libvirt/driver.py:4516 +#: nova/virt/libvirt/driver.py:4540 #, python-format msgid "The firewall filter for %s does not exist" msgstr "" -#: nova/virt/libvirt/driver.py:4579 +#: nova/virt/libvirt/driver.py:4603 msgid "" "Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag " "or your destination node does not support retrieving listen addresses. " @@ -8367,7 +7993,7 @@ msgid "" "address (0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)." msgstr "" -#: nova/virt/libvirt/driver.py:4596 +#: nova/virt/libvirt/driver.py:4620 msgid "" "Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag," " and the graphics (VNC and/or SPICE) listen addresses on the destination" @@ -8377,39 +8003,51 @@ msgid "" "succeed, but the VM will continue to listen on the current addresses." 
msgstr "" -#: nova/virt/libvirt/driver.py:4964 +#: nova/virt/libvirt/driver.py:4997 #, python-format msgid "" "Error from libvirt while getting description of %(instance_name)s: [Error" " Code %(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:5090 +#: nova/virt/libvirt/driver.py:5123 msgid "Unable to resize disk down." msgstr "" -#: nova/virt/libvirt/imagebackend.py:257 +#: nova/virt/libvirt/imagebackend.py:258 #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:272 +#: nova/virt/libvirt/imagebackend.py:273 msgid "Attempted overwrite of an existing value." msgstr "" -#: nova/virt/libvirt/imagebackend.py:433 +#: nova/virt/libvirt/imagebackend.py:316 +msgid "clone() is not implemented" +msgstr "" + +#: nova/virt/libvirt/imagebackend.py:449 msgid "You should specify images_volume_group flag to use LVM images." msgstr "" -#: nova/virt/libvirt/imagebackend.py:548 +#: nova/virt/libvirt/imagebackend.py:522 msgid "You should specify images_rbd_pool flag to use rbd images." 
msgstr "" -#: nova/virt/libvirt/imagebackend.py:660 -msgid "rbd python libraries not found" +#: nova/virt/libvirt/imagebackend.py:612 +msgid "installed version of librbd does not support cloning" +msgstr "" + +#: nova/virt/libvirt/imagebackend.py:623 +msgid "Image is not raw format" +msgstr "" + +#: nova/virt/libvirt/imagebackend.py:631 +msgid "No image locations are accessible" msgstr "" -#: nova/virt/libvirt/imagebackend.py:703 +#: nova/virt/libvirt/imagebackend.py:651 #, python-format msgid "Unknown image_type=%s" msgstr "" @@ -8436,21 +8074,37 @@ msgstr "" msgid "volume_clear='%s' is not handled" msgstr "" +#: nova/virt/libvirt/rbd.py:104 +msgid "rbd python libraries not found" +msgstr "" + +#: nova/virt/libvirt/rbd.py:159 +msgid "Not stored in rbd" +msgstr "" + +#: nova/virt/libvirt/rbd.py:163 +msgid "Blank components" +msgstr "" + +#: nova/virt/libvirt/rbd.py:166 +msgid "Not an rbd snapshot" +msgstr "" + #: nova/virt/libvirt/utils.py:79 msgid "Cannot find any Fibre Channel HBAs" msgstr "" -#: nova/virt/libvirt/utils.py:437 +#: nova/virt/libvirt/utils.py:391 msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "" -#: nova/virt/libvirt/vif.py:356 nova/virt/libvirt/vif.py:574 -#: nova/virt/libvirt/vif.py:750 +#: nova/virt/libvirt/vif.py:338 nova/virt/libvirt/vif.py:545 +#: nova/virt/libvirt/vif.py:709 msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" -#: nova/virt/libvirt/vif.py:362 nova/virt/libvirt/vif.py:580 -#: nova/virt/libvirt/vif.py:756 +#: nova/virt/libvirt/vif.py:344 nova/virt/libvirt/vif.py:551 +#: nova/virt/libvirt/vif.py:715 #, python-format msgid "Unexpected vif_type=%s" msgstr "" @@ -8473,69 +8127,48 @@ msgstr "" msgid "Fibre Channel device not found." msgstr "" -#: nova/virt/vmwareapi/driver.py:104 -msgid "" -"The VMware ESX driver is now deprecated and will be removed in the Juno " -"release. The VC driver will remain and continue to be supported." 
-msgstr "" - -#: nova/virt/vmwareapi/driver.py:116 -msgid "" -"Must specify host_ip, host_username and host_password to use " -"compute_driver=vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver" -msgstr "" - -#: nova/virt/vmwareapi/driver.py:128 +#: nova/virt/vmwareapi/driver.py:127 #, python-format msgid "Invalid Regular Expression %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:243 -msgid "Instance cannot be found in host, or in an unknownstate." -msgstr "" - -#: nova/virt/vmwareapi/driver.py:403 +#: nova/virt/vmwareapi/driver.py:141 #, python-format msgid "All clusters specified %s were not found in the vCenter" msgstr "" -#: nova/virt/vmwareapi/driver.py:412 -#, python-format -msgid "The following clusters could not be found in the vCenter %s" -msgstr "" - -#: nova/virt/vmwareapi/driver.py:551 +#: nova/virt/vmwareapi/driver.py:319 #, python-format msgid "The resource %s does not exist" msgstr "" -#: nova/virt/vmwareapi/driver.py:597 +#: nova/virt/vmwareapi/driver.py:381 #, python-format msgid "Invalid cluster or resource pool name : %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:771 +#: nova/virt/vmwareapi/driver.py:555 msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we " "do not return uptime for just one host." msgstr "" -#: nova/virt/vmwareapi/driver.py:884 +#: nova/virt/vmwareapi/driver.py:678 #, python-format msgid "Unable to validate session %s!" msgstr "" -#: nova/virt/vmwareapi/driver.py:926 +#: nova/virt/vmwareapi/driver.py:720 #, python-format msgid "Session %s is inactive!" 
msgstr "" -#: nova/virt/vmwareapi/driver.py:1017 +#: nova/virt/vmwareapi/driver.py:811 #, python-format msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" msgstr "" -#: nova/virt/vmwareapi/driver.py:1027 +#: nova/virt/vmwareapi/driver.py:821 #, python-format msgid "In vmwareapi:_poll_task, Got this error %s" msgstr "" @@ -8556,15 +8189,15 @@ msgstr "" msgid "Capacity is smaller than free space" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:109 +#: nova/virt/vmwareapi/ds_util.py:111 msgid "datastore name empty" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:114 nova/virt/vmwareapi/ds_util.py:146 +#: nova/virt/vmwareapi/ds_util.py:116 nova/virt/vmwareapi/ds_util.py:148 msgid "path component cannot be None" msgstr "" -#: nova/virt/vmwareapi/ds_util.py:160 +#: nova/virt/vmwareapi/ds_util.py:162 msgid "datastore path empty" msgstr "" @@ -8724,26 +8357,26 @@ msgstr "" msgid "Unable to retrieve value for %(path)s Reason: %(reason)s" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:196 +#: nova/virt/vmwareapi/vm_util.py:202 #, python-format msgid "%s is not supported." 
msgstr "" -#: nova/virt/vmwareapi/vm_util.py:989 +#: nova/virt/vmwareapi/vm_util.py:1037 msgid "No host available on cluster" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:1083 +#: nova/virt/vmwareapi/vm_util.py:1131 #, python-format msgid "Failed to get cluster references %s" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:1095 +#: nova/virt/vmwareapi/vm_util.py:1143 #, python-format msgid "Failed to get resource pool references %s" msgstr "" -#: nova/virt/vmwareapi/vm_util.py:1285 +#: nova/virt/vmwareapi/vm_util.py:1334 msgid "vmwareapi:vm_util:clone_vmref_for_instance, called with vm_ref=None" msgstr "" @@ -8752,15 +8385,15 @@ msgstr "" msgid "Extending virtual disk failed with error: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:249 +#: nova/virt/vmwareapi/vmops.py:253 msgid "Image disk size greater than requested disk size" msgstr "" -#: nova/virt/vmwareapi/vmops.py:856 +#: nova/virt/vmwareapi/vmops.py:861 msgid "instance is not powered on" msgstr "" -#: nova/virt/vmwareapi/vmops.py:884 +#: nova/virt/vmwareapi/vmops.py:889 msgid "Instance does not exist on backend" msgstr "" @@ -8777,42 +8410,52 @@ msgid "" "contents from the disk" msgstr "" -#: nova/virt/vmwareapi/vmops.py:972 +#: nova/virt/vmwareapi/vmops.py:971 msgid "pause not supported for vmwareapi" msgstr "" -#: nova/virt/vmwareapi/vmops.py:976 +#: nova/virt/vmwareapi/vmops.py:975 msgid "unpause not supported for vmwareapi" msgstr "" -#: nova/virt/vmwareapi/vmops.py:994 +#: nova/virt/vmwareapi/vmops.py:993 msgid "instance is powered off and cannot be suspended." msgstr "" -#: nova/virt/vmwareapi/vmops.py:1014 +#: nova/virt/vmwareapi/vmops.py:1013 msgid "instance is not in a suspended state" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1102 -msgid "instance is suspended and cannot be powered off." +#: nova/virt/vmwareapi/vmops.py:1113 +msgid "Unable to shrink disk." 
msgstr "" -#: nova/virt/vmwareapi/vmops.py:1193 +#: nova/virt/vmwareapi/vmops.py:1172 #, python-format msgid "" "In vmwareapi:vmops:confirm_migration, got this exception while destroying" " the VM: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1255 nova/virt/xenapi/vmops.py:1497 +#: nova/virt/vmwareapi/vmops.py:1248 nova/virt/xenapi/vmops.py:1500 #, python-format msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1259 nova/virt/xenapi/vmops.py:1501 +#: nova/virt/vmwareapi/vmops.py:1252 nova/virt/xenapi/vmops.py:1504 msgid "Automatically hard rebooting" msgstr "" +#: nova/virt/vmwareapi/vmops.py:1570 +#, python-format +msgid "No device with interface-id %s exists on VM" +msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1580 +#, python-format +msgid "No device with MAC address %s exists on the VM" +msgstr "" + #: nova/virt/vmwareapi/volumeops.py:340 nova/virt/vmwareapi/volumeops.py:375 #, python-format msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" @@ -8839,19 +8482,19 @@ msgstr "" msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" msgstr "" -#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:1768 +#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:1777 #, python-format msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" msgstr "" -#: nova/virt/xenapi/agent.py:117 nova/virt/xenapi/vmops.py:1773 +#: nova/virt/xenapi/agent.py:117 nova/virt/xenapi/vmops.py:1782 #, python-format msgid "" "NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " "args=%(args)r" msgstr "" -#: nova/virt/xenapi/agent.py:122 nova/virt/xenapi/vmops.py:1778 +#: nova/virt/xenapi/agent.py:122 nova/virt/xenapi/vmops.py:1787 #, python-format msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" msgstr "" @@ -8913,65 +8556,65 @@ msgstr "" msgid "Failure while cleaning up attached VDIs" msgstr "" -#: nova/virt/xenapi/driver.py:386 +#: nova/virt/xenapi/driver.py:390 #, python-format msgid "Could not determine key: %s" msgstr "" -#: nova/virt/xenapi/driver.py:636 +#: nova/virt/xenapi/driver.py:641 msgid "Host startup on XenServer is not supported." msgstr "" -#: nova/virt/xenapi/fake.py:811 +#: nova/virt/xenapi/fake.py:820 #, python-format msgid "xenapi.fake does not have an implementation for %s" msgstr "" -#: nova/virt/xenapi/fake.py:919 +#: nova/virt/xenapi/fake.py:928 #, python-format msgid "" "xenapi.fake does not have an implementation for %s or it has been called " "with the wrong number of arguments" msgstr "" -#: nova/virt/xenapi/host.py:74 +#: nova/virt/xenapi/host.py:73 #, python-format msgid "" "Instance %(name)s running on %(host)s could not be found in the database:" " assuming it is a worker VM and skip ping migration to a new host" msgstr "" -#: nova/virt/xenapi/host.py:86 +#: nova/virt/xenapi/host.py:85 #, python-format msgid "Aggregate for host %(host)s count not be found." msgstr "" -#: nova/virt/xenapi/host.py:105 +#: nova/virt/xenapi/host.py:104 #, python-format msgid "Unable to migrate VM %(vm_ref)s from %(host)s" msgstr "" -#: nova/virt/xenapi/host.py:186 +#: nova/virt/xenapi/host.py:185 msgid "Failed to parse information about a pci device for passthrough" msgstr "" -#: nova/virt/xenapi/host.py:259 +#: nova/virt/xenapi/host.py:258 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to " "take effect." 
msgstr "" -#: nova/virt/xenapi/host.py:284 +#: nova/virt/xenapi/host.py:283 #, python-format msgid "Failed to extract instance support from %s" msgstr "" -#: nova/virt/xenapi/host.py:301 +#: nova/virt/xenapi/host.py:300 msgid "Unable to get updated status" msgstr "" -#: nova/virt/xenapi/host.py:304 +#: nova/virt/xenapi/host.py:303 #, python-format msgid "The call to %(method)s returned an error: %(e)s." msgstr "" @@ -9045,231 +8688,231 @@ msgid "" "Expected %(vlan_num)d" msgstr "" -#: nova/virt/xenapi/vm_utils.py:208 +#: nova/virt/xenapi/vm_utils.py:210 #, python-format msgid "" "Device id %(id)s specified is not supported by hypervisor version " "%(version)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:326 nova/virt/xenapi/vm_utils.py:341 +#: nova/virt/xenapi/vm_utils.py:328 nova/virt/xenapi/vm_utils.py:343 msgid "VM already halted, skipping shutdown..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:393 +#: nova/virt/xenapi/vm_utils.py:395 #, python-format msgid "VBD %s already detached" msgstr "" -#: nova/virt/xenapi/vm_utils.py:396 +#: nova/virt/xenapi/vm_utils.py:398 #, python-format msgid "" "VBD %(vbd_ref)s uplug failed with \"%(err)s\", attempt " "%(num_attempt)d/%(max_attempts)d" msgstr "" -#: nova/virt/xenapi/vm_utils.py:403 +#: nova/virt/xenapi/vm_utils.py:405 #, python-format msgid "Unable to unplug VBD %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:406 +#: nova/virt/xenapi/vm_utils.py:408 #, python-format msgid "Reached maximum number of retries trying to unplug VBD %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:418 +#: nova/virt/xenapi/vm_utils.py:420 #, python-format msgid "Unable to destroy VBD %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:471 +#: nova/virt/xenapi/vm_utils.py:473 #, python-format msgid "Unable to destroy VDI %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:517 +#: nova/virt/xenapi/vm_utils.py:519 msgid "SR not present and could not be introduced" msgstr "" -#: nova/virt/xenapi/vm_utils.py:701 +#: nova/virt/xenapi/vm_utils.py:703 #, 
python-format msgid "No primary VDI found for %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:793 +#: nova/virt/xenapi/vm_utils.py:795 #, python-format msgid "" "Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s" " is of type %(type)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:872 +#: nova/virt/xenapi/vm_utils.py:874 #, python-format msgid "Multiple base images for image: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:927 +#: nova/virt/xenapi/vm_utils.py:929 #, python-format msgid "" "VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor " "size of %(new_disk_size)d bytes." msgstr "" -#: nova/virt/xenapi/vm_utils.py:938 nova/virt/xenapi/vmops.py:1037 +#: nova/virt/xenapi/vm_utils.py:940 nova/virt/xenapi/vmops.py:1040 msgid "Can't resize a disk to 0 GB." msgstr "" -#: nova/virt/xenapi/vm_utils.py:990 +#: nova/virt/xenapi/vm_utils.py:992 msgid "Disk must have only one partition." msgstr "" -#: nova/virt/xenapi/vm_utils.py:995 +#: nova/virt/xenapi/vm_utils.py:997 #, python-format msgid "Disk contains a filesystem we are unable to resize: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1000 +#: nova/virt/xenapi/vm_utils.py:1002 msgid "The only partition should be partition 1." msgstr "" -#: nova/virt/xenapi/vm_utils.py:1011 +#: nova/virt/xenapi/vm_utils.py:1013 #, python-format msgid "Attempted auto_configure_disk failed because: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1262 +#: nova/virt/xenapi/vm_utils.py:1264 #, python-format msgid "" "Fast cloning is only supported on default local SR of type ext. SR on " "this system was found to be of type %s. Ignoring the cow flag." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:1337 +#: nova/virt/xenapi/vm_utils.py:1339 #, python-format msgid "Unrecognized cache_images value '%s', defaulting to True" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1413 +#: nova/virt/xenapi/vm_utils.py:1415 #, python-format msgid "Invalid value '%s' for torrent_images" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1436 +#: nova/virt/xenapi/vm_utils.py:1438 #, python-format msgid "Invalid value '%d' for image_compression_level" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1462 +#: nova/virt/xenapi/vm_utils.py:1464 #, python-format msgid "" "Download handler '%(handler)s' raised an exception, falling back to " "default handler '%(default_handler)s'" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1518 +#: nova/virt/xenapi/vm_utils.py:1520 #, python-format msgid "Image size %(size)d exceeded flavor allowed size %(allowed_size)d" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1569 +#: nova/virt/xenapi/vm_utils.py:1571 #, python-format msgid "" "Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " "bytes" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1611 +#: nova/virt/xenapi/vm_utils.py:1613 msgid "Failed to fetch glance image" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1819 +#: nova/virt/xenapi/vm_utils.py:1846 #, python-format msgid "Unable to parse rrd of %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1849 +#: nova/virt/xenapi/vm_utils.py:1876 #, python-format msgid "Retry SR scan due to error: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1882 +#: nova/virt/xenapi/vm_utils.py:1909 #, python-format msgid "Flag sr_matching_filter '%s' does not respect formatting convention" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1903 +#: nova/virt/xenapi/vm_utils.py:1930 msgid "" "XenAPI is unable to find a Storage Repository to install guest instances " "on. Please check your configuration (e.g. set a default SR for the pool) " "and/or configure the flag 'sr_matching_filter'." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:1916 +#: nova/virt/xenapi/vm_utils.py:1943 msgid "Cannot find SR of content-type ISO" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1969 +#: nova/virt/xenapi/vm_utils.py:1996 #, python-format msgid "" "Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " "%(server)s." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2097 +#: nova/virt/xenapi/vm_utils.py:2124 #, python-format msgid "VHD coalesce attempts exceeded (%d), giving up..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2132 +#: nova/virt/xenapi/vm_utils.py:2159 #, python-format msgid "Timeout waiting for device %s to be created" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2152 +#: nova/virt/xenapi/vm_utils.py:2179 #, python-format msgid "Disconnecting stale VDI %s from compute domU" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2310 +#: nova/virt/xenapi/vm_utils.py:2337 msgid "" "Shrinking the filesystem down with resize2fs has failed, please check if " "you have enough free space on your disk." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2445 +#: nova/virt/xenapi/vm_utils.py:2472 msgid "Manipulating interface files directly" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2454 +#: nova/virt/xenapi/vm_utils.py:2481 #, python-format msgid "Failed to mount filesystem (expected for non-linux instances): %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2566 +#: nova/virt/xenapi/vm_utils.py:2496 msgid "This domU must be running on the host specified by connection_url" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2635 +#: nova/virt/xenapi/vm_utils.py:2565 msgid "Failed to transfer vhd to new host" msgstr "" -#: nova/virt/xenapi/vm_utils.py:2661 +#: nova/virt/xenapi/vm_utils.py:2591 msgid "ipxe_boot_menu_url not set, user will have to enter URL manually..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2667 +#: nova/virt/xenapi/vm_utils.py:2597 msgid "ipxe_network_name not set, user will have to enter IP manually..." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:2678 +#: nova/virt/xenapi/vm_utils.py:2608 #, python-format msgid "" "Unable to find network matching '%(network_name)s', user will have to " "enter IP manually..." msgstr "" -#: nova/virt/xenapi/vm_utils.py:2702 +#: nova/virt/xenapi/vm_utils.py:2632 #, python-format msgid "ISO creation tool '%s' does not exist." msgstr "" @@ -9278,104 +8921,104 @@ msgstr "" msgid "Error: Agent is disabled" msgstr "" -#: nova/virt/xenapi/vmops.py:375 +#: nova/virt/xenapi/vmops.py:378 msgid "ipxe_boot is True but no ISO image found" msgstr "" -#: nova/virt/xenapi/vmops.py:518 +#: nova/virt/xenapi/vmops.py:521 msgid "Failed to spawn, rolling back" msgstr "" -#: nova/virt/xenapi/vmops.py:783 +#: nova/virt/xenapi/vmops.py:786 msgid "Unable to terminate instance." msgstr "" -#: nova/virt/xenapi/vmops.py:835 +#: nova/virt/xenapi/vmops.py:838 #, python-format msgid "_migrate_disk_resizing_down failed. Restoring orig vm due_to: %s." msgstr "" -#: nova/virt/xenapi/vmops.py:989 +#: nova/virt/xenapi/vmops.py:992 #, python-format msgid "_migrate_disk_resizing_up failed. Restoring orig vm due_to: %s." msgstr "" -#: nova/virt/xenapi/vmops.py:996 +#: nova/virt/xenapi/vmops.py:999 #, python-format msgid "_migrate_disk_resizing_up failed to rollback: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:1013 +#: nova/virt/xenapi/vmops.py:1016 msgid "Can't resize down ephemeral disks." msgstr "" -#: nova/virt/xenapi/vmops.py:1124 +#: nova/virt/xenapi/vmops.py:1127 msgid "Starting halted instance found during reboot" msgstr "" -#: nova/virt/xenapi/vmops.py:1130 +#: nova/virt/xenapi/vmops.py:1133 msgid "" "Reboot failed due to bad volumes, detaching bad volumes and starting " "halted instance" msgstr "" -#: nova/virt/xenapi/vmops.py:1208 +#: nova/virt/xenapi/vmops.py:1211 msgid "Unable to update metadata, VM not found." 
msgstr "" -#: nova/virt/xenapi/vmops.py:1254 +#: nova/virt/xenapi/vmops.py:1257 msgid "Unable to find root VBD/VDI for VM" msgstr "" -#: nova/virt/xenapi/vmops.py:1292 +#: nova/virt/xenapi/vmops.py:1295 msgid "instance has a kernel or ramdisk but not both" msgstr "" -#: nova/virt/xenapi/vmops.py:1326 +#: nova/virt/xenapi/vmops.py:1329 msgid "Destroying VM" msgstr "" -#: nova/virt/xenapi/vmops.py:1355 +#: nova/virt/xenapi/vmops.py:1358 msgid "VM is not present, skipping destroy..." msgstr "" -#: nova/virt/xenapi/vmops.py:1406 +#: nova/virt/xenapi/vmops.py:1409 #, python-format msgid "Instance is already in Rescue Mode: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:1448 +#: nova/virt/xenapi/vmops.py:1451 msgid "VM is not present, skipping soft delete..." msgstr "" -#: nova/virt/xenapi/vmops.py:1834 +#: nova/virt/xenapi/vmops.py:1843 #, python-format msgid "Destination host:%s must be in the same aggregate as the source server" msgstr "" -#: nova/virt/xenapi/vmops.py:1855 +#: nova/virt/xenapi/vmops.py:1864 msgid "No suitable network for migrate" msgstr "" -#: nova/virt/xenapi/vmops.py:1861 +#: nova/virt/xenapi/vmops.py:1870 #, python-format msgid "PIF %s does not contain IP address" msgstr "" -#: nova/virt/xenapi/vmops.py:1874 +#: nova/virt/xenapi/vmops.py:1883 msgid "Migrate Receive failed" msgstr "" -#: nova/virt/xenapi/vmops.py:1948 +#: nova/virt/xenapi/vmops.py:1957 msgid "XAPI supporting relax-xsm-sr-check=true required" msgstr "" -#: nova/virt/xenapi/vmops.py:1959 +#: nova/virt/xenapi/vmops.py:1968 #, python-format msgid "assert_can_migrate failed because: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:2019 +#: nova/virt/xenapi/vmops.py:2028 msgid "Migrate Send failed" msgstr "" @@ -9430,6 +9073,11 @@ msgstr "" msgid "Unable to find SR from VBD %s" msgstr "" +#: nova/virt/xenapi/volume_utils.py:311 +#, python-format +msgid "Unable to find SR from VDI %s" +msgstr "" + #: nova/virt/xenapi/volumeops.py:63 #, python-format msgid "Connected volume (vdi_uuid): %s" @@ 
-9510,11 +9158,16 @@ msgstr "" msgid "Starting nova-xvpvncproxy node (version %s)" msgstr "" -#: nova/volume/cinder.py:236 +#: nova/volume/cinder.py:257 +#, python-format +msgid "Invalid client version, must be one of: %s" +msgstr "" + +#: nova/volume/cinder.py:281 msgid "status must be 'in-use'" msgstr "" -#: nova/volume/cinder.py:242 +#: nova/volume/cinder.py:287 msgid "status must be 'available'" msgstr "" diff --git a/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po b/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po index da03d9857d..e66d4d2e70 100644 --- a/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:04+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/nova/" @@ -39,16 +39,265 @@ msgstr "" msgid "Exception running %(name)s post-hook: %(obj)s" msgstr "" -#: nova/api/ec2/__init__.py:243 +#: nova/api/ec2/__init__.py:244 #, python-format msgid "Keystone failure: %s" msgstr "" -#: nova/compute/manager.py:5416 +#: nova/api/ec2/__init__.py:493 +#, python-format +msgid "Unexpected %(ex_name)s raised: %(ex_str)s" +msgstr "" + +#: nova/api/ec2/__init__.py:520 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: nova/api/metadata/handler.py:155 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/metadata/handler.py:212 +#, python-format +msgid "Failed to get metadata for instance id: %s" +msgstr "" + +#: nova/api/openstack/common.py:134 +#, python-format +msgid "" +"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad " +"upgrade or db corrupted?" 
+msgstr "" + +#: nova/api/openstack/wsgi.py:684 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:68 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:90 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:112 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:134 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:160 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:179 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:198 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:215 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:234 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:392 +#, python-format +msgid "Compute.api::resetState %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/multinic.py:85 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85 +msgid "Failed to get default networks" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125 +msgid "Failed to update usages deallocating network." +msgstr "" + +#: nova/compute/api.py:561 +msgid "Failed to set instance name using multi_instance_display_name_template." +msgstr "" + +#: nova/compute/api.py:1429 +msgid "" +"Something wrong happened when trying to delete snapshot from shelved " +"instance." 
+msgstr "" + +#: nova/compute/api.py:3732 +msgid "Failed to update usages deallocating security group" +msgstr "" + +#: nova/compute/flavors.py:167 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: nova/compute/flavors.py:178 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: nova/compute/manager.py:366 +#, python-format +msgid "Error while trying to clean up image %s" +msgstr "" + +#: nova/compute/manager.py:755 +msgid "Failed to check if instance shared" +msgstr "" + +#: nova/compute/manager.py:821 nova/compute/manager.py:872 +msgid "Failed to complete a deletion" +msgstr "" + +#: nova/compute/manager.py:913 +msgid "Failed to stop instance" +msgstr "" + +#: nova/compute/manager.py:925 +msgid "Failed to start instance" +msgstr "" + +#: nova/compute/manager.py:950 +msgid "Failed to revert crashed migration" +msgstr "" + +#: nova/compute/manager.py:1364 +msgid "Failed to dealloc network for deleted instance" +msgstr "" + +#: nova/compute/manager.py:1385 +msgid "Failed to dealloc network for failed instance" +msgstr "" + +#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +msgid "Error trying to reschedule" +msgstr "" + +#: nova/compute/manager.py:1567 +#, python-format +msgid "Instance failed network setup after %(attempts)d attempt(s)" +msgstr "" + +#: nova/compute/manager.py:1755 +msgid "Instance failed block device setup" +msgstr "" + +#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 +#: nova/compute/manager.py:4058 +msgid "Instance failed to spawn" +msgstr "" + +#: nova/compute/manager.py:1957 +msgid "Unexpected build failure, not rescheduling build." 
+msgstr "" + +#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +msgid "Failed to allocate network(s)" +msgstr "" + +#: nova/compute/manager.py:2104 +msgid "Failure prepping block device" +msgstr "" + +#: nova/compute/manager.py:2137 +msgid "Failed to deallocate networks" +msgstr "" + +#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 +#: nova/compute/manager.py:5803 +msgid "Setting instance vm_state to ERROR" +msgstr "" + +#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#, python-format +msgid "Failed to get compute_info for %s" +msgstr "" + +#: nova/compute/manager.py:3005 +#, python-format +msgid "set_admin_password failed: %s" +msgstr "" + +#: nova/compute/manager.py:3090 +msgid "Error trying to Rescue Instance" +msgstr "" + +#: nova/compute/manager.py:3711 +#, python-format +msgid "Failed to rollback quota for failed finish_resize: %s" +msgstr "" + +#: nova/compute/manager.py:4310 +#, python-format +msgid "Failed to attach %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4349 +#, python-format +msgid "Failed to detach volume %(volume_id)s from %(mp)s" +msgstr "" + +#: nova/compute/manager.py:4422 +#, python-format +msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" +msgstr "" + +#: nova/compute/manager.py:4429 +#, python-format +msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4716 +#, python-format +msgid "Pre live migration failed at %s" +msgstr "" + +#: nova/compute/manager.py:5216 +msgid "Periodic task failed to offload instance." +msgstr "" + +#: nova/compute/manager.py:5256 +#, python-format +msgid "Failed to generate usage audit for instance on host %s" +msgstr "" + +#: nova/compute/manager.py:5446 msgid "" "Periodic sync_power_state task had an error while processing an instance." 
msgstr "" +#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 +#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +msgid "error during stop() in sync_power_state." +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "Falha ao notificar células de falha da instância" @@ -68,11 +317,11 @@ msgstr "Exceção não esperada ocorreu %d vez(es)... tentando novamente." msgid "Could not release the acquired lock `%s`" msgstr "" -#: nova/openstack/common/loopingcall.py:89 +#: nova/openstack/common/loopingcall.py:95 msgid "in fixed duration looping call" msgstr "em uma chamada de laço de duração fixa" -#: nova/openstack/common/loopingcall.py:136 +#: nova/openstack/common/loopingcall.py:138 msgid "in dynamic looping call" msgstr "em chamada de laço dinâmico" @@ -121,137 +370,151 @@ msgstr "Exceção de BD incluída." msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:639 +#: nova/pci/pci_stats.py:119 +msgid "" +"Failed to allocate PCI devices for instance. Unassigning devices back to " +"pools. This should not happen, since the scheduler should have accurate " +"information, and allocation during claims is controlled via a hold on the " +"compute node semaphore" +msgstr "" + +#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109 +#, python-format +msgid "PCI device %s not found" +msgstr "" + +#: nova/virt/disk/api.py:388 +#, python-format +msgid "" +"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:641 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." 
msgstr "" -#: nova/virt/libvirt/driver.py:764 +#: nova/virt/libvirt/driver.py:766 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:937 +#: nova/virt/libvirt/driver.py:929 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1015 +#: nova/virt/libvirt/driver.py:1007 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1041 +#: nova/virt/libvirt/driver.py:1035 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1445 +#: nova/virt/libvirt/driver.py:1438 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1470 +#: nova/virt/libvirt/driver.py:1465 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1719 +#: nova/virt/libvirt/driver.py:1717 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1827 +#: nova/virt/libvirt/driver.py:1825 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1833 +#: nova/virt/libvirt/driver.py:1831 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1882 +#: nova/virt/libvirt/driver.py:1880 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2028 +#: nova/virt/libvirt/driver.py:2026 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 +#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2619 +#: nova/virt/libvirt/driver.py:2620 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2770 +#: nova/virt/libvirt/driver.py:2788 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2863 +#: nova/virt/libvirt/driver.py:2881 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3645 +#: nova/virt/libvirt/driver.py:3680 #, python-format -msgid "An error occurred while trying to define a domain with xml: %s" +msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3654 +#: nova/virt/libvirt/driver.py:3684 #, python-format -msgid "An error occurred while trying to launch a defined domain with xml: %s" +msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3663 +#: nova/virt/libvirt/driver.py:3689 #, python-format -msgid "An error occurred while enabling hairpin mode on domain with xml: %s" +msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3681 +#: nova/virt/libvirt/driver.py:3703 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3988 +#: nova/virt/libvirt/driver.py:4012 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4667 +#: nova/virt/libvirt/driver.py:4691 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5419 +#: nova/virt/libvirt/driver.py:5487 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:201 +#: nova/virt/libvirt/imagebackend.py:200 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:229 +#: nova/virt/libvirt/imagebackend.py:227 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:505 -#, python-format -msgid "error opening rbd image %s" -msgstr "" - #: nova/virt/libvirt/imagecache.py:130 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" @@ -272,15 +535,19 @@ msgstr "" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 -#: nova/virt/libvirt/vif.py:562 +#: nova/virt/libvirt/rbd.py:62 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 +#: nova/virt/libvirt/vif.py:533 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 -#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 -#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 -#: nova/virt/libvirt/vif.py:737 +#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 +#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 +#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 msgid "Failed while unplugging vif" msgstr "" @@ -299,8 +566,18 @@ msgstr "" msgid "Couldn't unmount the GlusterFS share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:500 +#: nova/virt/vmwareapi/vmops.py:509 #, python-format msgid "" "Failed to copy cached 
image %(source)s to %(dest)s for resize: %(error)s" msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1553 +#, python-format +msgid "Attaching network adapter failed. Exception: %s" +msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1593 +#, python-format +msgid "Detaching network adapter failed. Exception: %s" +msgstr "" diff --git a/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po b/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po index 9694439001..1da5c68366 100644 --- a/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-07-16 14:42+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/nova/" @@ -19,12 +19,51 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" -#: nova/compute/manager.py:5422 +#: nova/api/openstack/__init__.py:101 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: nova/api/openstack/__init__.py:294 +msgid "V3 API has been disabled by configuration" +msgstr "" + +#: nova/api/openstack/wsgi.py:688 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: nova/api/openstack/wsgi.py:691 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_networks.py:101 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: nova/compute/manager.py:5452 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." 
msgstr "" +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36 +msgid "" +"Skipped adding reservations_deleted_expire_idx because an equivalent index " +"already exists." +msgstr "" + +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58 +msgid "" +"Skipped removing reservations_deleted_expire_idx because index does not " +"exist." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" @@ -107,102 +146,106 @@ msgstr "Excluindo linha duplicada com ID: %(id)s da tabela: %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/firewall.py:446 +#: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/libvirt/driver.py:839 +#: nova/virt/disk/vfs/guestfs.py:137 +msgid "Unable to force TCG mode, libguestfs too old?" +msgstr "" + +#: nova/virt/libvirt/driver.py:837 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:958 +#: nova/virt/libvirt/driver.py:950 msgid "Instance destroyed successfully." msgstr "Instância destruída com êxito." -#: nova/virt/libvirt/driver.py:968 +#: nova/virt/libvirt/driver.py:960 msgid "Instance may be started again." msgstr "A instância pode ser iniciada novamente." -#: nova/virt/libvirt/driver.py:978 +#: nova/virt/libvirt/driver.py:970 msgid "Going to destroy instance again." msgstr "Destruindo a instância novamente." 
-#: nova/virt/libvirt/driver.py:1574 +#: nova/virt/libvirt/driver.py:1570 msgid "Beginning live snapshot process" msgstr "Começando o processo de captura instantânea em tempo real" -#: nova/virt/libvirt/driver.py:1577 +#: nova/virt/libvirt/driver.py:1573 msgid "Beginning cold snapshot process" msgstr "Iniciando processo de captura instantânea a frio" -#: nova/virt/libvirt/driver.py:1606 +#: nova/virt/libvirt/driver.py:1602 msgid "Snapshot extracted, beginning image upload" msgstr "Captura instantânea extraída, iniciando upload da imagem" -#: nova/virt/libvirt/driver.py:1618 +#: nova/virt/libvirt/driver.py:1614 msgid "Snapshot image upload complete" msgstr "Upload da imagem de captura instantânea concluído" -#: nova/virt/libvirt/driver.py:2049 +#: nova/virt/libvirt/driver.py:2047 msgid "Instance soft rebooted successfully." msgstr "Reinicialização virtual da instância bem-sucedida." -#: nova/virt/libvirt/driver.py:2092 +#: nova/virt/libvirt/driver.py:2090 msgid "Instance shutdown successfully." msgstr "A instância foi encerrada com êxito." -#: nova/virt/libvirt/driver.py:2100 +#: nova/virt/libvirt/driver.py:2098 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "A instância pode ter sido reinicializada durante a reinicialização virtual, " "portanto retorne agora." -#: nova/virt/libvirt/driver.py:2168 +#: nova/virt/libvirt/driver.py:2167 msgid "Instance rebooted successfully." msgstr "Instância reinicializada com êxito." -#: nova/virt/libvirt/driver.py:2336 +#: nova/virt/libvirt/driver.py:2335 msgid "Instance spawned successfully." msgstr "Feito spawn da instância com êxito." 
-#: nova/virt/libvirt/driver.py:2352 +#: nova/virt/libvirt/driver.py:2351 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "dados: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 +#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Log do console truncado retornado, %d bytes ignorados" -#: nova/virt/libvirt/driver.py:2645 +#: nova/virt/libvirt/driver.py:2646 msgid "Creating image" msgstr "Criando imagem" -#: nova/virt/libvirt/driver.py:2754 +#: nova/virt/libvirt/driver.py:2772 msgid "Using config drive" msgstr "Usando unidade de configuração" -#: nova/virt/libvirt/driver.py:2763 +#: nova/virt/libvirt/driver.py:2781 #, python-format msgid "Creating config drive at %(path)s" msgstr "Criando unidade de configuração em %(path)s" -#: nova/virt/libvirt/driver.py:3315 +#: nova/virt/libvirt/driver.py:3334 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4193 +#: nova/virt/libvirt/driver.py:4217 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4199 +#: nova/virt/libvirt/driver.py:4223 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -211,39 +254,39 @@ msgstr "" "Não foi possível localizar o domínio em libvirt para a instância %s. Não é " "possível obter estatísticas do bloco para o dispositivo" -#: nova/virt/libvirt/driver.py:4441 +#: nova/virt/libvirt/driver.py:4465 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5174 +#: nova/virt/libvirt/driver.py:5207 msgid "Instance running successfully." msgstr "Instância executando com êxito." 
-#: nova/virt/libvirt/driver.py:5414 +#: nova/virt/libvirt/driver.py:5481 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5426 +#: nova/virt/libvirt/driver.py:5494 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5429 +#: nova/virt/libvirt/driver.py:5498 #, python-format msgid "Deletion of %s complete" msgstr "" -#: nova/virt/libvirt/firewall.py:105 +#: nova/virt/libvirt/firewall.py:106 msgid "Called setup_basic_filtering in nwfilter" msgstr "Chamada setup_basic_filtering em nwfilter" -#: nova/virt/libvirt/firewall.py:113 +#: nova/virt/libvirt/firewall.py:114 msgid "Ensuring static filters" msgstr "Assegurando filtros estáticos" -#: nova/virt/libvirt/firewall.py:304 +#: nova/virt/libvirt/firewall.py:305 msgid "Attempted to unfilter instance which is not filtered" msgstr "Tentou cancelar a filtragem da instância que não foi filtrada" @@ -304,11 +347,11 @@ msgstr "Arquivos base corrompidos: %s" msgid "Removable base files: %s" msgstr "Arquivos base removíveis: %s" -#: nova/virt/libvirt/utils.py:536 +#: nova/virt/libvirt/utils.py:490 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1353 +#: nova/virt/xenapi/vm_utils.py:1355 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po b/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po index 0eebfc183b..5c174f713d 100644 --- a/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:04+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Chinese (China) (http://www.transifex.com/projects/p/nova/" @@ -39,16 +39,265 @@ msgstr "" 
msgid "Exception running %(name)s post-hook: %(obj)s" msgstr "" -#: nova/api/ec2/__init__.py:243 +#: nova/api/ec2/__init__.py:244 #, python-format msgid "Keystone failure: %s" msgstr "" -#: nova/compute/manager.py:5416 +#: nova/api/ec2/__init__.py:493 +#, python-format +msgid "Unexpected %(ex_name)s raised: %(ex_str)s" +msgstr "" + +#: nova/api/ec2/__init__.py:520 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: nova/api/metadata/handler.py:155 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/metadata/handler.py:212 +#, python-format +msgid "Failed to get metadata for instance id: %s" +msgstr "" + +#: nova/api/openstack/common.py:134 +#, python-format +msgid "" +"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad " +"upgrade or db corrupted?" +msgstr "" + +#: nova/api/openstack/wsgi.py:684 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:68 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:90 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:112 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:134 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:160 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:179 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:198 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:215 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: 
nova/api/openstack/compute/contrib/admin_actions.py:234 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/admin_actions.py:392 +#, python-format +msgid "Compute.api::resetState %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/multinic.py:85 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85 +msgid "Failed to get default networks" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125 +msgid "Failed to update usages deallocating network." +msgstr "" + +#: nova/compute/api.py:561 +msgid "Failed to set instance name using multi_instance_display_name_template." +msgstr "" + +#: nova/compute/api.py:1429 +msgid "" +"Something wrong happened when trying to delete snapshot from shelved " +"instance." +msgstr "" + +#: nova/compute/api.py:3732 +msgid "Failed to update usages deallocating security group" +msgstr "" + +#: nova/compute/flavors.py:167 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: nova/compute/flavors.py:178 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: nova/compute/manager.py:366 +#, python-format +msgid "Error while trying to clean up image %s" +msgstr "" + +#: nova/compute/manager.py:755 +msgid "Failed to check if instance shared" +msgstr "" + +#: nova/compute/manager.py:821 nova/compute/manager.py:872 +msgid "Failed to complete a deletion" +msgstr "" + +#: nova/compute/manager.py:913 +msgid "Failed to stop instance" +msgstr "" + +#: nova/compute/manager.py:925 +msgid "Failed to start instance" +msgstr "" + +#: nova/compute/manager.py:950 +msgid "Failed to revert crashed migration" +msgstr "" + +#: nova/compute/manager.py:1364 +msgid "Failed to dealloc network for deleted instance" +msgstr "" + +#: nova/compute/manager.py:1385 +msgid "Failed to dealloc network for failed instance" +msgstr "" + +#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 
+msgid "Error trying to reschedule" +msgstr "" + +#: nova/compute/manager.py:1567 +#, python-format +msgid "Instance failed network setup after %(attempts)d attempt(s)" +msgstr "" + +#: nova/compute/manager.py:1755 +msgid "Instance failed block device setup" +msgstr "" + +#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 +#: nova/compute/manager.py:4058 +msgid "Instance failed to spawn" +msgstr "" + +#: nova/compute/manager.py:1957 +msgid "Unexpected build failure, not rescheduling build." +msgstr "" + +#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +msgid "Failed to allocate network(s)" +msgstr "" + +#: nova/compute/manager.py:2104 +msgid "Failure prepping block device" +msgstr "" + +#: nova/compute/manager.py:2137 +msgid "Failed to deallocate networks" +msgstr "" + +#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 +#: nova/compute/manager.py:5803 +msgid "Setting instance vm_state to ERROR" +msgstr "" + +#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#, python-format +msgid "Failed to get compute_info for %s" +msgstr "" + +#: nova/compute/manager.py:3005 +#, python-format +msgid "set_admin_password failed: %s" +msgstr "" + +#: nova/compute/manager.py:3090 +msgid "Error trying to Rescue Instance" +msgstr "" + +#: nova/compute/manager.py:3711 +#, python-format +msgid "Failed to rollback quota for failed finish_resize: %s" +msgstr "" + +#: nova/compute/manager.py:4310 +#, python-format +msgid "Failed to attach %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4349 +#, python-format +msgid "Failed to detach volume %(volume_id)s from %(mp)s" +msgstr "" + +#: nova/compute/manager.py:4422 +#, python-format +msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" +msgstr "" + +#: nova/compute/manager.py:4429 +#, python-format +msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" +msgstr "" + +#: nova/compute/manager.py:4716 +#, python-format +msgid "Pre 
live migration failed at %s" +msgstr "" + +#: nova/compute/manager.py:5216 +msgid "Periodic task failed to offload instance." +msgstr "" + +#: nova/compute/manager.py:5256 +#, python-format +msgid "Failed to generate usage audit for instance on host %s" +msgstr "" + +#: nova/compute/manager.py:5446 msgid "" "Periodic sync_power_state task had an error while processing an instance." msgstr "" +#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 +#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +msgid "error during stop() in sync_power_state." +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "未能通知单元有关实例故障的事项" @@ -68,11 +317,11 @@ msgstr "意外的异常已发生 %d 次...正在重试。" msgid "Could not release the acquired lock `%s`" msgstr "" -#: nova/openstack/common/loopingcall.py:89 +#: nova/openstack/common/loopingcall.py:95 msgid "in fixed duration looping call" msgstr "在固定时段内循环调用" -#: nova/openstack/common/loopingcall.py:136 +#: nova/openstack/common/loopingcall.py:138 msgid "in dynamic looping call" msgstr "在动态循环调用中" @@ -121,137 +370,151 @@ msgstr "数据库异常被包裹。" msgid "Failed to migrate to version %s on engine %s" msgstr "" -#: nova/virt/libvirt/driver.py:639 +#: nova/pci/pci_stats.py:119 +msgid "" +"Failed to allocate PCI devices for instance. Unassigning devices back to " +"pools. This should not happen, since the scheduler should have accurate " +"information, and allocation during claims is controlled via a hold on the " +"compute node semaphore" +msgstr "" + +#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109 +#, python-format +msgid "PCI device %s not found" +msgstr "" + +#: nova/virt/disk/api.py:388 +#, python-format +msgid "" +"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s" +msgstr "" + +#: nova/virt/libvirt/driver.py:641 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." 
msgstr "" -#: nova/virt/libvirt/driver.py:764 +#: nova/virt/libvirt/driver.py:766 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:937 +#: nova/virt/libvirt/driver.py:929 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1015 +#: nova/virt/libvirt/driver.py:1007 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1041 +#: nova/virt/libvirt/driver.py:1035 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1445 +#: nova/virt/libvirt/driver.py:1438 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1470 +#: nova/virt/libvirt/driver.py:1465 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1719 +#: nova/virt/libvirt/driver.py:1717 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1827 +#: nova/virt/libvirt/driver.py:1825 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1833 +#: nova/virt/libvirt/driver.py:1831 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1882 +#: nova/virt/libvirt/driver.py:1880 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2028 +#: nova/virt/libvirt/driver.py:2026 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2493 nova/virt/libvirt/driver.py:2498 +#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2619 +#: nova/virt/libvirt/driver.py:2620 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2770 +#: nova/virt/libvirt/driver.py:2788 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2863 +#: nova/virt/libvirt/driver.py:2881 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3645 +#: nova/virt/libvirt/driver.py:3680 #, python-format -msgid "An error occurred while trying to define a domain with xml: %s" +msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3654 +#: nova/virt/libvirt/driver.py:3684 #, python-format -msgid "An error occurred while trying to launch a defined domain with xml: %s" +msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3663 +#: nova/virt/libvirt/driver.py:3689 #, python-format -msgid "An error occurred while enabling hairpin mode on domain with xml: %s" +msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3681 +#: nova/virt/libvirt/driver.py:3703 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3988 +#: nova/virt/libvirt/driver.py:4012 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4667 +#: nova/virt/libvirt/driver.py:4691 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5419 +#: nova/virt/libvirt/driver.py:5487 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:201 +#: nova/virt/libvirt/imagebackend.py:200 #, python-format msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:229 +#: nova/virt/libvirt/imagebackend.py:227 #, python-format msgid "" "%(base)s virtual size %(base_size)s larger than flavor root disk size " "%(size)s" msgstr "" -#: nova/virt/libvirt/imagebackend.py:505 -#, python-format -msgid "error opening rbd image %s" -msgstr "" - #: nova/virt/libvirt/imagecache.py:130 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" @@ -272,15 +535,19 @@ msgstr "" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/vif.py:514 nova/virt/libvirt/vif.py:538 -#: nova/virt/libvirt/vif.py:562 +#: nova/virt/libvirt/rbd.py:62 +#, python-format +msgid "error opening rbd image %s" +msgstr "" + +#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 +#: nova/virt/libvirt/vif.py:533 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:597 nova/virt/libvirt/vif.py:629 -#: nova/virt/libvirt/vif.py:648 nova/virt/libvirt/vif.py:670 -#: nova/virt/libvirt/vif.py:690 nova/virt/libvirt/vif.py:715 -#: nova/virt/libvirt/vif.py:737 +#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 +#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 +#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 msgid "Failed while unplugging vif" msgstr "" @@ -299,8 +566,18 @@ msgstr "" msgid "Couldn't unmount the GlusterFS share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:500 +#: nova/virt/vmwareapi/vmops.py:509 #, python-format msgid "" "Failed to copy cached 
image %(source)s to %(dest)s for resize: %(error)s" msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1553 +#, python-format +msgid "Attaching network adapter failed. Exception: %s" +msgstr "" + +#: nova/virt/vmwareapi/vmops.py:1593 +#, python-format +msgid "Detaching network adapter failed. Exception: %s" +msgstr "" diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po b/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po index 9b2f86ba87..eba72ed4f7 100644 --- a/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-07-16 14:42+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Chinese (China) (http://www.transifex.com/projects/p/nova/" @@ -19,12 +19,51 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=1; plural=0;\n" -#: nova/compute/manager.py:5422 +#: nova/api/openstack/__init__.py:101 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: nova/api/openstack/__init__.py:294 +msgid "V3 API has been disabled by configuration" +msgstr "" + +#: nova/api/openstack/wsgi.py:688 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: nova/api/openstack/wsgi.py:691 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_networks.py:101 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: nova/compute/manager.py:5452 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." 
msgstr "" +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36 +msgid "" +"Skipped adding reservations_deleted_expire_idx because an equivalent index " +"already exists." +msgstr "" + +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58 +msgid "" +"Skipped removing reservations_deleted_expire_idx because index does not " +"exist." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" @@ -107,139 +146,143 @@ msgstr "正在从表 %(table)s 中删除具有id %(id)s 的重复行" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/firewall.py:446 +#: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/libvirt/driver.py:839 +#: nova/virt/disk/vfs/guestfs.py:137 +msgid "Unable to force TCG mode, libguestfs too old?" +msgstr "" + +#: nova/virt/libvirt/driver.py:837 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:958 +#: nova/virt/libvirt/driver.py:950 msgid "Instance destroyed successfully." msgstr "实例销毁成功。" -#: nova/virt/libvirt/driver.py:968 +#: nova/virt/libvirt/driver.py:960 msgid "Instance may be started again." msgstr "可再次启动实例。" -#: nova/virt/libvirt/driver.py:978 +#: nova/virt/libvirt/driver.py:970 msgid "Going to destroy instance again." 
msgstr "将再次销毁实例。" -#: nova/virt/libvirt/driver.py:1574 +#: nova/virt/libvirt/driver.py:1570 msgid "Beginning live snapshot process" msgstr "正在开始实时快照流程" -#: nova/virt/libvirt/driver.py:1577 +#: nova/virt/libvirt/driver.py:1573 msgid "Beginning cold snapshot process" msgstr "正在结束冷快照流程" -#: nova/virt/libvirt/driver.py:1606 +#: nova/virt/libvirt/driver.py:1602 msgid "Snapshot extracted, beginning image upload" msgstr "已抽取快照,正在开始映像上载" -#: nova/virt/libvirt/driver.py:1618 +#: nova/virt/libvirt/driver.py:1614 msgid "Snapshot image upload complete" msgstr "快照映像上载完成" -#: nova/virt/libvirt/driver.py:2049 +#: nova/virt/libvirt/driver.py:2047 msgid "Instance soft rebooted successfully." msgstr "已成功执行实例软重新引导。" -#: nova/virt/libvirt/driver.py:2092 +#: nova/virt/libvirt/driver.py:2090 msgid "Instance shutdown successfully." msgstr "已成功关闭实例。" -#: nova/virt/libvirt/driver.py:2100 +#: nova/virt/libvirt/driver.py:2098 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "在软重新引导期间,可能已重新引导实例,因此会立即返回。" -#: nova/virt/libvirt/driver.py:2168 +#: nova/virt/libvirt/driver.py:2167 msgid "Instance rebooted successfully." msgstr "实例成功重启。" -#: nova/virt/libvirt/driver.py:2336 +#: nova/virt/libvirt/driver.py:2335 msgid "Instance spawned successfully." 
msgstr "实例成功生产。" -#: nova/virt/libvirt/driver.py:2352 +#: nova/virt/libvirt/driver.py:2351 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "data:%(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 +#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "已返回截断的控制台日志,忽略了 %d 个字节" -#: nova/virt/libvirt/driver.py:2645 +#: nova/virt/libvirt/driver.py:2646 msgid "Creating image" msgstr "正在创建镜像" -#: nova/virt/libvirt/driver.py:2754 +#: nova/virt/libvirt/driver.py:2772 msgid "Using config drive" msgstr "正在使用配置驱动器" -#: nova/virt/libvirt/driver.py:2763 +#: nova/virt/libvirt/driver.py:2781 #, python-format msgid "Creating config drive at %(path)s" msgstr "正在 %(path)s 处创建配置驱动器" -#: nova/virt/libvirt/driver.py:3315 +#: nova/virt/libvirt/driver.py:3334 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4193 +#: nova/virt/libvirt/driver.py:4217 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4199 +#: nova/virt/libvirt/driver.py:4223 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" msgstr "对于实例 %s,在 libvirt 中找不到域。无法获取设备的块统计信息" -#: nova/virt/libvirt/driver.py:4441 +#: nova/virt/libvirt/driver.py:4465 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5174 +#: nova/virt/libvirt/driver.py:5207 msgid "Instance running successfully." 
msgstr "实例正在成功运行。" -#: nova/virt/libvirt/driver.py:5414 +#: nova/virt/libvirt/driver.py:5481 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5426 +#: nova/virt/libvirt/driver.py:5494 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5429 +#: nova/virt/libvirt/driver.py:5498 #, python-format msgid "Deletion of %s complete" msgstr "" -#: nova/virt/libvirt/firewall.py:105 +#: nova/virt/libvirt/firewall.py:106 msgid "Called setup_basic_filtering in nwfilter" msgstr "在 nwfilter 里调用 setup_basic_filtering" -#: nova/virt/libvirt/firewall.py:113 +#: nova/virt/libvirt/firewall.py:114 msgid "Ensuring static filters" msgstr "正在确保静态过滤器" -#: nova/virt/libvirt/firewall.py:304 +#: nova/virt/libvirt/firewall.py:305 msgid "Attempted to unfilter instance which is not filtered" msgstr "试图不过滤没有过滤的实例" @@ -298,11 +341,11 @@ msgstr "损坏的基文件:%s" msgid "Removable base files: %s" msgstr "可删除的基文件:%s" -#: nova/virt/libvirt/utils.py:536 +#: nova/virt/libvirt/utils.py:490 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1353 +#: nova/virt/xenapi/vm_utils.py:1355 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " diff --git a/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po b/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po index 0c21ef667d..76a4587a3c 100644 --- a/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-07-21 06:03+0000\n" +"POT-Creation-Date: 2014-08-12 06:05+0000\n" "PO-Revision-Date: 2014-06-18 19:31+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Chinese (Taiwan) (http://www.transifex.com/projects/p/nova/" @@ -19,12 +19,51 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=1; plural=0;\n" -#: 
nova/compute/manager.py:5422 +#: nova/api/openstack/__init__.py:101 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: nova/api/openstack/__init__.py:294 +msgid "V3 API has been disabled by configuration" +msgstr "" + +#: nova/api/openstack/wsgi.py:688 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: nova/api/openstack/wsgi.py:691 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: nova/api/openstack/compute/contrib/os_networks.py:101 +#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: nova/compute/manager.py:5452 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." msgstr "" +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36 +msgid "" +"Skipped adding reservations_deleted_expire_idx because an equivalent index " +"already exists." +msgstr "" + +#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58 +#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58 +msgid "" +"Skipped removing reservations_deleted_expire_idx because index does not " +"exist." +msgstr "" + #: nova/openstack/common/eventlet_backdoor.py:141 #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" @@ -107,139 +146,143 @@ msgstr "" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" -#: nova/virt/firewall.py:446 +#: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/libvirt/driver.py:839 +#: nova/virt/disk/vfs/guestfs.py:137 +msgid "Unable to force TCG mode, libguestfs too old?" 
+msgstr "" + +#: nova/virt/libvirt/driver.py:837 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:958 +#: nova/virt/libvirt/driver.py:950 msgid "Instance destroyed successfully." msgstr "" -#: nova/virt/libvirt/driver.py:968 +#: nova/virt/libvirt/driver.py:960 msgid "Instance may be started again." msgstr "" -#: nova/virt/libvirt/driver.py:978 +#: nova/virt/libvirt/driver.py:970 msgid "Going to destroy instance again." msgstr "" -#: nova/virt/libvirt/driver.py:1574 +#: nova/virt/libvirt/driver.py:1570 msgid "Beginning live snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1577 +#: nova/virt/libvirt/driver.py:1573 msgid "Beginning cold snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1606 +#: nova/virt/libvirt/driver.py:1602 msgid "Snapshot extracted, beginning image upload" msgstr "" -#: nova/virt/libvirt/driver.py:1618 +#: nova/virt/libvirt/driver.py:1614 msgid "Snapshot image upload complete" msgstr "" -#: nova/virt/libvirt/driver.py:2049 +#: nova/virt/libvirt/driver.py:2047 msgid "Instance soft rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2092 +#: nova/virt/libvirt/driver.py:2090 msgid "Instance shutdown successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2100 +#: nova/virt/libvirt/driver.py:2098 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" -#: nova/virt/libvirt/driver.py:2168 +#: nova/virt/libvirt/driver.py:2167 msgid "Instance rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2336 +#: nova/virt/libvirt/driver.py:2335 msgid "Instance spawned successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:2352 +#: nova/virt/libvirt/driver.py:2351 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/virt/libvirt/driver.py:2391 nova/virt/libvirt/driver.py:2418 +#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" -#: nova/virt/libvirt/driver.py:2645 +#: nova/virt/libvirt/driver.py:2646 msgid "Creating image" msgstr "" -#: nova/virt/libvirt/driver.py:2754 +#: nova/virt/libvirt/driver.py:2772 msgid "Using config drive" msgstr "" -#: nova/virt/libvirt/driver.py:2763 +#: nova/virt/libvirt/driver.py:2781 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/libvirt/driver.py:3315 +#: nova/virt/libvirt/driver.py:3334 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4193 +#: nova/virt/libvirt/driver.py:4217 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4199 +#: nova/virt/libvirt/driver.py:4223 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" msgstr "" -#: nova/virt/libvirt/driver.py:4441 +#: nova/virt/libvirt/driver.py:4465 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5174 +#: nova/virt/libvirt/driver.py:5207 msgid "Instance running successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:5414 +#: nova/virt/libvirt/driver.py:5481 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5426 +#: nova/virt/libvirt/driver.py:5494 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5429 +#: nova/virt/libvirt/driver.py:5498 #, python-format msgid "Deletion of %s complete" msgstr "" -#: nova/virt/libvirt/firewall.py:105 +#: nova/virt/libvirt/firewall.py:106 msgid "Called setup_basic_filtering in nwfilter" msgstr "" -#: nova/virt/libvirt/firewall.py:113 +#: nova/virt/libvirt/firewall.py:114 msgid "Ensuring static filters" msgstr "" -#: nova/virt/libvirt/firewall.py:304 +#: nova/virt/libvirt/firewall.py:305 msgid "Attempted to unfilter instance which is not filtered" msgstr "" @@ -298,11 +341,11 @@ msgstr "已毀損的基本檔案:%s" msgid "Removable base files: %s" msgstr "可移除的基本檔案:%s" -#: nova/virt/libvirt/utils.py:536 +#: nova/virt/libvirt/utils.py:490 msgid "findmnt tool is not installed" msgstr "" -#: nova/virt/xenapi/vm_utils.py:1353 +#: nova/virt/xenapi/vm_utils.py:1355 #, python-format msgid "" "Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s " From eea7231be67376b5ece03aeb94aed309f524e8b7 Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Tue, 12 Aug 2014 16:47:57 +0900 Subject: [PATCH 333/486] Backport some v3 certificate API unittest to v2 API This patch add some v3 certificate API unittest into V2 API to improve V2 API unittesting. 
Change-Id: Icf58abbf1e83636047c1f98ff80a38077e2d694b --- .../compute/contrib/test_certificates.py | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/nova/tests/api/openstack/compute/contrib/test_certificates.py b/nova/tests/api/openstack/compute/contrib/test_certificates.py index 12cc06f710..af0ea6060c 100644 --- a/nova/tests/api/openstack/compute/contrib/test_certificates.py +++ b/nova/tests/api/openstack/compute/contrib/test_certificates.py @@ -19,6 +19,9 @@ from nova.api.openstack.compute.contrib import certificates from nova import context +from nova import exception +from nova.openstack.common import policy as common_policy +from nova import policy from nova import test from nova.tests.api.openstack import fakes @@ -49,6 +52,18 @@ def test_certificates_show_root(self): response = {'certificate': {'data': 'fakeroot', 'private_key': None}} self.assertEqual(res_dict, response) + def test_certificates_show_policy_failed(self): + rules = { + "compute_extension:certificates": + common_policy.parse_rule("!") + } + policy.set_rules(rules) + req = fakes.HTTPRequest.blank('/v2/fake/os-certificates/root') + exc = self.assertRaises(exception.PolicyNotAuthorized, + self.controller.show, req, 'root') + self.assertIn("compute_extension:certificates", + exc.format_message()) + def test_certificates_create_certificate(self): self.mox.StubOutWithMock(self.controller.cert_rpcapi, 'generate_x509_cert') @@ -69,6 +84,18 @@ def test_certificates_create_certificate(self): } self.assertEqual(res_dict, response) + def test_certificates_create_policy_failed(self): + rules = { + "compute_extension:certificates": + common_policy.parse_rule("!") + } + policy.set_rules(rules) + req = fakes.HTTPRequest.blank('/v2/fake/os-certificates/') + exc = self.assertRaises(exception.PolicyNotAuthorized, + self.controller.create, req) + self.assertIn("compute_extension:certificates", + exc.format_message()) + class CertificatesSerializerTest(test.NoDBTestCase): def 
test_index_serializer(self): From 7b15ed36cdf16f53255ded135bb3c49c2ae76d3b Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Tue, 12 Aug 2014 09:55:43 +0200 Subject: [PATCH 334/486] libvirt: fix _disk_resize to make sure converted image will be restored During the process of resizing disk if an image is in qcow2 with partition less the process converts the image to raw. After the extend we should to restore the original format in all cases not only if 'use_cow_images' is configured to True. Change-Id: I792e0fb986a6c5cf9ac477cef0949c8a40099faa Closes-Bug: #1298976 --- nova/tests/virt/libvirt/test_driver.py | 45 -------------------------- nova/virt/libvirt/driver.py | 6 ++-- 2 files changed, 4 insertions(+), 47 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index b237ba6156..db8043a68e 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -9578,26 +9578,9 @@ def test_disk_qcow2_to_raw(self, mock_execute): def test_disk_resize_raw(self, mock_extend): info = {'type': 'raw', 'path': '/test/disk'} - self.flags(use_cow_images=False) - self.libvirtconnection._disk_resize(info, 50) mock_extend.assert_called_once_with(info['path'], 50, use_cow=False) - @mock.patch('nova.virt.disk.api.extend') - def test_disk_resize_raw_use_cow_images(self, mock_extend): - info = {'type': 'raw', 'path': '/test/disk'} - - self.flags(use_cow_images=True) - - with mock.patch.object( - self.libvirtconnection, '_disk_raw_to_qcow2') as mock_convert: - - self.libvirtconnection._disk_resize(info, 50) - - mock_convert.assert_called_once_with(info['path']) - mock_extend.assert_called_once_with( - info['path'], 50, use_cow=False) - @mock.patch('nova.virt.disk.api.can_resize_image') @mock.patch('nova.virt.disk.api.is_image_partitionless') @mock.patch('nova.virt.disk.api.extend') @@ -9605,34 +9588,6 @@ def test_disk_resize_qcow2( self, mock_extend, mock_can_resize, mock_is_partitionless): 
info = {'type': 'qcow2', 'path': '/test/disk'} - self.flags(use_cow_images=False) - - with contextlib.nested( - mock.patch.object( - self.libvirtconnection, '_disk_qcow2_to_raw'), - mock.patch.object( - self.libvirtconnection, '_disk_raw_to_qcow2'))\ - as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2): - - mock_can_resize.return_value = True - mock_is_partitionless.return_value = True - - self.libvirtconnection._disk_resize(info, 50) - - mock_disk_qcow2_to_raw.assert_called_once_with(info['path']) - mock_extend.assert_called_once_with( - info['path'], 50, use_cow=False) - self.assertFalse(mock_disk_raw_to_qcow2.called) - - @mock.patch('nova.virt.disk.api.can_resize_image') - @mock.patch('nova.virt.disk.api.is_image_partitionless') - @mock.patch('nova.virt.disk.api.extend') - def test_disk_resize_qcow2_use_cow_images( - self, mock_extend, mock_can_resize, mock_is_partitionless): - info = {'type': 'qcow2', 'path': '/test/disk'} - - self.flags(use_cow_images=True) - with contextlib.nested( mock.patch.object( self.libvirtconnection, '_disk_qcow2_to_raw'), diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 1d6a99bd1e..da3fb55d2d 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -5252,7 +5252,7 @@ def _disk_resize(self, info, size): """ # If we have a non partitioned image that we can extend # then ensure we're in 'raw' format so we can extend file system. 
- fmt = info['type'] + fmt, org = [info['type']] * 2 pth = info['path'] if (size and fmt == 'qcow2' and disk.can_resize_image(pth, size) and @@ -5264,7 +5264,7 @@ def _disk_resize(self, info, size): use_cow = fmt == 'qcow2' disk.extend(pth, size, use_cow=use_cow) - if fmt == 'raw' and CONF.use_cow_images: + if fmt != org: # back to qcow2 (no backing_file though) so that snapshot # will be available self._disk_raw_to_qcow2(pth) @@ -5279,6 +5279,8 @@ def finish_migration(self, context, migration, instance, disk_info, for info in disk_info: size = self._disk_size_from_instance(instance, info) self._disk_resize(info, size) + if info['type'] == 'raw' and CONF.use_cow_images: + self._disk_raw_to_qcow2(info['path']) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, From ddd92b229daa31f6d8f6683986e135ebd36829c5 Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Tue, 12 Aug 2014 15:11:01 +0200 Subject: [PATCH 335/486] libvirt: skip disk resize when resize_instance is False The method finish_migration is called with an extra parameter 'resize_instance'. It should be used to know if it is necessary or not to resize the disks and so call the method 'disk_resize'. Change-Id: Ia5a9ad7994fa9abab7cf47d36468c46d97065991 Closes-Bug: #1298981 --- nova/tests/virt/libvirt/test_driver.py | 25 ++++++++++++++----------- nova/virt/libvirt/driver.py | 3 ++- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index db8043a68e..b8c6f2bb15 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -9605,7 +9605,7 @@ def test_disk_resize_qcow2( info['path'], 50, use_cow=False) mock_disk_raw_to_qcow2.assert_called_once_with(info['path']) - def _test_finish_migration(self, power_on): + def _test_finish_migration(self, power_on, resize_instance=False): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .finish_migration. 
""" @@ -9617,12 +9617,7 @@ def _test_finish_migration(self, power_on): disk_info_text = jsonutils.dumps(disk_info) powered_on = power_on self.fake_create_domain_called = False - - def fake_can_resize_image(path, size): - return False - - def fake_extend(path, size, use_cow=False): - pass + self.fake_disk_resize_called = False def fake_to_xml(context, instance, network_info, disk_info, image_meta=None, rescue=None, @@ -9656,10 +9651,12 @@ def fake_get_info(instance): else: return {'state': power_state.SHUTDOWN} + def fake_disk_resize(info, size): + self.fake_disk_resize_called = True + self.flags(use_cow_images=True) - self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend) - self.stubs.Set(libvirt_driver.disk, 'can_resize_image', - fake_can_resize_image) + self.stubs.Set(self.libvirtconnection, '_disk_resize', + fake_disk_resize) self.stubs.Set(self.libvirtconnection, '_get_guest_xml', fake_to_xml) self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs) self.stubs.Set(self.libvirtconnection, '_create_image', @@ -9678,8 +9675,14 @@ def fake_get_info(instance): self.libvirtconnection.finish_migration( context.get_admin_context(), None, ins_ref, - disk_info_text, [], None, None, None, power_on) + disk_info_text, [], None, + resize_instance, None, power_on) self.assertTrue(self.fake_create_domain_called) + self.assertEqual( + resize_instance, self.fake_disk_resize_called) + + def test_finish_migration_resize(self): + self._test_finish_migration(True, resize_instance=True) def test_finish_migration_power_on(self): self._test_finish_migration(True) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index da3fb55d2d..26fb970da7 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -5278,7 +5278,8 @@ def finish_migration(self, context, migration, instance, disk_info, disk_info = jsonutils.loads(disk_info) for info in disk_info: size = self._disk_size_from_instance(instance, info) - self._disk_resize(info, size) + 
if resize_instance: + self._disk_resize(info, size) if info['type'] == 'raw' and CONF.use_cow_images: self._disk_raw_to_qcow2(info['path']) From 1246a1494d32cd8a655db9252ce85bc2774fc16c Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 11 Aug 2014 10:03:56 -0700 Subject: [PATCH 336/486] Convert last use of direct database instance fetching from network api This is the last db.instance_*() call in network/api.py. This patch converts it to use the Instance object. Related to blueprint compute-manager-objects-juno Change-Id: Id7897da6daa3d76a95403466be17f9eaef8b7850 --- nova/network/api.py | 2 +- nova/tests/network/test_api.py | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/nova/network/api.py b/nova/network/api.py index 7240fa0430..a8ef7b294b 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -195,7 +195,7 @@ def associate_floating_ip(self, context, instance, instance_id=orig_instance_uuid) LOG.info(_('re-assign floating IP %(address)s from ' 'instance %(instance_id)s') % msg_dict) - orig_instance = self.db.instance_get_by_uuid(context, + orig_instance = objects.Instance.get_by_uuid(context, orig_instance_uuid) # purge cached nw info for the original instance diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py index 35b3cced3c..5b2c76437a 100644 --- a/nova/tests/network/test_api.py +++ b/nova/tests/network/test_api.py @@ -172,8 +172,10 @@ def fake_associate(*args, **kwargs): self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip', fake_associate) - def fake_instance_get_by_uuid(context, instance_uuid): - return {'uuid': instance_uuid} + def fake_instance_get_by_uuid(context, instance_uuid, + columns_to_join=None, + use_slave=None): + return fake_instance.fake_db_instance(uuid=instance_uuid) self.stubs.Set(self.network_api.db, 'instance_get_by_uuid', fake_instance_get_by_uuid) From aaa2cd14405129f1aabd85bf86ccecf25e52f8c1 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 11 Aug 2014 11:21:19 
-0700 Subject: [PATCH 337/486] Convert network/api.py uses of vif database functions to objects This makes network/api.py use the VirtualInterface object instead of direct database queries. This is the last direct/obvious use of nova.db.* in the network api. Related to blueprint compute-manager-objects-juno Change-Id: Ia3eb1694f3708aeceb4c77876a210a7cb6532be1 --- .../compute/contrib/virtual_interfaces.py | 3 ++- nova/network/api.py | 20 +++++++------- .../contrib/test_virtual_interfaces.py | 2 +- nova/tests/network/test_api.py | 26 +++++++++---------- 4 files changed, 25 insertions(+), 26 deletions(-) diff --git a/nova/api/openstack/compute/contrib/virtual_interfaces.py b/nova/api/openstack/compute/contrib/virtual_interfaces.py index a891ce6741..d071df461e 100644 --- a/nova/api/openstack/compute/contrib/virtual_interfaces.py +++ b/nova/api/openstack/compute/contrib/virtual_interfaces.py @@ -59,7 +59,8 @@ def __init__(self): def _items(self, req, server_id, entity_maker): """Returns a list of VIFs, transformed through entity_maker.""" context = req.environ['nova.context'] - instance = common.get_instance(self.compute_api, context, server_id) + instance = common.get_instance(self.compute_api, context, server_id, + want_objects=True) vifs = self.network_api.get_vifs_by_instance(context, instance) limited_list = common.limited(vifs, req) diff --git a/nova/network/api.py b/nova/network/api.py index a8ef7b294b..13cd114d54 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -143,23 +143,23 @@ def get_instance_id_by_floating_address(self, context, address): @wrap_check_policy def get_vifs_by_instance(self, context, instance): - vifs = self.db.virtual_interface_get_by_instance(context, - instance['uuid']) + vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context, + instance.uuid) for vif in vifs: - if vif.get('network_id') is not None: - network = objects.Network.get_by_id(context, vif['network_id'], + if vif.network_id is not None: + network = 
objects.Network.get_by_id(context, vif.network_id, project_only='allow_none') - vif['net_uuid'] = network.uuid + vif.net_uuid = network.uuid return vifs @wrap_check_policy def get_vif_by_mac_address(self, context, mac_address): - vif = self.db.virtual_interface_get_by_address(context, - mac_address) - if vif.get('network_id') is not None: - network = objects.Network.get_by_id(context, vif['network_id'], + vif = objects.VirtualInterface.get_by_address(context, + mac_address) + if vif.network_id is not None: + network = objects.Network.get_by_id(context, vif.network_id, project_only='allow_none') - vif['net_uuid'] = network.uuid + vif.net_uuid = network.uuid return vif @wrap_check_policy diff --git a/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py b/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py index 59947e5922..a14e312cc9 100644 --- a/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py +++ b/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py @@ -82,7 +82,7 @@ def test_vif_instance_not_found(self): compute_api.API.get(fake_context, 'fake_uuid', expected_attrs=None, - want_objects=False).AndRaise( + want_objects=True).AndRaise( exception.InstanceNotFound(instance_id='instance-0000')) self.mox.ReplayAll() diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py index 5b2c76437a..273b2d7415 100644 --- a/nova/tests/network/test_api.py +++ b/nova/tests/network/test_api.py @@ -37,6 +37,7 @@ from nova.tests import fake_instance from nova.tests.objects import test_fixed_ip from nova.tests.objects import test_flavor +from nova.tests.objects import test_virtual_interface from nova import utils FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16' @@ -103,39 +104,36 @@ def test_get(self, mock_get): def test_get_vifs_by_instance(self, mock_get_by_instance, mock_get_by_id): mock_get_by_instance.return_value = [ - {'network_id': mock.sentinel.network_id}] + 
dict(test_virtual_interface.fake_vif, + network_id=123)] mock_get_by_id.return_value = objects.Network() mock_get_by_id.return_value.uuid = mock.sentinel.network_uuid instance = objects.Instance(uuid=mock.sentinel.inst_uuid) vifs = self.network_api.get_vifs_by_instance(self.context, instance) self.assertEqual(1, len(vifs)) - self.assertEqual({'network_id': mock.sentinel.network_id, - 'net_uuid': str(mock.sentinel.network_uuid)}, - vifs[0]) + self.assertEqual(123, vifs[0].network_id) + self.assertEqual(str(mock.sentinel.network_uuid), vifs[0].net_uuid) mock_get_by_instance.assert_called_once_with( - self.context, str(mock.sentinel.inst_uuid)) - mock_get_by_id.assert_called_once_with(self.context, - mock.sentinel.network_id, + self.context, str(mock.sentinel.inst_uuid), use_slave=False) + mock_get_by_id.assert_called_once_with(self.context, 123, project_only='allow_none') @mock.patch('nova.objects.Network.get_by_id') @mock.patch('nova.db.virtual_interface_get_by_address') def test_get_vif_by_mac_address(self, mock_get_by_address, mock_get_by_id): - mock_get_by_address.return_value = { - 'network_id': mock.sentinel.network_id} + mock_get_by_address.return_value = dict( + test_virtual_interface.fake_vif, network_id=123) mock_get_by_id.return_value = objects.Network( uuid=mock.sentinel.network_uuid) vif = self.network_api.get_vif_by_mac_address(self.context, mock.sentinel.mac) - self.assertEqual({'network_id': mock.sentinel.network_id, - 'net_uuid': str(mock.sentinel.network_uuid)}, - vif) + self.assertEqual(123, vif.network_id) + self.assertEqual(str(mock.sentinel.network_uuid), vif.net_uuid) mock_get_by_address.assert_called_once_with(self.context, mock.sentinel.mac) - mock_get_by_id.assert_called_once_with(self.context, - mock.sentinel.network_id, + mock_get_by_id.assert_called_once_with(self.context, 123, project_only='allow_none') def test_allocate_for_instance_handles_macs_passed(self): From 9f2658d8cf0b805560169d457ac83d10325f24f2 Mon Sep 17 00:00:00 2001 From: 
Ihar Hrachyshka Date: Mon, 16 Jun 2014 09:26:39 +0200 Subject: [PATCH 338/486] Consistently use jsonutils instead of specific implementation jsonutils have several benefits in comparison to pure json implementation, like enabling C boosted encoders and decoders for Python2.6 by using simplejson when available. Change-Id: I24d0cd442e8d9d89fac50e43fc97f7bb4a293c3d Closes-Bug: 1329496 --- nova/api/metadata/base.py | 8 ++++---- .../openstack/compute/contrib/security_groups.py | 4 ++-- .../compute/plugins/v3/security_groups.py | 5 ++--- nova/image/glance.py | 3 +-- nova/scheduler/scheduler_options.py | 4 ++-- .../contrib/test_server_external_events.py | 5 ++--- .../plugins/v3/test_server_external_events.py | 5 ++--- nova/tests/api/test_auth.py | 7 +++---- nova/tests/integrated/test_api_samples.py | 5 ++--- .../integrated/v3/test_console_auth_tokens.py | 4 ++-- nova/tests/policy_fixture.py | 6 +++--- nova/tests/test_metadata.py | 15 ++++++++------- nova/tests/virt/libvirt/test_imagecache.py | 4 ++-- nova/virt/libvirt/imagecache.py | 3 +-- nova/virt/storage_users.py | 8 ++++---- 15 files changed, 40 insertions(+), 46 deletions(-) diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py index 7d77807dba..86318ceb37 100644 --- a/nova/api/metadata/base.py +++ b/nova/api/metadata/base.py @@ -17,7 +17,6 @@ """Instance Metadata information.""" import base64 -import json import os import posixpath @@ -33,6 +32,7 @@ from nova import objects from nova.objects import base as obj_base from nova.openstack.common import importutils +from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova import utils @@ -327,7 +327,7 @@ def _metadata_as_json(self, version, path): metadata['random_seed'] = base64.b64encode(os.urandom(512)) self.set_mimetype(MIME_TYPE_APPLICATION_JSON) - return json.dumps(metadata) + return jsonutils.dumps(metadata) def _handle_content(self, path_tokens): if 
len(path_tokens) == 1: @@ -361,7 +361,7 @@ def _password(self, version, path): def _vendor_data(self, version, path): if self._check_os_version(HAVANA, version): self.set_mimetype(MIME_TYPE_APPLICATION_JSON) - return json.dumps(self.vddriver.get()) + return jsonutils.dumps(self.vddriver.get()) raise KeyError(path) def _check_version(self, required, requested, versions=VERSIONS): @@ -440,7 +440,7 @@ def metadata_for_config_drive(self): pass filepath = os.path.join('ec2', version, 'meta-data.json') - yield (filepath, json.dumps(data['meta-data'])) + yield (filepath, jsonutils.dumps(data['meta-data'])) ALL_OPENSTACK_VERSIONS = OPENSTACK_VERSIONS + ["latest"] for version in ALL_OPENSTACK_VERSIONS: diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py index 06e27580b3..415fcfed9e 100644 --- a/nova/api/openstack/compute/contrib/security_groups.py +++ b/nova/api/openstack/compute/contrib/security_groups.py @@ -17,7 +17,6 @@ """The security groups extension.""" import contextlib -import json import webob from webob import exc @@ -32,6 +31,7 @@ from nova.i18n import _ from nova.network.security_group import neutron_driver from nova.network.security_group import openstack_driver +from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import xmlutils from nova.virt import netutils @@ -565,7 +565,7 @@ def _extend_servers(self, req, servers): else: try: # try converting to json - req_obj = json.loads(req.body) + req_obj = jsonutils.loads(req.body) # Add security group to server, if no security group was in # request add default since that is the group it is part of servers[0][key] = req_obj['server'].get( diff --git a/nova/api/openstack/compute/plugins/v3/security_groups.py b/nova/api/openstack/compute/plugins/v3/security_groups.py index 08aa949d04..19bc5fa385 100644 --- a/nova/api/openstack/compute/plugins/v3/security_groups.py +++ 
b/nova/api/openstack/compute/plugins/v3/security_groups.py @@ -16,8 +16,6 @@ """The security groups extension.""" -import json - from nova.api.openstack.compute.schemas.v3 import security_groups as \ schema_security_groups from nova.api.openstack import extensions @@ -27,6 +25,7 @@ from nova import exception from nova.network.security_group import neutron_driver from nova.network.security_group import openstack_driver +from nova.openstack.common import jsonutils ALIAS = 'os-security-groups' @@ -81,7 +80,7 @@ def _extend_servers(self, req, servers): # one server in an API request. else: # try converting to json - req_obj = json.loads(req.body) + req_obj = jsonutils.loads(req.body) # Add security group to server, if no security group was in # request add default since that is the group it is part of servers[0][ATTRIBUTE_NAME] = req_obj['server'].get( diff --git a/nova/image/glance.py b/nova/image/glance.py index 1f2a0664e4..fccbafa55b 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -19,7 +19,6 @@ import copy import itertools -import json import random import sys import time @@ -126,7 +125,7 @@ def generate_identity_headers(context, status='Confirmed'): 'X-Tenant-Id': getattr(context, 'tenant', None), 'X-Roles': ','.join(context.roles), 'X-Identity-Status': status, - 'X-Service-Catalog': json.dumps(context.service_catalog), + 'X-Service-Catalog': jsonutils.dumps(context.service_catalog), } diff --git a/nova/scheduler/scheduler_options.py b/nova/scheduler/scheduler_options.py index 48019d5187..c0cf848d19 100644 --- a/nova/scheduler/scheduler_options.py +++ b/nova/scheduler/scheduler_options.py @@ -21,13 +21,13 @@ """ import datetime -import json import os from oslo.config import cfg from nova.i18n import _ from nova.openstack.common import excutils +from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils @@ -73,7 +73,7 @@ def _get_file_timestamp(self, filename): def 
_load_file(self, handle): """Decode the JSON file. Broken out for testing.""" try: - return json.load(handle) + return jsonutils.load(handle) except ValueError as e: LOG.exception(_("Could not decode scheduler options: '%s'"), e) return {} diff --git a/nova/tests/api/openstack/compute/contrib/test_server_external_events.py b/nova/tests/api/openstack/compute/contrib/test_server_external_events.py index c2852fdde4..5439ce79db 100644 --- a/nova/tests/api/openstack/compute/contrib/test_server_external_events.py +++ b/nova/tests/api/openstack/compute/contrib/test_server_external_events.py @@ -12,8 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. -import json - import mock import webob @@ -21,6 +19,7 @@ from nova import context from nova import exception from nova import objects +from nova.openstack.common import jsonutils from nova import test fake_instances = { @@ -71,7 +70,7 @@ def _create_req(self, body): req.method = 'POST' req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context - req.body = json.dumps(body) + req.body = jsonutils.dumps(body) return req def _assert_call(self, req, body, expected_uuids, expected_events): diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_external_events.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_external_events.py index 882a485984..52ce2941a0 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_external_events.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_external_events.py @@ -12,8 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import json - import mock import webob @@ -21,6 +19,7 @@ from nova import context from nova import exception from nova import objects +from nova.openstack.common import jsonutils from nova import test fake_instances = { @@ -66,7 +65,7 @@ def _create_req(self, body): req.method = 'POST' req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context - req.body = json.dumps(body) + req.body = jsonutils.dumps(body) return req def _assert_call(self, req, body, expected_uuids, expected_events): diff --git a/nova/tests/api/test_auth.py b/nova/tests/api/test_auth.py index 992ba48942..8505e381e6 100644 --- a/nova/tests/api/test_auth.py +++ b/nova/tests/api/test_auth.py @@ -12,14 +12,13 @@ # License for the specific language governing permissions and limitations # under the License. -import json - from oslo.config import cfg import webob import webob.exc import nova.api.auth from nova.i18n import _ +from nova.openstack.common import jsonutils from nova.openstack.common.middleware import request_id from nova import test @@ -41,7 +40,7 @@ def fake_app(req): self.request = webob.Request.blank('/') self.request.headers['X_TENANT_ID'] = 'testtenantid' self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken' - self.request.headers['X_SERVICE_CATALOG'] = json.dumps({}) + self.request.headers['X_SERVICE_CATALOG'] = jsonutils.dumps({}) def test_no_user_or_user_id(self): response = self.request.get_response(self.middleware) @@ -102,7 +101,7 @@ def role_check_app(req): self.request.headers['X_USER'] = 'testuser' self.request.headers['X_TENANT_ID'] = 'testtenantid' self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken' - self.request.headers['X_SERVICE_CATALOG'] = json.dumps({}) + self.request.headers['X_SERVICE_CATALOG'] = jsonutils.dumps({}) self.roles = "pawn, knight, rook" diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index 0783abfa38..4707a74889 100644 --- a/nova/tests/integrated/test_api_samples.py 
+++ b/nova/tests/integrated/test_api_samples.py @@ -17,7 +17,6 @@ import copy import datetime import inspect -import json import os import re import urllib @@ -2128,7 +2127,7 @@ class ConsoleAuthTokensSampleJsonTests(ServersSampleBase): "Console_auth_tokens") def _get_console_url(self, data): - return json.loads(data)["console"]["url"] + return jsonutils.loads(data)["console"]["url"] def _get_console_token(self, uuid): response = self._do_post('servers/%s/action' % uuid, @@ -2600,7 +2599,7 @@ def test_create_network(self): def test_delete_network(self): response = self._do_post('os-tenant-networks', "networks-post-req", {}) - net = json.loads(response.read()) + net = jsonutils.loads(response.read()) response = self._do_delete('os-tenant-networks/%s' % net["network"]["id"]) self.assertEqual(response.status, 202) diff --git a/nova/tests/integrated/v3/test_console_auth_tokens.py b/nova/tests/integrated/v3/test_console_auth_tokens.py index a7cc228a57..100bc84da9 100644 --- a/nova/tests/integrated/v3/test_console_auth_tokens.py +++ b/nova/tests/integrated/v3/test_console_auth_tokens.py @@ -12,9 +12,9 @@ # License for the specific language governing permissions and limitations # under the License. -import json import re +from nova.openstack.common import jsonutils from nova.tests.integrated.v3 import test_servers @@ -23,7 +23,7 @@ class ConsoleAuthTokensSampleJsonTests(test_servers.ServersSampleBase): extra_extensions_to_load = ["os-remote-consoles"] def _get_console_url(self, data): - return json.loads(data)["console"]["url"] + return jsonutils.loads(data)["console"]["url"] def _get_console_token(self, uuid): response = self._do_post('servers/%s/action' % uuid, diff --git a/nova/tests/policy_fixture.py b/nova/tests/policy_fixture.py index 8f7e7206fd..3da8cc7d8a 100644 --- a/nova/tests/policy_fixture.py +++ b/nova/tests/policy_fixture.py @@ -12,12 +12,12 @@ # License for the specific language governing permissions and limitations # under the License. 
-import json import os import fixtures from oslo.config import cfg +from nova.openstack.common import jsonutils from nova.openstack.common import policy as common_policy import nova.policy from nova.tests import fake_policy @@ -56,7 +56,7 @@ def setUp(self): allow users of the specified role only """ super(RoleBasedPolicyFixture, self).setUp() - policy = json.load(open(CONF.policy_file)) + policy = jsonutils.load(open(CONF.policy_file)) # Convert all actions to require specified role for action, rule in policy.iteritems(): @@ -66,7 +66,7 @@ def setUp(self): self.policy_file_name = os.path.join(self.policy_dir.path, 'policy.json') with open(self.policy_file_name, 'w') as policy_file: - json.dump(policy, policy_file) + jsonutils.dump(policy, policy_file) CONF.set_override('policy_file', self.policy_file_name) nova.policy.reset() nova.policy.init() diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index 8ea9d3b1fa..7bf28e1131 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -19,7 +19,6 @@ import base64 import hashlib import hmac -import json import re try: @@ -43,6 +42,7 @@ from nova import exception from nova.network import api as network_api from nova import objects +from nova.openstack.common import jsonutils from nova import test from nova.tests import fake_block_device from nova.tests import fake_instance @@ -417,7 +417,7 @@ def test_metadata_json(self): mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json") mdjson = mdinst.lookup("/openstack/latest/meta_data.json") - mddict = json.loads(mdjson) + mddict = jsonutils.loads(mdjson) self.assertEqual(mddict['uuid'], self.instance['uuid']) self.assertIn('files', mddict) @@ -447,7 +447,7 @@ def test_extra_md(self): mdinst = fake_InstanceMetadata(self.stubs, inst, extra_md=extra) mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json") - mddict = json.loads(mdjson) + mddict = jsonutils.loads(mdjson) for key, val in extra.iteritems(): 
self.assertEqual(mddict[key], val) @@ -485,20 +485,21 @@ def test_random_seed(self): # verify that 2013-04-04 has the 'random' field mdjson = mdinst.lookup("/openstack/2013-04-04/meta_data.json") - mddict = json.loads(mdjson) + mddict = jsonutils.loads(mdjson) self.assertIn("random_seed", mddict) self.assertEqual(len(base64.b64decode(mddict["random_seed"])), 512) # verify that older version do not have it mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json") - self.assertNotIn("random_seed", json.loads(mdjson)) + self.assertNotIn("random_seed", jsonutils.loads(mdjson)) def test_no_dashes_in_metadata(self): # top level entries in meta_data should not contain '-' in their name inst = self.instance.obj_clone() mdinst = fake_InstanceMetadata(self.stubs, inst) - mdjson = json.loads(mdinst.lookup("/openstack/latest/meta_data.json")) + mdjson = jsonutils.loads( + mdinst.lookup("/openstack/latest/meta_data.json")) self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1]) @@ -534,7 +535,7 @@ def get(self): # verify that 2013-10-17 has the vendor_data.json file vdpath = "/openstack/2013-10-17/vendor_data.json" - vd = json.loads(mdinst.lookup(vdpath)) + vd = jsonutils.loads(mdinst.lookup(vdpath)) # the instance should be passed through, and our class copies the # uuid through to 'inst_uuid'. 
diff --git a/nova/tests/virt/libvirt/test_imagecache.py b/nova/tests/virt/libvirt/test_imagecache.py index 5c05f4a14e..1045f8e3bb 100644 --- a/nova/tests/virt/libvirt/test_imagecache.py +++ b/nova/tests/virt/libvirt/test_imagecache.py @@ -17,7 +17,6 @@ import contextlib import cStringIO import hashlib -import json import os import time @@ -26,6 +25,7 @@ from nova import conductor from nova import db from nova.openstack.common import importutils +from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova import test @@ -501,7 +501,7 @@ def test_handle_base_image_checksum_fails(self): d = {'sha1': '21323454'} with open('%s.info' % fname, 'w') as f: - f.write(json.dumps(d)) + f.write(jsonutils.dumps(d)) image_cache_manager = imagecache.ImageCacheManager() image_cache_manager.unexplained_images = [fname] diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py index cd17d11e2c..fa8e460ded 100644 --- a/nova/virt/libvirt/imagecache.py +++ b/nova/virt/libvirt/imagecache.py @@ -21,7 +21,6 @@ """ import hashlib -import json import os import re import time @@ -206,7 +205,7 @@ def write_file(info_file, field, value): d['%s-timestamp' % field] = time.time() with open(info_file, 'w') as f: - f.write(json.dumps(d)) + f.write(jsonutils.dumps(d)) write_file(info_file, field, value) diff --git a/nova/virt/storage_users.py b/nova/virt/storage_users.py index 58e7f58498..edb0215f8d 100644 --- a/nova/virt/storage_users.py +++ b/nova/virt/storage_users.py @@ -13,13 +13,13 @@ # under the License. 
-import json import os import time from oslo.config import cfg from nova.i18n import _ +from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import utils @@ -59,7 +59,7 @@ def do_register_storage_use(storage_path, hostname): if os.path.exists(id_path): with open(id_path) as f: try: - d = json.loads(f.read()) + d = jsonutils.loads(f.read()) except ValueError: LOG.warning(_("Cannot decode JSON from %(id_path)s"), {"id_path": id_path}) @@ -67,7 +67,7 @@ def do_register_storage_use(storage_path, hostname): d[hostname] = time.time() with open(id_path, 'w') as f: - f.write(json.dumps(d)) + f.write(jsonutils.dumps(d)) return do_register_storage_use(storage_path, hostname) @@ -97,7 +97,7 @@ def do_get_storage_users(storage_path): if os.path.exists(id_path): with open(id_path) as f: try: - d = json.loads(f.read()) + d = jsonutils.loads(f.read()) except ValueError: LOG.warning(_("Cannot decode JSON from %(id_path)s"), {"id_path": id_path}) From 243879f5c51fc45f03491bcb78765945ddf76be8 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Fri, 1 Aug 2014 15:42:51 +0200 Subject: [PATCH 339/486] Added hacking check for jsonutils jsonutils provides some additional features in comparison to pure json or simplejson modules. For example, on Python 2.6, it automatically switches to simplejson that provides significant performance boost. So let's enforce usage of the module in replacement to stdlib json. 
Change-Id: I86ed6cd3316dd4da5e1b10b36a3ddba3739316d3 --- nova/hacking/checks.py | 18 ++++++++++++++++++ nova/tests/test_hacking.py | 21 +++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/nova/hacking/checks.py b/nova/hacking/checks.py index 707e629d51..a1dd61419a 100644 --- a/nova/hacking/checks.py +++ b/nova/hacking/checks.py @@ -286,6 +286,23 @@ def check_explicit_underscore_import(logical_line, filename): yield(0, "N323: Found use of _() without explicit import of _ !") +def use_jsonutils(logical_line, filename): + # the code below that path is not meant to be executed from neutron + # tree where jsonutils module is present, so don't enforce its usage + # for this subdirectory + if "plugins/xenserver" in filename: + return + + msg = "N323: jsonutils.%(fun)s must be used instead of json.%(fun)s" + + if "json." in logical_line: + json_funcs = ['dumps', 'dump', 'loads', 'load'] + for f in json_funcs: + pos = logical_line.find('json.%s' % f) + if pos != -1: + return (pos, msg % {'fun': f}) + + def factory(register): register(import_no_db_in_virt) register(no_db_session_in_public_api) @@ -303,3 +320,4 @@ def factory(register): register(validate_log_translations) register(no_mutable_default_args) register(check_explicit_underscore_import) + register(use_jsonutils) diff --git a/nova/tests/test_hacking.py b/nova/tests/test_hacking.py index 2db0936680..f6f796b138 100644 --- a/nova/tests/test_hacking.py +++ b/nova/tests/test_hacking.py @@ -217,3 +217,24 @@ def test_check_explicit_underscore_import(self): self.assertEqual(len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder/tests/other_files3.py"))), 0) + + def test_use_jsonutils(self): + def __get_msg(fun): + msg = ("N323: jsonutils.%(fun)s must be used instead of " + "json.%(fun)s" % {'fun': fun}) + return (0, msg) + + for method in ('dump', 'dumps', 'load', 'loads'): + self.assertEqual( + __get_msg(method), + checks.use_jsonutils("json.%s" % method, + 
"./nova/virt/xenapi/driver.py")) + self.assertIsNone( + checks.use_jsonutils("json.%s" % method, + "./plugins/xenserver/script.py")) + self.assertIsNone( + checks.use_jsonutils("jsonx.%s" % method, + "./nova/virt/xenapi/driver.py")) + self.assertIsNone( + checks.use_jsonutils("json.dumb", + "./nova/virt/xenapi/driver.py")) From f17ad7b933457f4a1b35fed9bbe252f55ebf29ae Mon Sep 17 00:00:00 2001 From: Christopher Lefelhocz Date: Fri, 25 Jul 2014 11:20:57 -0500 Subject: [PATCH 340/486] Fix rebuild with cells When rebuild_instances switched to a compute task it broke cells since the cast was dropped as it didn't match. This change moves the rebuild_instances to conductor and also adds some unit tests checks on the compute task integration point. Change-Id: Ic1c575fbca72f1cd34b1dbc6f82023e5b2b392e2 Closes-Bug: 1348642 --- nova/compute/cells_api.py | 5 ++-- nova/tests/compute/test_compute_cells.py | 32 ++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py index 4416ffacea..4abbd3625c 100644 --- a/nova/compute/cells_api.py +++ b/nova/compute/cells_api.py @@ -49,8 +49,7 @@ class ComputeRPCAPIRedirect(object): 'unpause_instance', 'revert_resize', 'confirm_resize', 'reset_network', 'inject_network_info', - 'backup_instance', 'snapshot_instance', - 'rebuild_instance'] + 'backup_instance', 'snapshot_instance'] def __init__(self, cells_rpcapi): self.cells_rpcapi = cells_rpcapi @@ -70,7 +69,7 @@ class ConductorTaskRPCAPIRedirect(object): # is for transitioning to a common interface where we can just # swap out the compute_task_rpcapi class with the cells_rpcapi class. 
cells_compatible = ['build_instances', 'resize_instance', - 'live_migrate_instance'] + 'live_migrate_instance', 'rebuild_instance'] def __init__(self, cells_rpcapi_obj): self.cells_rpcapi = cells_rpcapi_obj diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py index 69d611f86c..2bf6c7a5c8 100644 --- a/nova/tests/compute/test_compute_cells.py +++ b/nova/tests/compute/test_compute_cells.py @@ -274,6 +274,38 @@ def test_live_migrate_instance(self, instance_save): self.assertTrue(self.cells_rpcapi.live_migrate_instance.called) + @mock.patch.object(objects.Instance, 'save') + @mock.patch.object(objects.Instance, 'get_flavor') + @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') + @mock.patch.object(compute_api.API, '_get_image') + @mock.patch.object(compute_api.API, '_check_auto_disk_config') + @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild') + @mock.patch.object(compute_api.API, '_record_action_start') + def test_rebuild_instance(self, _record_action_start, + _checks_for_create_and_rebuild, _check_auto_disk_config, + _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save): + orig_system_metadata = {} + instance = fake_instance.fake_instance_obj(self.context, + vm_state=vm_states.ACTIVE, cell_name='fake-cell', + launched_at=timeutils.utcnow(), + system_metadata=orig_system_metadata, + expected_attrs=['system_metadata']) + get_flavor.return_value = '' + image_href = '' + image = {"min_ram": 10, "min_disk": 1, + "properties": {'architecture': 'x86_64'}} + admin_pass = '' + files_to_inject = [] + bdms = [] + + _get_image.return_value = (None, image) + bdm_get_by_instance_uuid.return_value = bdms + + self.compute_api.rebuild(self.context, instance, image_href, + admin_pass, files_to_inject) + + self.assertTrue(self.cells_rpcapi.rebuild_instance.called) + def test_check_equal(self): task_api = self.compute_api.compute_task_api tests = set() From 
5b27fe7de22aef53b82402f15b076887bc52670a Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 17 Jun 2014 19:51:40 +0000 Subject: [PATCH 341/486] libvirt: Allow specification of default machine type The libvirt driver currently does not set the machine type for a KVM guest by default. When not specified, libvirt will use the newest one it knows about. Unfortunately, that can result in live migrations failing if your environment is using different versions of the host OS on compute nodes as the destination node may not be able to support the machine type used when the VM was originally started. A simple solution to this is to provide a new option which allows you to specify the default machine type on a per compute node basis (nova.conf option). By using this option, you can ensure that VMs are started with a machine type that will allow it to be live migrated to other nodes in the deployment. This patch implements that solution by adding the hw_machine_type option to the [libvirt] group of nova.conf. 
DocImpact Closes-bug: #1331170 Change-Id: I223c70c729315b6ffc01eb293fe70553ef827162 --- nova/tests/virt/libvirt/test_driver.py | 45 ++++++++++++++++++++ nova/virt/libvirt/driver.py | 58 +++++++++++++++++++------- 2 files changed, 87 insertions(+), 16 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index b1b80437a0..a5d82ef612 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -2398,6 +2398,51 @@ def test_get_guest_config_machine_type_through_image_meta(self): image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "fake_machine_type") + def test_get_guest_config_machine_type_from_config(self): + self.flags(virt_type='kvm', group='libvirt') + self.flags(hw_machine_type=['x86_64=fake_machine_type'], + group='libvirt') + + def fake_getCapabilities(): + return """ + + + cef19ce0-0ca2-11df-855d-b19fbce37686 + + x86_64 + Penryn + Intel + + + + + + """ + + def fake_baselineCPU(cpu, flag): + return """ + Penryn + Intel + + + """ + + # Make sure the host arch is mocked as x86_64 + self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities, + baselineCPU=fake_baselineCPU, + getVersion=lambda: 1005001) + + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + instance_ref = db.instance_create(self.context, self.test_instance) + + disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, + instance_ref) + + cfg = conn._get_guest_config(instance_ref, + _fake_network_info(self.stubs, 1), + {}, disk_info) + self.assertEqual(cfg.os_mach_type, "fake_machine_type") + def _test_get_guest_config_ppc64(self, device_index): """Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config. 
""" diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 0baf1bd3b7..dc2717c8c5 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -219,6 +219,14 @@ help='A path to a device that will be used as source of ' 'entropy on the host. Permitted options are: ' '/dev/random or /dev/hwrng'), + cfg.ListOpt('hw_machine_type', + help='For qemu or KVM guests, set this option to specify ' + 'a default machine type per host architecture. ' + 'You can find a list of supported machine types ' + 'in your environment by checking the output of ' + 'the "virsh capabilities"command. The format of the ' + 'value for this config option is host-arch=machine-type. ' + 'For example: x86_64=machinetype1,armv7l=machinetype2'), ] CONF = cfg.CONF @@ -3188,6 +3196,39 @@ def _get_guest_config_meta(self, context, instance, flavor): return meta + def _machine_type_mappings(self): + mappings = {} + for mapping in CONF.libvirt.hw_machine_type: + host_arch, _, machine_type = mapping.partition('=') + mappings[host_arch] = machine_type + return mappings + + def _get_machine_type(self, image_meta, caps): + # The underlying machine type can be set as an image attribute, + # or otherwise based on some architecture specific defaults + + mach_type = None + + if (image_meta is not None and image_meta.get('properties') and + image_meta['properties'].get('hw_machine_type') + is not None): + mach_type = image_meta['properties']['hw_machine_type'] + else: + # For ARM systems we will default to vexpress-a15 for armv7 + # and virt for aarch64 + if caps.host.cpu.arch == "armv7l": + mach_type = "vexpress-a15" + + if caps.host.cpu.arch == "aarch64": + mach_type = "virt" + + # If set in the config, use that as the default. 
+ if CONF.libvirt.hw_machine_type: + mappings = self._machine_type_mappings() + mach_type = mappings.get(caps.host.cpu.arch) + + return mach_type + def _get_guest_config(self, instance, network_info, image_meta, disk_info, rescue=None, block_device_info=None, context=None): @@ -3263,22 +3304,7 @@ def _get_guest_config(self, instance, network_info, image_meta, if caps.host.cpu.arch in ("i686", "x86_64"): guest.sysinfo = self._get_guest_config_sysinfo(instance) guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS() - - # The underlying machine type can be set as an image attribute, - # or otherwise based on some architecture specific defaults - if (image_meta is not None and image_meta.get('properties') and - image_meta['properties'].get('hw_machine_type') - is not None): - guest.os_mach_type = \ - image_meta['properties']['hw_machine_type'] - else: - # For ARM systems we will default to vexpress-a15 for armv7 - # and virt for aarch64 - if caps.host.cpu.arch == "armv7l": - guest.os_mach_type = "vexpress-a15" - - if caps.host.cpu.arch == "aarch64": - guest.os_mach_type = "virt" + guest.os_mach_type = self._get_machine_type(image_meta, caps) if CONF.libvirt.virt_type == "lxc": guest.os_init_path = "/sbin/init" From d449b5d8556e67be08e016c94f9b1c523a69ce7e Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Tue, 12 Aug 2014 08:14:16 -0700 Subject: [PATCH 342/486] Neutron v2 API: fix get_floating_ip_pools Commit 7254f9b9dfbadadeb3aeda5d02bf37bfeb65e72d changed this method in nova.network.api in a way that it now returns simply a list of floating ip pool names, rather a list of dicts in the form {'name': 'pool_name'} The implementation of get_floating_ip_pools for neutron needs to be changed accordingly. Otherwise nova's floating ip extension will return a dict as floating ip pool names when neutron is enabled. 
Change-Id: I27001c9f17dad4dc4ca8cd032e689736553d8225 Closes-Bug: #1355882 --- nova/network/neutronv2/api.py | 6 ++++-- nova/tests/network/test_neutronv2.py | 3 +-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index 77dab523b5..29975e0db0 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -940,10 +940,12 @@ def _get_floating_ip_pools(self, client, project_id=None): return data['networks'] def get_floating_ip_pools(self, context): - """Return floating ip pools.""" + """Return floating ip pool names.""" client = neutronv2.get_client(context) pools = self._get_floating_ip_pools(client) - return [{'name': n['name'] or n['id']} for n in pools] + # Note(salv-orlando): Return a list of names to be consistent with + # nova.network.api.get_floating_ip_pools + return [n['name'] or n['id'] for n in pools] def _format_floating_ip_model(self, fip, pool_dict, port_dict): pool = pool_dict[fip['floating_network_id']] diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index 98f8933b62..3f33c5c216 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -1732,8 +1732,7 @@ def test_get_floating_ip_pools(self): AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]}) self.mox.ReplayAll() pools = api.get_floating_ip_pools(self.context) - expected = [{'name': self.fip_pool['name']}, - {'name': self.fip_pool_nova['name']}] + expected = [self.fip_pool['name'], self.fip_pool_nova['name']] self.assertEqual(expected, pools) def _get_expected_fip_model(self, fip_data, idx=0): From aeee162ea3611d9128e78d0589215864c8f48c06 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 11 Aug 2014 12:32:48 -0700 Subject: [PATCH 343/486] Drop instance_group_metadata from the database This completes the instance_group metadataectomy started in the previous patch. 
Per recent discussions, this decouples the code change from the actual schema change, allowing them to (potentially) be run at different times. Change-Id: I6c3ff35f6e90fbed8b1fd727eb3b0b0ee140abc4 --- .../250_remove_instance_groups_metadata.py | 71 +++++++++++++++++++ nova/tests/db/test_migrations.py | 8 +++ 2 files changed, 79 insertions(+) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/250_remove_instance_groups_metadata.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/250_remove_instance_groups_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/250_remove_instance_groups_metadata.py new file mode 100644 index 0000000000..667baae823 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/250_remove_instance_groups_metadata.py @@ -0,0 +1,71 @@ +# Copyright 2014 Red Hat, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from sqlalchemy import MetaData, Table, Column, DateTime, Integer, String, \ + ForeignKey + + +def upgrade(migrate_engine): + """Remove the instance_group_metadata table.""" + meta = MetaData(bind=migrate_engine) + + if migrate_engine.has_table('instance_group_metadata'): + group_metadata = Table('instance_group_metadata', meta, autoload=True) + group_metadata.drop() + + if migrate_engine.has_table('shadow_instance_group_metadata'): + shadow_group_metadata = Table('shadow_instance_group_metadata', meta, + autoload=True) + shadow_group_metadata.drop() + + +def downgrade(migrate_engine): + """Revert removal of the instance_group_metadata table.""" + meta = MetaData(bind=migrate_engine) + Table('instance_groups', meta, autoload=True) + Table('shadow_instance_groups', meta, autoload=True) + + if not migrate_engine.has_table('instance_group_metadata'): + group_metadata = Table('instance_group_metadata', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Integer), + Column('id', Integer, primary_key=True, nullable=False), + Column('key', String(length=255)), + Column('value', String(length=255)), + Column('group_id', Integer, ForeignKey('instance_groups.id'), + nullable=False), + mysql_engine='InnoDB', + mysql_charset='utf8', + ) + group_metadata.create() + if not migrate_engine.has_table('shadow_instance_group_metadata'): + shadow_group_metadata = Table('shadow_instance_group_metadata', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Integer), + Column('id', Integer, primary_key=True, nullable=False), + Column('key', String(length=255)), + Column('value', String(length=255)), + Column('group_id', Integer, + ForeignKey('shadow_instance_groups.id'), + nullable=False), + mysql_engine='InnoDB', + mysql_charset='utf8', + ) + shadow_group_metadata.create() diff --git a/nova/tests/db/test_migrations.py 
b/nova/tests/db/test_migrations.py index 603d5fcee6..5a6d0da8bb 100644 --- a/nova/tests/db/test_migrations.py +++ b/nova/tests/db/test_migrations.py @@ -764,6 +764,14 @@ def _post_downgrade_249(self, engine): if [c.name for c in i.columns] == ['instance_uuid', 'device_name']])) + def _check_250(self, engine, data): + self.assertTableNotExists(engine, 'instance_group_metadata') + self.assertTableNotExists(engine, 'shadow_instance_group_metadata') + + def _post_downgrade_250(self, engine): + oslodbutils.get_table(engine, 'instance_group_metadata') + oslodbutils.get_table(engine, 'shadow_instance_group_metadata') + class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn): """Test sqlalchemy-migrate migrations.""" From d8a18d72e046efa2785b1351a00476adb9343b57 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 12 Aug 2014 07:58:54 -0700 Subject: [PATCH 344/486] VMware: fix crash when VC driver boots Commit 1deb31f85a8f5d1e261b2cf1eddc537a5da7f60b caused the driver to break. This adds the missing method. 
Change-Id: Iaf7e33a868724efdcb13fdd9ba1c74b92d2e6210 Closes-bug: #1355875 --- nova/virt/vmwareapi/driver.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index 0adb52de97..bb69935293 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -164,6 +164,11 @@ def __init__(self, virtapi, scheme="https"): self._volumeops = self._resources.get(first_cluster).get('volumeops') self._vc_state = self._resources.get(first_cluster).get('vcstate') + def init_host(self, host): + vim = self._session.vim + if vim is None: + self._session._create_session() + def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True, migrate_data=None, destroy_vifs=True): """Cleanup after instance being destroyed by Hypervisor.""" From e7840303a683d91b01a8c73328f93419c41d7815 Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Thu, 7 Aug 2014 14:08:13 +0930 Subject: [PATCH 345/486] Make API name validation failure deterministic Makes the testing of a valid name in the API deterministic with respect to failing when an empty name is supplied by the user. Formerly it could fail either of two constraints - must be at least one character long - does not match the valid regexp which requires one character The lack of deterministic behaviour was causing issues with some unittests when PYTHONHASHSEED is not set to 0 because of the two failure modes. The second constraint for validation of names is now modified so it allows empty strings but overall the behaviour remains the same because of the first constraint. 
Change-Id: Iea89bb329a8939af5168926717b5c13bdf2a7f62 Partial-Bug: 1350287 --- nova/api/validation/parameter_types.py | 8 +++++++- .../api/openstack/compute/plugins/v3/test_keypairs.py | 2 +- nova/tests/test_api_validation.py | 2 +- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/nova/api/validation/parameter_types.py b/nova/api/validation/parameter_types.py index 48829a7f7c..fc2d491a9a 100644 --- a/nova/api/validation/parameter_types.py +++ b/nova/api/validation/parameter_types.py @@ -49,7 +49,13 @@ 'type': 'string', 'minLength': 1, 'maxLength': 255, # NOTE: Allow to some spaces in middle of name. - 'pattern': '^(?! )[a-zA-Z0-9. _-]+(? Date: Mon, 14 Jul 2014 15:18:46 +0800 Subject: [PATCH 346/486] Resize block device after swap to larger volume After swap to larger volume, instance's block device should be resized. Otherwise, the guest can't utilize the extra space. This patch make the libvirt driver resize block device after mirror the volume. And add new parameter 'resize_to' to virt driver, that is used to indicate the new size. 
Change-Id: Ib4d65e8812c7d3c28100155124218c75a94e16e7 Closes-Bug: #1341459 --- nova/compute/manager.py | 8 +++++++- nova/tests/compute/test_compute_mgr.py | 12 +++++++++--- nova/tests/virt/libvirt/test_driver.py | 6 ++++-- nova/tests/virt/test_virt_drivers.py | 2 +- nova/virt/driver.py | 5 ++++- nova/virt/fake.py | 2 +- nova/virt/libvirt/driver.py | 25 ++++++++++++++++++------- 7 files changed, 44 insertions(+), 16 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 78ce537967..f9d9be23e5 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -4407,6 +4407,7 @@ def _swap_volume(self, context, instance, bdm, connector, old_volume_id, mountpoint = bdm['device_name'] failed = False new_cinfo = None + resize_to = 0 try: old_cinfo, new_cinfo = self._init_volume_connection(context, new_volume_id, @@ -4414,7 +4415,12 @@ def _swap_volume(self, context, instance, bdm, connector, old_volume_id, connector, instance, bdm) - self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint) + old_vol_size = self.volume_api.get(context, old_volume_id)['size'] + new_vol_size = self.volume_api.get(context, new_volume_id)['size'] + if new_vol_size > old_vol_size: + resize_to = new_vol_size + self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint, + resize_to) except Exception: # pylint: disable=W0702 failed = True with excutils.save_and_reraise_exception(): diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index d5ffb32056..191ae5327f 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -994,11 +994,13 @@ def test_swap_volume_volume_api_usage(self): old_volume_id = uuidutils.generate_uuid() volumes[old_volume_id] = {'id': old_volume_id, 'display_name': 'old_volume', - 'status': 'detaching'} + 'status': 'detaching', + 'size': 1} new_volume_id = uuidutils.generate_uuid() volumes[new_volume_id] = {'id': new_volume_id, 'display_name': 
'new_volume', - 'status': 'available'} + 'status': 'available', + 'size': 2} def fake_vol_api_begin_detaching(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) @@ -1051,6 +1053,10 @@ def fake_vol_migrate_volume_completion(context, old_volume_id, def fake_func_exc(*args, **kwargs): raise AttributeError # Random exception + def fake_swap_volume(old_connection_info, new_connection_info, + instance, mountpoint, resize_to): + self.assertEqual(resize_to, 2) + self.stubs.Set(self.compute.volume_api, 'begin_detaching', fake_vol_api_begin_detaching) self.stubs.Set(self.compute.volume_api, 'roll_detaching', @@ -1071,7 +1077,7 @@ def fake_func_exc(*args, **kwargs): self.stubs.Set(self.compute.driver, 'get_volume_connector', lambda x: {}) self.stubs.Set(self.compute.driver, 'swap_volume', - lambda w, x, y, z: None) + fake_swap_volume) self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion', fake_vol_migrate_volume_completion) self.stubs.Set(db, 'block_device_mapping_update', diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 76aa472750..e0a69b7cb8 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -8094,8 +8094,9 @@ def test_swap_volume(self): mock_dom.XMLDesc.return_value = xmldoc mock_dom.isPersistent.return_value = True + mock_dom.blockJobInfo.return_value = {} - drvr._swap_volume(mock_dom, srcfile, dstfile) + drvr._swap_volume(mock_dom, srcfile, dstfile, 1) mock_dom.XMLDesc.assert_called_once_with( fakelibvirt.VIR_DOMAIN_XML_INACTIVE | @@ -8104,7 +8105,8 @@ def test_swap_volume(self): srcfile, dstfile, 0, libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT) - + mock_dom.blockResize.assert_called_once_with( + srcfile, 1 * units.Gi / units.Ki) mock_define.assert_called_once_with(xmldoc) def test_live_snapshot(self): diff --git a/nova/tests/virt/test_virt_drivers.py b/nova/tests/virt/test_virt_drivers.py index 
be248556b6..9212807ac5 100644 --- a/nova/tests/virt/test_virt_drivers.py +++ b/nova/tests/virt/test_virt_drivers.py @@ -440,7 +440,7 @@ def test_swap_volume(self): self.connection.swap_volume({'driver_volume_type': 'fake'}, {'driver_volume_type': 'fake'}, instance_ref, - '/dev/sda')) + '/dev/sda', 2)) @catch_notimplementederror def test_attach_detach_different_power_states(self): diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 37338ac6f4..b16ff9938c 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -438,10 +438,13 @@ def detach_volume(self, connection_info, instance, mountpoint, raise NotImplementedError() def swap_volume(self, old_connection_info, new_connection_info, - instance, mountpoint): + instance, mountpoint, resize_to): """Replace the disk attached to the instance. :param instance: nova.objects.instance.Instance + :param resize_to: This parameter is used to indicate the new volume + size when the new volume lager than old volume. + And the units is Gigabyte. 
""" raise NotImplementedError() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 5ffcbf8f36..6924b1424c 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -239,7 +239,7 @@ def detach_volume(self, connection_info, instance, mountpoint, pass def swap_volume(self, old_connection_info, new_connection_info, - instance, mountpoint): + instance, mountpoint, resize_to): """Replace the disk attached to the instance.""" instance_name = instance['name'] if instance_name not in self._mounts: diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 37351fd08e..4527a13c0b 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -1293,7 +1293,7 @@ def attach_volume(self, context, connection_info, instance, mountpoint, with excutils.save_and_reraise_exception(): self._disconnect_volume(connection_info, disk_dev) - def _swap_volume(self, domain, disk_path, new_path): + def _swap_volume(self, domain, disk_path, new_path, resize_to): """Swap existing disk with a new block device.""" # Save a copy of the domain's persistent XML file xml = domain.XMLDesc( @@ -1326,11 +1326,19 @@ def _swap_volume(self, domain, disk_path, new_path): domain.blockJobAbort(disk_path, libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT) + if resize_to: + # NOTE(alex_xu): domain.blockJobAbort isn't a sync call. This + # is a bug in libvirt. So we need to wait until the pivot is + # finished.
libvirt bug #1119173 + while self._wait_for_block_job(domain, disk_path, + wait_for_job_clean=True): + time.sleep(0.5) + domain.blockResize(disk_path, resize_to * units.Gi / units.Ki) finally: self._conn.defineXML(xml) def swap_volume(self, old_connection_info, - new_connection_info, instance, mountpoint): + new_connection_info, instance, mountpoint, resize_to): instance_name = instance['name'] virt_dom = self._lookup_by_name(instance_name) disk_dev = mountpoint.rpartition("/")[2] @@ -1348,7 +1356,7 @@ def swap_volume(self, old_connection_info, self._disconnect_volume(new_connection_info, disk_dev) raise NotImplementedError(_("Swap only supports host devices")) - self._swap_volume(virt_dom, disk_dev, conf.source_path) + self._swap_volume(virt_dom, disk_dev, conf.source_path, resize_to) self._disconnect_volume(old_connection_info, disk_dev) @staticmethod @@ -1616,7 +1624,8 @@ def snapshot(self, context, instance, image_id, update_task_state): instance=instance) @staticmethod - def _wait_for_block_job(domain, disk_path, abort_on_error=False): + def _wait_for_block_job(domain, disk_path, abort_on_error=False, + wait_for_job_clean=False): """Wait for libvirt block job to complete. Libvirt may return either cur==end or an empty dict when @@ -1637,10 +1646,12 @@ def _wait_for_block_job(domain, disk_path, abort_on_error=False): except Exception: return False - if cur == end: - return False + if wait_for_job_clean: + job_ended = not status else: - return True + job_ended = cur == end + + return not job_ended def _live_snapshot(self, domain, disk_path, out_path, image_format): """Snapshot an instance without downtime.""" From d6e6c35ff653565aa65e049ed1de371235b261de Mon Sep 17 00:00:00 2001 From: Pawel Koniszewski Date: Wed, 13 Aug 2014 09:59:33 -0400 Subject: [PATCH 347/486] Split EC2 ID validator to validator per resource type. Now there is only one method to validate EC2 id: validate_ec2_id(). 
It's used to validate both the Instance ID as well as Volume ID for valid EC2 ID format. However exception class raised in both cases is "InvalidInstanceIDMalformed" and EC2 error code is InvalidInstanceID.Malformed. This patch addresses this problem by adding two new methods: * validate_instance_id() * validate_volume_id() These methods use validate_ec2_id() and raise appropriate exception as well as appropriate EC2 error code based on AWS EC2 Error Code documentation: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/api-error-codes.html Change-Id: I55591b8b643bb316e5001e645d71c13094106e96 Closes-Bug: #1199308 --- nova/api/ec2/__init__.py | 1 + nova/api/ec2/cloud.py | 45 ++++++++++++++++--------- nova/exception.py | 7 +++- nova/tests/api/ec2/test_ec2_validate.py | 2 +- 4 files changed, 38 insertions(+), 17 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 9b2241f927..6d9c3ab845 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -555,6 +555,7 @@ def __call__(self, req): exception.FloatingIpNotFound, exception.ImageNotActive, exception.InvalidInstanceIDMalformed, + exception.InvalidVolumeIDMalformed, exception.InvalidKeypair, exception.InvalidParameterValue, exception.InvalidPortRange, diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 31fd563f6d..498df433e2 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -86,13 +86,28 @@ QUOTAS = quota.QUOTAS -def validate_ec2_id(val): +# EC2 ID can return the following error codes: +# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/api-error-codes.html +# Validate methods are split to return valid EC2 error codes for different +# resource types +def _validate_ec2_id(val): if not validator.validate_str()(val): - raise exception.InvalidInstanceIDMalformed(val=val) + raise exception.InvalidEc2Id(ec2_id=val) + ec2utils.ec2_id_to_id(val) + + +def validate_volume_id(volume_id): + try: + _validate_ec2_id(volume_id) + except 
exception.InvalidEc2Id: + raise exception.InvalidVolumeIDMalformed(volume_id=volume_id) + + +def validate_instance_id(instance_id): try: - ec2utils.ec2_id_to_id(val) + _validate_ec2_id(instance_id) except exception.InvalidEc2Id: - raise exception.InvalidInstanceIDMalformed(val=val) + raise exception.InvalidInstanceIDMalformed(instance_id=instance_id) # EC2 API can return the following values as documented in the EC2 API @@ -388,7 +403,7 @@ def _format_snapshot(self, context, snapshot): return s def create_snapshot(self, context, volume_id, **kwargs): - validate_ec2_id(volume_id) + validate_volume_id(volume_id) LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id) @@ -746,7 +761,7 @@ def get_password_data(self, context, instance_id, **kwargs): ec2_id = instance_id[0] else: ec2_id = instance_id - validate_ec2_id(ec2_id) + validate_instance_id(ec2_id) instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id) instance = self.compute_api.get(context, instance_uuid) output = password.extract_password(instance) @@ -765,7 +780,7 @@ def get_console_output(self, context, instance_id, **kwargs): ec2_id = instance_id[0] else: ec2_id = instance_id - validate_ec2_id(ec2_id) + validate_instance_id(ec2_id) instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id) instance = self.compute_api.get(context, instance_uuid, want_objects=True) @@ -779,7 +794,7 @@ def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: volumes = [] for ec2_id in volume_id: - validate_ec2_id(ec2_id) + validate_volume_id(ec2_id) internal_id = ec2utils.ec2_vol_id_to_uuid(ec2_id) volume = self.volume_api.get(context, internal_id) volumes.append(volume) @@ -859,7 +874,7 @@ def create_volume(self, context, **kwargs): return self._format_volume(context, dict(volume)) def delete_volume(self, context, volume_id, **kwargs): - validate_ec2_id(volume_id) + validate_volume_id(volume_id) volume_id = 
ec2utils.ec2_vol_id_to_uuid(volume_id) self.volume_api.delete(context, volume_id) return True @@ -868,8 +883,8 @@ def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - validate_ec2_id(instance_id) - validate_ec2_id(volume_id) + validate_instance_id(instance_id) + validate_volume_id(volume_id) volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id) instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id) instance = self.compute_api.get(context, instance_uuid, @@ -902,7 +917,7 @@ def _get_instance_from_volume(self, context, volume): raise exception.VolumeUnattached(volume_id=volume['id']) def detach_volume(self, context, volume_id, **kwargs): - validate_ec2_id(volume_id) + validate_volume_id(volume_id) volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id) LOG.audit(_("Detach volume %s"), volume_id, context=context) volume = self.volume_api.get(context, volume_id) @@ -993,7 +1008,7 @@ def _format_attr_user_data(instance, result): if fn is None: raise exception.InvalidAttribute(attr=attribute) - validate_ec2_id(instance_id) + validate_instance_id(instance_id) instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id) instance = self.compute_api.get(context, instance_uuid, want_objects=True) @@ -1425,7 +1440,7 @@ def _ec2_ids_to_instances(self, context, instance_id): instances = [] extra = ['system_metadata', 'metadata', 'info_cache'] for ec2_id in instance_id: - validate_ec2_id(ec2_id) + validate_instance_id(ec2_id) instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id) instance = objects.Instance.get_by_uuid( context, instance_uuid, expected_attrs=extra) @@ -1703,7 +1718,7 @@ def create_image(self, context, instance_id, **kwargs): # do so here no_reboot = kwargs.get('no_reboot', False) name = kwargs.get('name') - validate_ec2_id(instance_id) + validate_instance_id(instance_id) ec2_instance_id = instance_id instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_instance_id) instance = self.compute_api.get(context, 
instance_uuid, diff --git a/nova/exception.py b/nova/exception.py index fe761b0fab..9b3d8626ca 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -1267,8 +1267,13 @@ class MarkerNotFound(NotFound): class InvalidInstanceIDMalformed(Invalid): + msg_fmt = _("Invalid id: %(instance_id)s (expecting \"i-...\")") ec2_code = 'InvalidInstanceID.Malformed' - msg_fmt = _("Invalid id: %(val)s (expecting \"i-...\").") + + +class InvalidVolumeIDMalformed(Invalid): + msg_fmt = _("Invalid id: %(volume_id)s (expecting \"i-...\")") + ec2_code = 'InvalidVolumeID.Malformed' class CouldNotFetchImage(NovaException): diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py index f09de8958f..a058e46597 100644 --- a/nova/tests/api/ec2/test_ec2_validate.py +++ b/nova/tests/api/ec2/test_ec2_validate.py @@ -81,7 +81,7 @@ def dumb(*args, **kwargs): self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound) for x in self.EC2_VALID__IDS]) self.volume_id_exception_map = [(x, - exception.InvalidInstanceIDMalformed) + exception.InvalidVolumeIDMalformed) for x in self.EC2_MALFORMED_IDS] self.volume_id_exception_map.extend([(x, exception.VolumeNotFound) for x in self.EC2_VALID__IDS]) From b3edc9955d8c2fbe8c1eab9188b569d7687c0d1b Mon Sep 17 00:00:00 2001 From: Vladan Popovic Date: Mon, 7 Jul 2014 10:32:42 -0400 Subject: [PATCH 348/486] libvirt: add serial ports config This is the first patch for exposing serial ports on openstack instances. It contains setting up the instance XML config that creates a TCP chardevice in libvirt. 
Author: Ian Wells Co-authored by: Vladan Popovic, Sushma Korati partially implements blueprint serial-ports Change-Id: Ia77909dffe6b48be7e6642cbe8bbab27fb6a627d --- nova/tests/virt/libvirt/test_config.py | 12 ++++++++++++ nova/virt/libvirt/config.py | 7 +++++++ 2 files changed, 19 insertions(+) diff --git a/nova/tests/virt/libvirt/test_config.py b/nova/tests/virt/libvirt/test_config.py index f10e72d44f..5bfaacc68b 100644 --- a/nova/tests/virt/libvirt/test_config.py +++ b/nova/tests/virt/libvirt/test_config.py @@ -891,6 +891,18 @@ def test_config_file(self): """) + def test_config_serial_port(self): + obj = config.LibvirtConfigGuestSerial() + obj.type = "tcp" + obj.listen_port = 11111 + obj.listen_host = "0.0.0.0" + + xml = obj.to_xml() + self.assertXmlEqual(xml, """ + + + """) + class LibvirtConfigGuestConsoleTest(LibvirtConfigBaseTest): def test_config_pty(self): diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py index 012abdc3b7..7c0da7bc35 100644 --- a/nova/virt/libvirt/config.py +++ b/nova/virt/libvirt/config.py @@ -1245,16 +1245,23 @@ def __init__(self, **kwargs): self.type = "pty" self.source_path = None + self.listen_port = None + self.listen_host = None def format_dom(self): dev = super(LibvirtConfigGuestCharBase, self).format_dom() dev.set("type", self.type) + if self.type == "file": dev.append(etree.Element("source", path=self.source_path)) elif self.type == "unix": dev.append(etree.Element("source", mode="bind", path=self.source_path)) + elif self.type == "tcp": + dev.append(etree.Element("source", mode="bind", + host=self.listen_host, + service=str(self.listen_port))) return dev From 192d520a22155bd0a2553928da590e7ab8092292 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 12 Aug 2014 15:30:40 -0700 Subject: [PATCH 349/486] Fix 202 responses to contain valid content.
Using a webob.exc to generate 202s makes the response return plain text in the body, which breaks things that expect to be able to parse data according to the content type. Using a webob.Response object makes sure the body of the 202 response is empty. Change-Id: Ifca3081f573407808a2196e71ac1db87180db6ec Resolves-bug: #1356058 --- .../compute/contrib/cloudpipe_update.py | 3 ++- nova/api/openstack/compute/contrib/fixed_ips.py | 3 ++- .../compute/contrib/networks_associate.py | 7 ++++--- .../api/openstack/compute/contrib/os_networks.py | 4 ++-- .../compute/contrib/os_tenant_networks.py | 3 ++- .../openstack/compute/contrib/test_networks.py | 2 ++ nova/tests/integrated/test_api_samples.py | 16 ++++++++++++++++ 7 files changed, 30 insertions(+), 8 deletions(-) diff --git a/nova/api/openstack/compute/contrib/cloudpipe_update.py b/nova/api/openstack/compute/contrib/cloudpipe_update.py index 4ac040fb60..662915ba8e 100644 --- a/nova/api/openstack/compute/contrib/cloudpipe_update.py +++ b/nova/api/openstack/compute/contrib/cloudpipe_update.py @@ -13,6 +13,7 @@ # under the License. +import webob import webob.exc from nova.api.openstack import extensions @@ -55,7 +56,7 @@ def update(self, req, id, body): msg = _("Invalid request body: %s") % unicode(ex) raise webob.exc.HTTPBadRequest(explanation=msg) - return webob.exc.HTTPAccepted() + return webob.Response(status_int=202) class Cloudpipe_update(extensions.ExtensionDescriptor): diff --git a/nova/api/openstack/compute/contrib/fixed_ips.py b/nova/api/openstack/compute/contrib/fixed_ips.py index 199a4a105d..be071f73df 100644 --- a/nova/api/openstack/compute/contrib/fixed_ips.py +++ b/nova/api/openstack/compute/contrib/fixed_ips.py @@ -12,6 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import webob import webob.exc from nova.api.openstack import extensions @@ -74,7 +75,7 @@ def _set_reserved(self, context, address, reserved): msg = _("Fixed IP %s not found") % address raise webob.exc.HTTPNotFound(explanation=msg) - return webob.exc.HTTPAccepted() + return webob.Response(status_int=202) class Fixed_ips(extensions.ExtensionDescriptor): diff --git a/nova/api/openstack/compute/contrib/networks_associate.py b/nova/api/openstack/compute/contrib/networks_associate.py index 50edcffe9b..751d75b831 100644 --- a/nova/api/openstack/compute/contrib/networks_associate.py +++ b/nova/api/openstack/compute/contrib/networks_associate.py @@ -10,6 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. +import webob from webob import exc from nova.api.openstack import extensions @@ -43,7 +44,7 @@ def _disassociate_host_only(self, req, id, body): msg = _('Disassociate host is not implemented by the configured ' 'Network API') raise exc.HTTPNotImplemented(explanation=msg) - return exc.HTTPAccepted() + return webob.Response(status_int=202) @wsgi.action("disassociate_project") def _disassociate_project_only(self, req, id, body): @@ -60,7 +61,7 @@ def _disassociate_project_only(self, req, id, body): 'configured Network API') raise exc.HTTPNotImplemented(explanation=msg) - return exc.HTTPAccepted() + return webob.Response(status_int=202) @wsgi.action("associate_host") def _associate_host(self, req, id, body): @@ -78,7 +79,7 @@ def _associate_host(self, req, id, body): 'Network API') raise exc.HTTPNotImplemented(explanation=msg) - return exc.HTTPAccepted() + return webob.Response(status_int=202) class Networks_associate(extensions.ExtensionDescriptor): diff --git a/nova/api/openstack/compute/contrib/os_networks.py b/nova/api/openstack/compute/contrib/os_networks.py index 2cbb46ff4e..605e650ef9 100644 --- a/nova/api/openstack/compute/contrib/os_networks.py +++ b/nova/api/openstack/compute/contrib/os_networks.py @@ -82,7 +82,7 
@@ def _disassociate_host_and_project(self, req, id, body): msg = _('Disassociate network is not implemented by the ' 'configured Network API') raise exc.HTTPNotImplemented(explanation=msg) - return exc.HTTPAccepted() + return webob.Response(status_int=202) def show(self, req, id): context = req.environ['nova.context'] @@ -106,7 +106,7 @@ def delete(self, req, id): except exception.NetworkNotFound: msg = _("Network not found") raise exc.HTTPNotFound(explanation=msg) - return exc.HTTPAccepted() + return webob.Response(status_int=202) def create(self, req, body): context = req.environ['nova.context'] diff --git a/nova/api/openstack/compute/contrib/os_tenant_networks.py b/nova/api/openstack/compute/contrib/os_tenant_networks.py index e38a3cbac6..5e4eabd787 100644 --- a/nova/api/openstack/compute/contrib/os_tenant_networks.py +++ b/nova/api/openstack/compute/contrib/os_tenant_networks.py @@ -17,6 +17,7 @@ import netaddr import netaddr.core as netexc from oslo.config import cfg +import webob from webob import exc from nova.api.openstack import extensions @@ -146,7 +147,7 @@ def _rollback_quota(reservation): if CONF.enable_network_quota and reservation: QUOTAS.commit(context, reservation) - response = exc.HTTPAccepted() + response = webob.Response(status_int=202) return response diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py index 48fe473217..68cba93019 100644 --- a/nova/tests/api/openstack/compute/contrib/test_networks.py +++ b/nova/tests/api/openstack/compute/contrib/test_networks.py @@ -108,6 +108,8 @@ def disable_vlan(self): self._vlan_is_disabled = True def delete(self, context, network_id): + if network_id == 'always_delete': + return True if network_id == -1: raise exception.NetworkInUse(network_id=network_id) for i, network in enumerate(self.networks): diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index 0783abfa38..2023081d37 
100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -1006,6 +1006,7 @@ def test_floating_ips_delete(self): self.test_floating_ips_create() response = self._do_delete('os-floating-ips/%d' % 1) self.assertEqual(response.status, 202) + self.assertEqual(response.read(), "") class ExtendedFloatingIpsJsonTest(FloatingIpsJsonTest): @@ -1362,6 +1363,7 @@ def test_cloud_pipe_update(self): 'cloud-pipe-update-req', subs) self.assertEqual(response.status, 202) + self.assertEqual(response.read(), "") class CloudPipeUpdateXmlTest(CloudPipeUpdateJsonTest): @@ -1551,6 +1553,7 @@ def test_fixed_ip_reserve(self): 'fixedip-post-req', project) self.assertEqual(response.status, 202) + self.assertEqual(response.read(), "") def test_get_fixed_ip(self): # Return data about the given fixed ip. @@ -2604,6 +2607,7 @@ def test_delete_network(self): response = self._do_delete('os-tenant-networks/%s' % net["network"]["id"]) self.assertEqual(response.status, 202) + self.assertEqual(response.read(), "") class OsNetworksXmlTests(OsNetworksJsonTests): @@ -2615,6 +2619,7 @@ def test_delete_network(self): network_id = net.find('id').text response = self._do_delete('os-tenant-networks/%s' % network_id) self.assertEqual(response.status, 202) + self.assertEqual(response.read(), "") class NetworksJsonTests(ApiSampleTestBaseV2): @@ -2647,6 +2652,7 @@ def test_network_disassociate(self): response = self._do_post('os-networks/%s/action' % uuid, 'networks-disassociate-req', {}) self.assertEqual(response.status, 202) + self.assertEqual(response.read(), "") def test_network_show(self): uuid = test_networks.FAKE_NETWORKS[0]['uuid'] @@ -2664,6 +2670,12 @@ def test_network_add(self): response = self._do_post("os-networks/add", 'network-add-req', {}) self.assertEqual(response.status, 202) + self.assertEqual(response.read(), "") + + def test_network_delete(self): + response = self._do_delete('os-networks/always_delete') + self.assertEqual(response.status, 202) + 
self.assertEqual(response.read(), "") class NetworksXmlTests(NetworksJsonTests): @@ -2699,24 +2711,28 @@ def test_disassociate(self): 'network-disassociate-req', {}) self.assertEqual(response.status, 202) + self.assertEqual(response.read(), "") def test_disassociate_host(self): response = self._do_post('os-networks/1/action', 'network-disassociate-host-req', {}) self.assertEqual(response.status, 202) + self.assertEqual(response.read(), "") def test_disassociate_project(self): response = self._do_post('os-networks/1/action', 'network-disassociate-project-req', {}) self.assertEqual(response.status, 202) + self.assertEqual(response.read(), "") def test_associate_host(self): response = self._do_post('os-networks/1/action', 'network-associate-host-req', {"host": "testHost"}) self.assertEqual(response.status, 202) + self.assertEqual(response.read(), "") class NetworksAssociateXmlTests(NetworksAssociateJsonTests): From 37918fd8765454a1940c55f9366ef9ef7a0be46f Mon Sep 17 00:00:00 2001 From: Christopher MacGown Date: Tue, 5 Aug 2014 11:06:14 -0700 Subject: [PATCH 350/486] Fix Trusted Filter to work with Mt. Wilson `vtime` The Trusted Filter expects that `vtime` is returned from the attestation server in an ISO8601 format, but the Mt. Wilson attestation server can return it in a locale appropriate string format instead. There's no way to configure Mt. Wilson to do the right thing so we should just try to parse the returned `vtime` as a string formatted date. 
Closes-Bug: #1353029 Change-Id: Ic7351e0463c014321bdb4fcfeba90ac51460b325 --- nova/scheduler/filters/trusted_filter.py | 13 ++++++++++--- nova/tests/scheduler/test_host_filters.py | 23 +++++++++++++++++++++++ 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py index 4ace9c8d02..6be3f57a09 100644 --- a/nova/scheduler/filters/trusted_filter.py +++ b/nova/scheduler/filters/trusted_filter.py @@ -237,9 +237,16 @@ def _update_cache_entry(self, state): entry['vtime'] = timeutils.normalize_time( timeutils.parse_isotime(state['vtime'])) except ValueError: - # Mark the system as un-trusted if get invalid vtime. - entry['trust_lvl'] = 'unknown' - entry['vtime'] = timeutils.utcnow() + try: + # Mt. Wilson does not necessarily return an ISO8601 formatted + # `vtime`, so we should try to parse it as a string formatted + # datetime. + vtime = timeutils.parse_strtime(state['vtime'], fmt="%c") + entry['vtime'] = timeutils.normalize_time(vtime) + except ValueError: + # Mark the system as un-trusted if get invalid vtime. 
+ entry['trust_lvl'] = 'unknown' + entry['vtime'] = timeutils.utcnow() self.compute_nodes[host] = entry diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py index 3b8c8f6674..ef695037af 100644 --- a/nova/tests/scheduler/test_host_filters.py +++ b/nova/tests/scheduler/test_host_filters.py @@ -1449,6 +1449,29 @@ def test_trusted_filter_combine_hosts(self, mockdb): filt_cls.host_passes(host, filter_properties) # Fill the caches self.assertEqual(set(self.oat_hosts), set(['node1', 'node2'])) + def test_trusted_filter_trusted_and_locale_formated_vtime_passes(self): + self.oat_data = {"hosts": [{"host_name": "host1", + "trust_lvl": "trusted", + "vtime": timeutils.strtime(fmt="%c")}, + {"host_name": "host2", + "trust_lvl": "trusted", + "vtime": timeutils.strtime(fmt="%D")}, + # This is just a broken date to ensure that + # we're not just arbitrarily accepting any + # date format. + ]} + self._stub_service_is_up(True) + filt_cls = self.class_map['TrustedFilter']() + extra_specs = {'trust:trusted_host': 'trusted'} + filter_properties = {'context': self.context.elevated(), + 'instance_type': {'memory_mb': 1024, + 'extra_specs': extra_specs}} + host = fakes.FakeHostState('host1', 'host1', {}) + bad_host = fakes.FakeHostState('host2', 'host2', {}) + + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + self.assertFalse(filt_cls.host_passes(bad_host, filter_properties)) + def test_core_filter_passes(self): filt_cls = self.class_map['CoreFilter']() filter_properties = {'instance_type': {'vcpus': 1}} From b5f22359dfbd8d579954421da3f9053acc78553f Mon Sep 17 00:00:00 2001 From: Andrew Melton Date: Fri, 23 May 2014 16:32:52 -0400 Subject: [PATCH 351/486] Add unit tests for libvirt domain creation Tests for: nova.virt.libvirt.driver.LibvirtDriver.get_guest_config nova.virt.libvirt.driver.LibvirtDriver._create_domain Partially implements: bp libvirt-lxc-user-namespaces Change-Id: I4aada9ac06cd423b04a4c6136165c65e64e378aa --- 
nova/tests/virt/libvirt/test_driver.py | 121 ++++++++++++++++++++++++- 1 file changed, 120 insertions(+), 1 deletion(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index b237ba6156..2003dffe0d 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -1085,7 +1085,6 @@ def test_get_guest_config(self, time_mock): vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestVideo) - self.assertEqual(len(cfg.metadata), 1) self.assertIsInstance(cfg.metadata[0], vconfig.LibvirtConfigGuestMetaNovaInstance) @@ -1126,6 +1125,29 @@ def test_get_guest_config(self, time_mock): self.assertEqual(33550336, cfg.metadata[0].flavor.swap) + def test_get_guest_config_lxc(self): + self.flags(virt_type='lxc', group='libvirt') + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + instance_ref = db.instance_create(self.context, self.test_instance) + + cfg = conn._get_guest_config(instance_ref, + _fake_network_info(self.stubs, 1), + None, {'mapping': {}}) + self.assertEqual(instance_ref["uuid"], cfg.uuid) + self.assertEqual(2 * units.Mi, cfg.memory) + self.assertEqual(1, cfg.vcpus) + self.assertEqual(vm_mode.EXE, cfg.os_type) + self.assertEqual("/sbin/init", cfg.os_init_path) + self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline) + self.assertIsNone(cfg.os_root) + self.assertEqual(3, len(cfg.devices)) + self.assertIsInstance(cfg.devices[0], + vconfig.LibvirtConfigGuestFilesys) + self.assertIsInstance(cfg.devices[1], + vconfig.LibvirtConfigGuestInterface) + self.assertIsInstance(cfg.devices[2], + vconfig.LibvirtConfigGuestConsole) + def test_get_guest_config_clock(self): self.flags(virt_type='kvm', group='libvirt') conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) @@ -7400,6 +7422,103 @@ def test_get_domain_info_with_more_return(self, lookup_mock): dom_mock.ID.assert_called_once_with() 
lookup_mock.assert_called_once_with(instance['name']) + @mock.patch.object(fake_libvirt_utils, 'get_instance_path') + def test_create_domain(self, mock_get_inst_path): + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + mock_domain = mock.MagicMock() + mock_instance = mock.MagicMock() + mock_get_inst_path.return_value = '/tmp/' + + domain = conn._create_domain(domain=mock_domain, + instance=mock_instance) + + self.assertEqual(mock_domain, domain) + mock_get_inst_path.assertHasCalls([mock.call(mock_instance)]) + mock_domain.createWithFlags.assertHasCalls([mock.call(0)]) + + @mock.patch('nova.virt.disk.api.clean_lxc_namespace') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info') + @mock.patch('nova.virt.disk.api.setup_container') + @mock.patch('nova.openstack.common.fileutils.ensure_tree') + @mock.patch.object(fake_libvirt_utils, 'get_instance_path') + def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree, + mock_setup_container, mock_get_info, mock_clean): + self.flags(virt_type='lxc', group='libvirt') + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + mock_domain = mock.MagicMock() + mock_instance = mock.MagicMock() + inst_sys_meta = dict() + mock_instance.system_metadata = inst_sys_meta + mock_get_inst_path.return_value = '/tmp/' + mock_image_backend = mock.MagicMock() + conn.image_backend = mock_image_backend + mock_image = mock.MagicMock() + mock_image.path = '/tmp/test.img' + conn.image_backend.image.return_value = mock_image + mock_setup_container.return_value = '/dev/nbd0' + mock_get_info.return_value = {'state': power_state.RUNNING} + + domain = conn._create_domain(domain=mock_domain, + instance=mock_instance) + + self.assertEqual(mock_domain, domain) + self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name']) + mock_instance.save.assert_has_calls([mock.call()]) + mock_domain.createWithFlags.assert_has_calls([mock.call(0)]) + mock_get_inst_path.assert_has_calls([mock.call(mock_instance)]) + 
mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')]) + conn.image_backend.image.assert_has_calls([mock.call(mock_instance, + 'disk')]) + setup_container_call = mock.call('/tmp/test.img', + container_dir='/tmp/rootfs', + use_cow=CONF.use_cow_images) + mock_setup_container.assert_has_calls([setup_container_call]) + mock_get_info.assert_has_calls([mock.call(mock_instance)]) + mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')]) + + @mock.patch('nova.virt.disk.api.teardown_container') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info') + @mock.patch('nova.virt.disk.api.setup_container') + @mock.patch('nova.openstack.common.fileutils.ensure_tree') + @mock.patch.object(fake_libvirt_utils, 'get_instance_path') + def test_create_domain_lxc_not_running(self, mock_get_inst_path, + mock_ensure_tree, + mock_setup_container, + mock_get_info, mock_teardown): + self.flags(virt_type='lxc', group='libvirt') + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + mock_domain = mock.MagicMock() + mock_instance = mock.MagicMock() + inst_sys_meta = dict() + mock_instance.system_metadata = inst_sys_meta + mock_get_inst_path.return_value = '/tmp/' + mock_image_backend = mock.MagicMock() + conn.image_backend = mock_image_backend + mock_image = mock.MagicMock() + mock_image.path = '/tmp/test.img' + conn.image_backend.image.return_value = mock_image + mock_setup_container.return_value = '/dev/nbd0' + mock_get_info.return_value = {'state': power_state.SHUTDOWN} + + domain = conn._create_domain(domain=mock_domain, + instance=mock_instance) + + self.assertEqual(mock_domain, domain) + self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name']) + mock_instance.save.assert_has_calls([mock.call()]) + mock_domain.createWithFlags.assert_has_calls([mock.call(0)]) + mock_get_inst_path.assert_has_calls([mock.call(mock_instance)]) + mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')]) + 
conn.image_backend.image.assert_has_calls([mock.call(mock_instance, + 'disk')]) + setup_container_call = mock.call('/tmp/test.img', + container_dir='/tmp/rootfs', + use_cow=CONF.use_cow_images) + mock_setup_container.assert_has_calls([setup_container_call]) + mock_get_info.assert_has_calls([mock.call(mock_instance)]) + teardown_call = mock.call(container_dir='/tmp/rootfs') + mock_teardown.assert_has_calls([teardown_call]) + def test_create_domain_define_xml_fails(self): """Tests that the xml is logged when defining the domain fails.""" fake_xml = "this is a test" From b29443b7f53bef00f7b96da9d7967f05ac6a1c30 Mon Sep 17 00:00:00 2001 From: James Carey Date: Wed, 13 Aug 2014 21:05:54 +0000 Subject: [PATCH 352/486] Partial oslo-incubator sync -- log.py This patch pulls in the changes in openstack/common/log.py that fix a problem exposed by the removal of translation from LOG.debug messages. This removal causes the messages to no longer be unicode, which can cause formatting problems. The changes in log.py ensure that any message that is not of six.test_type is coverted to six.text_type. Note that this is required to complete blueprint: i18n-enablement. 
Generated with: python update.py --base nova --dest-dir /opt/stack/nova --modules log log: 759bd87 Merge "Set keystonemiddleware and routes.middleware to log on WARN l 71d072f Merge "Except socket.error if syslog isn't running" 37c0091 Add unicode coercion of logged messages to ContextFormatter 6614413 Correct coercion of logged message to unicode 1188d88 Except socket.error if syslog isn't running ac995be Fix E126 pep8 errors 36e5c2d Merge "Adjust oslo logging to provide adapter is enabled for" 631f880 Set keystonemiddleware and routes.middleware to log on WARN level 726d00a Adjust oslo logging to provide adapter is enabled for Change-Id: I255a68fc60963386e8fefe65c3ffd269795adbf4 Closes-Bug: 1348244 --- nova/openstack/common/log.py | 64 +++++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 22 deletions(-) diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py index bc9cbfa411..62c1de3358 100644 --- a/nova/openstack/common/log.py +++ b/nova/openstack/common/log.py @@ -33,6 +33,7 @@ import logging.config import logging.handlers import os +import socket import sys import traceback @@ -40,6 +41,8 @@ import six from six import moves +_PY26 = sys.version_info[0:2] == (2, 6) + from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils @@ -124,7 +127,8 @@ 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', - 'urllib3.connectionpool=WARN', 'websocket=WARN'] + 'urllib3.connectionpool=WARN', 'websocket=WARN', + "keystonemiddleware=WARN", "routes.middleware=WARN"] log_opts = [ cfg.StrOpt('logging_context_format_string', @@ -227,6 +231,15 @@ class BaseLoggerAdapter(logging.LoggerAdapter): def audit(self, msg, *args, **kwargs): self.log(logging.AUDIT, msg, *args, **kwargs) + def isEnabledFor(self, level): + if _PY26: + # This method was added in python 2.7 (and it does the exact + # 
same logic, so we need to do the exact same logic so that + # python 2.6 has this capability as well). + return self.logger.isEnabledFor(level) + else: + return super(BaseLoggerAdapter, self).isEnabledFor(level) + class LazyAdapter(BaseLoggerAdapter): def __init__(self, name='unknown', version='unknown'): @@ -289,11 +302,10 @@ def deprecated(self, msg, *args, **kwargs): self.warn(stdmsg, *args, **kwargs) def process(self, msg, kwargs): - # NOTE(mrodden): catch any Message/other object and - # coerce to unicode before they can get - # to the python logging and possibly - # cause string encoding trouble - if not isinstance(msg, six.string_types): + # NOTE(jecarey): If msg is not unicode, coerce it into unicode + # before it can get to the python logging and + # possibly cause string encoding trouble + if not isinstance(msg, six.text_type): msg = six.text_type(msg) if 'extra' not in kwargs: @@ -418,12 +430,12 @@ def set_defaults(logging_context_format_string=None, # later in a backwards in-compatible change if default_log_levels is not None: cfg.set_defaults( - log_opts, - default_log_levels=default_log_levels) + log_opts, + default_log_levels=default_log_levels) if logging_context_format_string is not None: cfg.set_defaults( - log_opts, - logging_context_format_string=logging_context_format_string) + log_opts, + logging_context_format_string=logging_context_format_string) def _find_facility_from_conf(): @@ -472,18 +484,6 @@ def _setup_logging_from_conf(project, version): for handler in log_root.handlers: log_root.removeHandler(handler) - if CONF.use_syslog: - facility = _find_facility_from_conf() - # TODO(bogdando) use the format provided by RFCSysLogHandler - # after existing syslog format deprecation in J - if CONF.use_syslog_rfc_format: - syslog = RFCSysLogHandler(address='/dev/log', - facility=facility) - else: - syslog = logging.handlers.SysLogHandler(address='/dev/log', - facility=facility) - log_root.addHandler(syslog) - logpath = _get_log_file_path() if 
logpath: filelog = logging.handlers.WatchedFileHandler(logpath) @@ -542,6 +542,20 @@ def _setup_logging_from_conf(project, version): else: logger.setLevel(level_name) + if CONF.use_syslog: + try: + facility = _find_facility_from_conf() + # TODO(bogdando) use the format provided by RFCSysLogHandler + # after existing syslog format deprecation in J + if CONF.use_syslog_rfc_format: + syslog = RFCSysLogHandler(facility=facility) + else: + syslog = logging.handlers.SysLogHandler(facility=facility) + log_root.addHandler(syslog) + except socket.error: + log_root.error('Unable to add syslog handler. Verify that syslog' + 'is running.') + _loggers = {} @@ -611,6 +625,12 @@ def __init__(self, *args, **kwargs): def format(self, record): """Uses contextstring if request_id is set, otherwise default.""" + # NOTE(jecarey): If msg is not unicode, coerce it into unicode + # before it can get to the python logging and + # possibly cause string encoding trouble + if not isinstance(record.msg, six.text_type): + record.msg = six.text_type(record.msg) + # store project info record.project = self.project record.version = self.version From bbff16a6fc8fa7b14de1c5f97bae70d1c5fd8209 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Mon, 11 Aug 2014 21:24:07 +0000 Subject: [PATCH 353/486] Add new db api get functions for ec2_snapshot This patch adds two new getters for EC2 snapshot mappings to the db API. The existing getters don't return the full db model. Instead, they only return an ID or UUID. The full db model is needed when this gets converted over to a nova object. 
Related to blueprint convert-ec2-api-to-use-nova-objects Change-Id: I8e2ed5086a7768f373d35c2f34386f2d30d11e07 --- nova/db/api.py | 8 ++++++++ nova/db/sqlalchemy/api.py | 24 ++++++++++++++++++++++++ nova/tests/db/test_db_api.py | 22 ++++++++++++++++++++++ 3 files changed, 54 insertions(+) diff --git a/nova/db/api.py b/nova/db/api.py index 801a0d0fe6..ba10f6b9ae 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -1200,6 +1200,14 @@ def ec2_snapshot_create(context, snapshot_id, forced_id=None): return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id) +def ec2_snapshot_get_by_ec2_id(context, ec2_id): + return IMPL.ec2_snapshot_get_by_ec2_id(context, ec2_id) + + +def ec2_snapshot_get_by_uuid(context, snapshot_uuid): + return IMPL.ec2_snapshot_get_by_uuid(context, snapshot_uuid) + + #################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index f92e498a68..75bd3fb0d9 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3476,6 +3476,30 @@ def get_snapshot_uuid_by_ec2_id(context, ec2_id): return result['uuid'] +@require_context +def ec2_snapshot_get_by_ec2_id(context, ec2_id): + result = _ec2_snapshot_get_query(context).\ + filter_by(id=ec2_id).\ + first() + + if not result: + raise exception.SnapshotNotFound(snapshot_id=ec2_id) + + return result + + +@require_context +def ec2_snapshot_get_by_uuid(context, snapshot_uuid): + result = _ec2_snapshot_get_query(context).\ + filter_by(uuid=snapshot_uuid).\ + first() + + if not result: + raise exception.SnapshotNotFound(snapshot_id=snapshot_uuid) + + return result + + ################### diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py index 437830d527..036be42f03 100644 --- a/nova/tests/db/test_db_api.py +++ b/nova/tests/db/test_db_api.py @@ -6261,6 +6261,8 @@ def check_exc_format(method, value): check_exc_format(db.get_snapshot_uuid_by_ec2_id, 123456) check_exc_format(db.get_ec2_instance_id_by_uuid, 'fake') 
check_exc_format(db.get_instance_uuid_by_ec2_id, 123456) + check_exc_format(db.ec2_snapshot_get_by_ec2_id, 123456) + check_exc_format(db.ec2_snapshot_get_by_uuid, 'fake') def test_ec2_volume_create(self): vol = db.ec2_volume_create(self.ctxt, 'fake-uuid') @@ -6302,6 +6304,26 @@ def test_get_snapshot_uuid_by_ec2_id_not_found(self): db.get_snapshot_uuid_by_ec2_id, self.ctxt, 100500) + def test_ec2_snapshot_get_by_ec2_id(self): + snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid') + snap2 = db.ec2_snapshot_get_by_ec2_id(self.ctxt, snap['id']) + self.assertEqual(snap2['uuid'], 'fake-uuid') + + def test_ec2_snapshot_get_by_uuid(self): + snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid') + snap2 = db.ec2_snapshot_get_by_uuid(self.ctxt, 'fake-uuid') + self.assertEqual(snap['id'], snap2['id']) + + def test_ec2_snapshot_get_by_ec2_id_not_found(self): + self.assertRaises(exception.SnapshotNotFound, + db.ec2_snapshot_get_by_ec2_id, + self.ctxt, 123456) + + def test_ec2_snapshot_get_by_uuid_not_found(self): + self.assertRaises(exception.SnapshotNotFound, + db.ec2_snapshot_get_by_uuid, + self.ctxt, 'fake-uuid') + def test_ec2_instance_create(self): inst = db.ec2_instance_create(self.ctxt, 'fake-uuid') self.assertIsNotNone(inst['id']) From f5a22790be1b55384b8d3259e4f324eccbcb0b5f Mon Sep 17 00:00:00 2001 From: Yunhong Jiang Date: Wed, 13 Aug 2014 15:36:37 -0700 Subject: [PATCH 354/486] Use flavor in confirm-resize to drop claim Currently the confirm_resize() cleans the stashed instance_type information before invoking the drop_resize_claim, and the drop_resize_claim() tries to fetch the old instance_type in system metadata, which will be wrong. We change the _cleanup_stored_instance_types() to pass back the to_be_dropped instance_type, and use that information for drop_resize_claim. 
Change-Id: Iba36acc75d3840104746596977b33bcd4e010f2e --- nova/compute/manager.py | 19 +++++++++++-------- nova/tests/compute/test_compute.py | 7 ++++++- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 660785e1ad..15dbce1cf0 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -3149,20 +3149,23 @@ def _cleanup_stored_instance_types(self, migration, instance, instance's system_metadata. Optionally update the "current" instance_type to the saved old one first. - Returns the updated system_metadata as a dict, as well as the - post-cleanup current instance type. + Returns the updated system_metadata as a dict, the + post-cleanup current instance type and the to-be dropped + instance type. """ sys_meta = instance.system_metadata if restore_old: instance_type = flavors.extract_flavor(instance, 'old_') + drop_instance_type = flavors.extract_flavor(instance) sys_meta = flavors.save_flavor_info(sys_meta, instance_type) else: instance_type = flavors.extract_flavor(instance) + drop_instance_type = flavors.extract_flavor(instance, 'old_') flavors.delete_flavor_info(sys_meta, 'old_') flavors.delete_flavor_info(sys_meta, 'new_') - return sys_meta, instance_type + return sys_meta, instance_type, drop_instance_type @wrap_exception() @wrap_instance_event @@ -3226,8 +3229,8 @@ def _confirm_resize(self, context, instance, quotas, with self._error_out_instance_on_exception(context, instance, quotas=quotas): # NOTE(danms): delete stashed migration information - sys_meta, instance_type = self._cleanup_stored_instance_types( - migration, instance) + sys_meta, instance_type, old_instance_type = ( + self._cleanup_stored_instance_types(migration, instance)) sys_meta.pop('old_vm_state', None) instance.system_metadata = sys_meta @@ -3245,7 +3248,7 @@ def _confirm_resize(self, context, instance, quotas, migration.save(context.elevated()) rt = self._get_resource_tracker(migration.source_node) - 
rt.drop_resize_claim(instance, prefix='old_') + rt.drop_resize_claim(instance, old_instance_type) # NOTE(mriedem): The old_vm_state could be STOPPED but the user # might have manually powered up the instance to confirm the @@ -3350,8 +3353,8 @@ def finish_revert_resize(self, context, instance, reservations, migration): self._notify_about_instance_usage( context, instance, "resize.revert.start") - sys_meta, instance_type = self._cleanup_stored_instance_types( - migration, instance, True) + sys_meta, instance_type, drop_instance_type = ( + self._cleanup_stored_instance_types(migration, instance, True)) # NOTE(mriedem): delete stashed old_vm_state information; we # default to ACTIVE for backwards compatibility if old_vm_state diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 8991a7ab60..7a05737062 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -5098,11 +5098,15 @@ def _test_cleanup_stored_instance_types(self, old, new, revert=False): if revert: flavors.extract_flavor(instance, 'old_').AndReturn( {'instance_type_id': old}) + flavors.extract_flavor(instance).AndReturn( + {'instance_type_id': new}) flavors.save_flavor_info( sys_meta, {'instance_type_id': old}).AndReturn(sys_meta) else: flavors.extract_flavor(instance).AndReturn( {'instance_type_id': new}) + flavors.extract_flavor(instance, 'old_').AndReturn( + {'instance_type_id': old}) flavors.delete_flavor_info( sys_meta, 'old_').AndReturn(sys_meta) flavors.delete_flavor_info( @@ -5113,7 +5117,8 @@ def _test_cleanup_stored_instance_types(self, old, new, revert=False): revert) self.assertEqual(res, (sys_meta, - {'instance_type_id': revert and old or new})) + {'instance_type_id': revert and old or new}, + {'instance_type_id': revert and new or old})) def test_cleanup_stored_instance_types_for_resize(self): self._test_cleanup_stored_instance_types('1', '2') From 49d4defee6b5182eca31431ae8273d3a523f3e61 Mon Sep 17 00:00:00 2001 From: 
Davanum Srinivas Date: Tue, 12 Aug 2014 17:13:59 -0400 Subject: [PATCH 355/486] VMware: revert deletion of cleanup_host Commit 1deb31f85a8f5d1e261b2cf1eddc537a5da7f60b removes cleanup_host by mistake. This adds the missing method back. Related-Bug: #1355875 Change-Id: If695bf00613fe389af91f453dbc8191698d95a94 --- nova/tests/virt/vmwareapi/fake.py | 17 +++++++++++- nova/tests/virt/vmwareapi/test_driver_api.py | 27 ++++++++++++++++++++ nova/virt/vmwareapi/driver.py | 12 +++++++++ 3 files changed, 55 insertions(+), 1 deletion(-) diff --git a/nova/tests/virt/vmwareapi/fake.py b/nova/tests/virt/vmwareapi/fake.py index 68636cbd17..bfa9002324 100644 --- a/nova/tests/virt/vmwareapi/fake.py +++ b/nova/tests/virt/vmwareapi/fake.py @@ -1010,6 +1010,21 @@ def create(self, obj_name): return DataObject(obj_name) +class FakeService(DataObject): + """Fake service class.""" + + def Logout(self, session_manager): + pass + + +class FakeClient(DataObject): + """Fake client class.""" + + def __init__(self): + """Creates a namespace object.""" + self.service = FakeService() + + class FakeSession(object): """Fake Session Class.""" @@ -1052,7 +1067,7 @@ def __init__(self, protocol="https", host="localhost", trace=None): contents and the cookies for the session. 
""" self._session = None - self.client = DataObject() + self.client = FakeClient() self.client.factory = FakeFactory() transport = DataObject() diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index 6a3aefe248..df2f795e34 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -383,6 +383,33 @@ def _set_exception_vars(self): self.task_ref = None self.exception = False + def test_cleanup_host(self): + self.conn.init_host("fake_host") + try: + self.conn.cleanup_host("fake_host") + except Exception as ex: + self.fail("cleanup_host raised: %s" % ex) + + @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__') + def test_cleanup_host_direct(self, mock_init): + mock_init.return_value = None + vcdriver = driver.VMwareVCDriver(None, False) + vcdriver._session = mock.Mock() + vcdriver.cleanup_host("foo") + vcdriver._session.vim.get_service_content.assert_called_once_with() + vcdriver._session.vim.client.service.Logout.assert_called_once_with( + vcdriver._session.vim.get_service_content().sessionManager + ) + + @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__') + def test_cleanup_host_direct_with_bad_logout(self, mock_init): + mock_init.return_value = None + vcdriver = driver.VMwareVCDriver(None, False) + vcdriver._session = mock.Mock() + fault = suds.WebFault(mock.Mock(), mock.Mock()) + vcdriver._session.vim.client.service.Logout.side_effect = fault + vcdriver.cleanup_host("foo") + def test_driver_capabilities(self): self.assertTrue(self.conn.capabilities['has_imagecache']) self.assertFalse(self.conn.capabilities['supports_recreate']) diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index bb69935293..646d11f7c1 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -25,6 +25,7 @@ from eventlet import event from oslo.config import cfg +import suds from nova import exception from nova.i18n 
import _, _LC, _LW @@ -169,6 +170,17 @@ def init_host(self, host): if vim is None: self._session._create_session() + def cleanup_host(self, host): + # NOTE(hartsocks): we lean on the init_host to force the vim object + # to not be None. + vim = self._session.vim + service_content = vim.get_service_content() + session_manager = service_content.sessionManager + try: + vim.client.service.Logout(session_manager) + except suds.WebFault: + LOG.debug("No vSphere session was open during cleanup_host.") + def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True, migrate_data=None, destroy_vifs=True): """Cleanup after instance being destroyed by Hypervisor.""" From 42d017c0d498aa4034032104f9cdd56300c866e0 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 13 Aug 2014 22:34:39 -0700 Subject: [PATCH 356/486] Fix hacking check for jsonutils Hacking checks with logical_lines should use yield not return. The jsonutils rule was added in: I86ed6cd3316dd4da5e1b10b36a3ddba3739316d3 Now that the function uses yield, it can trigger twice in dumps (dump and dumps), so look for a '(' afterwards. Don't display the '(' at the end of the error message since it's confusing to read. Change-Id: I277dba08fdd30734409eee36008cebda35886968 Closes-Bug: #1356687 --- nova/hacking/checks.py | 4 ++-- nova/tests/test_hacking.py | 24 ++++++++++++------------ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/nova/hacking/checks.py b/nova/hacking/checks.py index a1dd61419a..f4d7832441 100644 --- a/nova/hacking/checks.py +++ b/nova/hacking/checks.py @@ -296,11 +296,11 @@ def use_jsonutils(logical_line, filename): msg = "N323: jsonutils.%(fun)s must be used instead of json.%(fun)s" if "json." 
in logical_line: - json_funcs = ['dumps', 'dump', 'loads', 'load'] + json_funcs = ['dumps(', 'dump(', 'loads(', 'load('] for f in json_funcs: pos = logical_line.find('json.%s' % f) if pos != -1: - return (pos, msg % {'fun': f}) + yield (pos, msg % {'fun': f[:-1]}) def factory(register): diff --git a/nova/tests/test_hacking.py b/nova/tests/test_hacking.py index f6f796b138..611cbf703a 100644 --- a/nova/tests/test_hacking.py +++ b/nova/tests/test_hacking.py @@ -222,19 +222,19 @@ def test_use_jsonutils(self): def __get_msg(fun): msg = ("N323: jsonutils.%(fun)s must be used instead of " "json.%(fun)s" % {'fun': fun}) - return (0, msg) + return [(0, msg)] for method in ('dump', 'dumps', 'load', 'loads'): self.assertEqual( __get_msg(method), - checks.use_jsonutils("json.%s" % method, - "./nova/virt/xenapi/driver.py")) - self.assertIsNone( - checks.use_jsonutils("json.%s" % method, - "./plugins/xenserver/script.py")) - self.assertIsNone( - checks.use_jsonutils("jsonx.%s" % method, - "./nova/virt/xenapi/driver.py")) - self.assertIsNone( - checks.use_jsonutils("json.dumb", - "./nova/virt/xenapi/driver.py")) + list(checks.use_jsonutils("json.%s(" % method, + "./nova/virt/xenapi/driver.py"))) + self.assertEqual(0, + len(list(checks.use_jsonutils("json.%s(" % method, + "./plugins/xenserver/script.py")))) + self.assertEqual(0, + len(list(checks.use_jsonutils("jsonx.%s(" % method, + "./nova/virt/xenapi/driver.py")))) + self.assertEqual(0, + len(list(checks.use_jsonutils("json.dumb", + "./nova/virt/xenapi/driver.py")))) From a0a6017f9b58941a4f8e67300a5dc57e34aada35 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Thu, 14 Aug 2014 03:40:09 -0700 Subject: [PATCH 357/486] Hacking: a new hacking check was added that used an existing number Commit 243879f5c51fc45f03491bcb78765945ddf76be8 added in a new hacking check that used an existing number. 
The new number is 324 (and not 323) Change-Id: I7e604a408387438105c435ad16a1fa3d6491b642 Closes-bug: #1356815 --- HACKING.rst | 1 + nova/hacking/checks.py | 2 +- nova/tests/test_hacking.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index 7884ac1f9e..d4cc74b212 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -36,6 +36,7 @@ Nova Specific Commandments - [N321] Validate that LOG messages, except debug ones, have translations - [N322] Method's default argument shouldn't be mutable - [N323] Ensure that the _() function is explicitly imported to ensure proper translations. +- [N324] Ensure that jsonutils.%(fun)s must be used instead of json.%(fun)s Creating Unit Tests ------------------- diff --git a/nova/hacking/checks.py b/nova/hacking/checks.py index a1dd61419a..98a309ed6c 100644 --- a/nova/hacking/checks.py +++ b/nova/hacking/checks.py @@ -293,7 +293,7 @@ def use_jsonutils(logical_line, filename): if "plugins/xenserver" in filename: return - msg = "N323: jsonutils.%(fun)s must be used instead of json.%(fun)s" + msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s" if "json." 
in logical_line: json_funcs = ['dumps', 'dump', 'loads', 'load'] diff --git a/nova/tests/test_hacking.py b/nova/tests/test_hacking.py index f6f796b138..522fdc91da 100644 --- a/nova/tests/test_hacking.py +++ b/nova/tests/test_hacking.py @@ -220,7 +220,7 @@ def test_check_explicit_underscore_import(self): def test_use_jsonutils(self): def __get_msg(fun): - msg = ("N323: jsonutils.%(fun)s must be used instead of " + msg = ("N324: jsonutils.%(fun)s must be used instead of " "json.%(fun)s" % {'fun': fun}) return (0, msg) From 65b3a4f6070a58ce4cb3cfb773fc4b7a3f47979c Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Tue, 12 Aug 2014 09:53:27 +0200 Subject: [PATCH 358/486] filter: add per-aggregate filter to configure max_io_ops_per_host Adds a filter AggregateIoOpsFilter which provides the ability to read from aggregates metadata "max_io_ops_per_host". DocImpact Implements: blueprint per-aggregate-max-io-ops-per-host Change-Id: I086033e7904c16995298bee7196ad3e7b5dc7aaf --- doc/source/devref/filter_scheduler.rst | 7 +++++ nova/scheduler/filters/io_ops_filter.py | 32 ++++++++++++++++++++++- nova/tests/scheduler/test_host_filters.py | 25 ++++++++++++++++++ 3 files changed, 63 insertions(+), 1 deletion(-) diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst index 872f3f6337..60971dba49 100644 --- a/doc/source/devref/filter_scheduler.rst +++ b/doc/source/devref/filter_scheduler.rst @@ -104,6 +104,12 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`): ``max_io_ops_per_host`` setting. Maximum number of I/O intensive instances allowed to run on this host, the host will be ignored by scheduler if more than ``max_io_ops_per_host`` instances such as build/resize/snapshot etc are running on it. +* |AggregateIoOpsFilter| - filters hosts by I/O operations with per-aggregate + ``max_io_ops_per_host`` setting. 
If no per-aggregate value is found, it will + fall back to the global default ``max_io_ops_per_host``. If more than + one value is found for a host (meaning the host is in two or more different + aggregates with different max io operations settings), the minimum value + will be used. * |PciPassthroughFilter| - Filter that schedules instances on a host if the host has devices to meet the device requests in the 'extra_specs' for the flavor. * |SimpleCIDRAffinityFilter| - allows to put a new instance on a host within @@ -356,6 +362,7 @@ in :mod:``nova.tests.scheduler``. .. |DiskFilter| replace:: :class:`DiskFilter ` .. |NumInstancesFilter| replace:: :class:`NumInstancesFilter ` .. |IoOpsFilter| replace:: :class:`IoOpsFilter ` +.. |AggregateIoOpsFilter| replace:: :class:`AggregateIoOpsFilter ` .. |PciPassthroughFilter| replace:: :class:`PciPassthroughFilter ` .. |SimpleCIDRAffinityFilter| replace:: :class:`SimpleCIDRAffinityFilter ` .. |GroupAntiAffinityFilter| replace:: :class:`GroupAntiAffinityFilter ` diff --git a/nova/scheduler/filters/io_ops_filter.py b/nova/scheduler/filters/io_ops_filter.py index de9ce5ab80..1ac20356d3 100644 --- a/nova/scheduler/filters/io_ops_filter.py +++ b/nova/scheduler/filters/io_ops_filter.py @@ -15,8 +15,10 @@ from oslo.config import cfg +from nova.i18n import _LW from nova.openstack.common import log as logging from nova.scheduler import filters +from nova.scheduler.filters import utils LOG = logging.getLogger(__name__) @@ -34,12 +36,16 @@ class IoOpsFilter(filters.BaseHostFilter): """Filter out hosts with too many concurrent I/O operations.""" + def _get_max_io_ops_per_host(self, host_state, filter_properties): + return CONF.max_io_ops_per_host + def host_passes(self, host_state, filter_properties): """Use information about current vm and task states collected from compute node statistics to decide whether to filter. 
""" num_io_ops = host_state.num_io_ops - max_io_ops = CONF.max_io_ops_per_host + max_io_ops = self._get_max_io_ops_per_host( + host_state, filter_properties) passes = num_io_ops < max_io_ops if not passes: LOG.debug("%(host_state)s fails I/O ops check: Max IOs per host " @@ -47,3 +53,27 @@ def host_passes(self, host_state, filter_properties): {'host_state': host_state, 'max_io_ops': max_io_ops}) return passes + + +class AggregateIoOpsFilter(IoOpsFilter): + """AggregateIoOpsFilter with per-aggregate the max io operations. + + Fall back to global max_io_ops_per_host if no per-aggregate setting found. + """ + + def _get_max_io_ops_per_host(self, host_state, filter_properties): + # TODO(uni): DB query in filter is a performance hit, especially for + # system with lots of hosts. Will need a general solution here to fix + # all filters with aggregate DB call things. + aggregate_vals = utils.aggregate_values_from_db( + filter_properties['context'], + host_state.host, + 'max_io_ops_per_host') + try: + value = utils.validate_num_values( + aggregate_vals, CONF.max_io_ops_per_host, cast_to=int) + except ValueError as e: + LOG.warn(_LW("Could not decode max_io_ops_per_host: '%s'"), e) + value = CONF.max_io_ops_per_host + + return value diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py index b6edfbfdea..34af77b538 100644 --- a/nova/tests/scheduler/test_host_filters.py +++ b/nova/tests/scheduler/test_host_filters.py @@ -1854,3 +1854,28 @@ def test_metrics_filter_missing_metrics(self): attribute_dict={'metrics': metrics}) filt_cls = self.class_map['MetricsFilter']() self.assertFalse(filt_cls.host_passes(host, None)) + + def test_aggregate_filter_num_iops_value(self): + self.flags(max_io_ops_per_host=7) + filt_cls = self.class_map['AggregateIoOpsFilter']() + host = fakes.FakeHostState('host1', 'node1', + {'num_io_ops': 7}) + filter_properties = {'context': self.context} + self.assertFalse(filt_cls.host_passes(host, 
filter_properties)) + self._create_aggregate_with_host( + name='fake_aggregate', + hosts=['host1'], + metadata={'max_io_ops_per_host': 8}) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + + def test_aggregate_filter_num_iops_value_error(self): + self.flags(max_io_ops_per_host=8) + filt_cls = self.class_map['AggregateIoOpsFilter']() + host = fakes.FakeHostState('host1', 'node1', + {'num_io_ops': 7}) + self._create_aggregate_with_host( + name='fake_aggregate', + hosts=['host1'], + metadata={'max_io_ops_per_host': 'XXX'}) + filter_properties = {'context': self.context} + self.assertTrue(filt_cls.host_passes(host, filter_properties)) From 994ec11e00dec8af6f0b486295056ef9b4ba2e4e Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Mon, 11 Aug 2014 21:03:18 +0000 Subject: [PATCH 359/486] Add EC2SnapshotMapping object Create a nova object for the mappings between snapshot UUIDs and the corresponding EC2 ID. Part of blueprint convert-ec2-api-to-use-nova-objects Change-Id: Ie29db77e404faff6ab9f12877094ea042e3c9924 --- nova/objects/ec2.py | 38 +++++++++++++++++++++++++++ nova/tests/objects/test_ec2.py | 41 ++++++++++++++++++++++++++++++ nova/tests/objects/test_objects.py | 1 + 3 files changed, 80 insertions(+) diff --git a/nova/objects/ec2.py b/nova/objects/ec2.py index 7642620cb7..d9d9250796 100644 --- a/nova/objects/ec2.py +++ b/nova/objects/ec2.py @@ -92,3 +92,41 @@ def get_by_id(cls, context, ec2_id): db_vmap = db.ec2_volume_get_by_id(context, ec2_id) if db_vmap: return cls._from_db_object(context, cls(context), db_vmap) + + +class EC2SnapshotMapping(base.NovaPersistentObject, base.NovaObject): + # Version 1.0: Initial version + VERSION = '1.0' + + fields = { + 'id': fields.IntegerField(read_only=True), + 'uuid': fields.UUIDField(), + } + + @staticmethod + def _from_db_object(context, smap, db_smap): + for field in smap.fields: + smap[field] = db_smap[field] + smap._context = context + smap.obj_reset_changes() + return smap + + @base.remotable + def 
create(self, context): + if self.obj_attr_is_set('id'): + raise exception.ObjectActionError(action='create', + reason='already created') + db_smap = db.ec2_snapshot_create(context, self.uuid) + self._from_db_object(context, self, db_smap) + + @base.remotable_classmethod + def get_by_uuid(cls, context, snapshot_uuid): + db_smap = db.ec2_snapshot_get_by_uuid(context, snapshot_uuid) + if db_smap: + return cls._from_db_object(context, cls(context), db_smap) + + @base.remotable_classmethod + def get_by_id(cls, context, ec2_id): + db_smap = db.ec2_snapshot_get_by_ec2_id(context, ec2_id) + if db_smap: + return cls._from_db_object(context, cls(context), db_smap) diff --git a/nova/tests/objects/test_ec2.py b/nova/tests/objects/test_ec2.py index 519a13f96e..8066004845 100644 --- a/nova/tests/objects/test_ec2.py +++ b/nova/tests/objects/test_ec2.py @@ -109,3 +109,44 @@ class TestEC2VolumeMapping(test_objects._LocalTest, _TestEC2VolumeMapping): class TestRemoteEC2VolumeMapping(test_objects._RemoteTest, _TestEC2VolumeMapping): pass + + +class _TestEC2SnapshotMapping(object): + @staticmethod + def _compare(test, db, obj): + for field, value in db.items(): + test.assertEqual(db[field], obj[field]) + + def test_create(self): + smap = ec2_obj.EC2SnapshotMapping() + smap.uuid = 'fake-uuid-2' + + with mock.patch.object(db, 'ec2_snapshot_create') as create: + create.return_value = fake_map + smap.create(self.context) + + self.assertEqual(self.context, smap._context) + smap._context = None + self._compare(self, fake_map, smap) + + def test_get_by_uuid(self): + with mock.patch.object(db, 'ec2_snapshot_get_by_uuid') as get: + get.return_value = fake_map + smap = ec2_obj.EC2SnapshotMapping.get_by_uuid(self.context, + 'fake-uuid-2') + self._compare(self, fake_map, smap) + + def test_get_by_ec2_id(self): + with mock.patch.object(db, 'ec2_snapshot_get_by_ec2_id') as get: + get.return_value = fake_map + smap = ec2_obj.EC2SnapshotMapping.get_by_id(self.context, 1) + self._compare(self, 
fake_map, smap) + + +class TestEC2SnapshotMapping(test_objects._LocalTest, _TestEC2SnapshotMapping): + pass + + +class TestRemoteEC2SnapshotMapping(test_objects._RemoteTest, + _TestEC2SnapshotMapping): + pass diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index 59fe895d27..3295d72f12 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -909,6 +909,7 @@ def test_object_serialization_iterables(self): 'DNSDomain': '1.0-5bdc288d7c3b723ce86ede998fd5c9ba', 'DNSDomainList': '1.0-6e3cc498d89dd7e90f9beb021644221c', 'EC2InstanceMapping': '1.0-627baaf4b12c9067200979bdc4558a99', + 'EC2SnapshotMapping': '1.0-26cf315be1f8abab4289d4147671c836', 'EC2VolumeMapping': '1.0-2f8c3bf077c65a425294ec2b361c9143', 'FixedIP': '1.1-082fb26772ce2db783ce4934edca4652', 'FixedIPList': '1.1-8ea5cfca611598f1242fd4095e49e58b', From 6db7eb7d838b00c20540e062ecf7be7b2ae831a2 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Mon, 11 Aug 2014 21:51:05 +0000 Subject: [PATCH 360/486] Use EC2SnapshotMapping for creating mappings Use the EC2SnapshotMapping nova object to create the mappings instead of using the db api directly. 
Part of blueprint convert-ec2-api-to-use-nova-objects Change-Id: If280b1061185faf71c632a7bc4a8530346c18c97 --- nova/api/ec2/cloud.py | 5 ++-- nova/api/ec2/ec2utils.py | 4 ++- nova/tests/api/ec2/test_ec2utils.py | 38 +++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 3 deletions(-) create mode 100644 nova/tests/api/ec2/test_ec2utils.py diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index e04b5f1133..773be0bb75 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -36,7 +36,6 @@ from nova import compute from nova.compute import api as compute_api from nova.compute import vm_states -from nova import db from nova import exception from nova.i18n import _ from nova.i18n import _LW @@ -399,7 +398,9 @@ def create_snapshot(self, context, volume_id, **kwargs): else: snapshot = self.volume_api.create_snapshot(*args) - db.ec2_snapshot_create(context, snapshot['id']) + smap = objects.EC2SnapshotMapping(context, uuid=snapshot['id']) + smap.create() + return self._format_snapshot(context, snapshot) def delete_snapshot(self, context, snapshot_id, **kwargs): diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py index 88bf255764..ea011ee4aa 100644 --- a/nova/api/ec2/ec2utils.py +++ b/nova/api/ec2/ec2utils.py @@ -346,7 +346,9 @@ def get_int_id_from_snapshot_uuid(context, snapshot_uuid): try: return db.get_ec2_snapshot_id_by_uuid(context, snapshot_uuid) except exception.NotFound: - return db.ec2_snapshot_create(context, snapshot_uuid)['id'] + smap = objects.EC2SnapshotMapping(context, uuid=snapshot_uuid) + smap.create() + return smap.id @memoize diff --git a/nova/tests/api/ec2/test_ec2utils.py b/nova/tests/api/ec2/test_ec2utils.py new file mode 100644 index 0000000000..4e6604329a --- /dev/null +++ b/nova/tests/api/ec2/test_ec2utils.py @@ -0,0 +1,38 @@ +# Copyright 2014 - Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.api.ec2 import ec2utils +from nova import context +from nova import objects +from nova import test + + +class EC2UtilsTestCase(test.TestCase): + def setUp(self): + self.ctxt = context.get_admin_context() + ec2utils.reset_cache() + super(EC2UtilsTestCase, self).setUp() + + def test_get_int_id_from_snapshot_uuid(self): + smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid') + smap.create() + smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt, + 'fake-uuid') + self.assertEqual(smap.id, smap_id) + + def test_get_int_id_from_snapshot_uuid_creates_mapping(self): + smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt, + 'fake-uuid') + smap = objects.EC2SnapshotMapping.get_by_id(self.ctxt, smap_id) + self.assertEqual('fake-uuid', smap.uuid) From fa98ec7b42826120870ec059860a19217beff3b3 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Mon, 11 Aug 2014 22:01:51 +0000 Subject: [PATCH 361/486] Get EC2 snapshot mappings with nova object Use the EC2SnapshotMapping nova object for getting the mapping instead of directly using the db API. 
Part of blueprint convert-ec2-api-to-use-objects Change-Id: I258ffe0a3fe3b0d7b2524bb64f1137127731bdd5 --- nova/api/ec2/ec2utils.py | 6 ++++-- nova/tests/api/ec2/test_ec2utils.py | 6 ++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py index ea011ee4aa..80ea9c7994 100644 --- a/nova/api/ec2/ec2utils.py +++ b/nova/api/ec2/ec2utils.py @@ -344,7 +344,8 @@ def get_int_id_from_snapshot_uuid(context, snapshot_uuid): if snapshot_uuid is None: return try: - return db.get_ec2_snapshot_id_by_uuid(context, snapshot_uuid) + smap = objects.EC2SnapshotMapping.get_by_uuid(context, snapshot_uuid) + return smap.id except exception.NotFound: smap = objects.EC2SnapshotMapping(context, uuid=snapshot_uuid) smap.create() @@ -353,7 +354,8 @@ def get_int_id_from_snapshot_uuid(context, snapshot_uuid): @memoize def get_snapshot_uuid_from_int_id(context, int_id): - return db.get_snapshot_uuid_by_ec2_id(context, int_id) + smap = objects.EC2SnapshotMapping.get_by_id(context, int_id) + return smap.uuid _c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') diff --git a/nova/tests/api/ec2/test_ec2utils.py b/nova/tests/api/ec2/test_ec2utils.py index 4e6604329a..529a6ef72f 100644 --- a/nova/tests/api/ec2/test_ec2utils.py +++ b/nova/tests/api/ec2/test_ec2utils.py @@ -36,3 +36,9 @@ def test_get_int_id_from_snapshot_uuid_creates_mapping(self): 'fake-uuid') smap = objects.EC2SnapshotMapping.get_by_id(self.ctxt, smap_id) self.assertEqual('fake-uuid', smap.uuid) + + def test_get_snapshot_uuid_from_int_id(self): + smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid') + smap.create() + smap_uuid = ec2utils.get_snapshot_uuid_from_int_id(self.ctxt, smap.id) + self.assertEqual(smap.uuid, smap_uuid) From ed87ac56c4f7afdaa8c01db1501aed2ede7187af Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Mon, 11 Aug 2014 22:07:05 +0000 Subject: [PATCH 362/486] Remove unused db api methods Remove methods that are no longer used from the db 
API. New methods were created that replaced these when the EC2SnapshotMapping object was created in earlier commits. Related to blueprint convert-ec2-api-to-use-nova-objects Change-Id: I5431dd34d6a3fcea687b9d8f192a822e83ed9402 --- nova/db/api.py | 8 -------- nova/db/sqlalchemy/api.py | 24 ------------------------ nova/tests/db/test_db_api.py | 22 ---------------------- 3 files changed, 54 deletions(-) diff --git a/nova/db/api.py b/nova/db/api.py index d99a244cc9..d2af85e4a6 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -1186,14 +1186,6 @@ def ec2_volume_get_by_uuid(context, volume_uuid): return IMPL.ec2_volume_get_by_uuid(context, volume_uuid) -def get_snapshot_uuid_by_ec2_id(context, ec2_id): - return IMPL.get_snapshot_uuid_by_ec2_id(context, ec2_id) - - -def get_ec2_snapshot_id_by_uuid(context, snapshot_id): - return IMPL.get_ec2_snapshot_id_by_uuid(context, snapshot_id) - - def ec2_snapshot_create(context, snapshot_id, forced_id=None): return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index da8291c7ef..a7b0434fc1 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3452,30 +3452,6 @@ def ec2_snapshot_create(context, snapshot_uuid, id=None): return ec2_snapshot_ref -@require_context -def get_ec2_snapshot_id_by_uuid(context, snapshot_id): - result = _ec2_snapshot_get_query(context).\ - filter_by(uuid=snapshot_id).\ - first() - - if not result: - raise exception.SnapshotNotFound(snapshot_id=snapshot_id) - - return result['id'] - - -@require_context -def get_snapshot_uuid_by_ec2_id(context, ec2_id): - result = _ec2_snapshot_get_query(context).\ - filter_by(id=ec2_id).\ - first() - - if not result: - raise exception.SnapshotNotFound(snapshot_id=ec2_id) - - return result['uuid'] - - @require_context def ec2_snapshot_get_by_ec2_id(context, ec2_id): result = _ec2_snapshot_get_query(context).\ diff --git a/nova/tests/db/test_db_api.py 
b/nova/tests/db/test_db_api.py index b7664deb20..9c5a856a9e 100644 --- a/nova/tests/db/test_db_api.py +++ b/nova/tests/db/test_db_api.py @@ -6257,8 +6257,6 @@ def check_exc_format(method, value): except exception.NotFound as exc: self.assertIn(unicode(value), unicode(exc)) - check_exc_format(db.get_ec2_snapshot_id_by_uuid, 'fake') - check_exc_format(db.get_snapshot_uuid_by_ec2_id, 123456) check_exc_format(db.get_ec2_instance_id_by_uuid, 'fake') check_exc_format(db.get_instance_uuid_by_ec2_id, 123456) check_exc_format(db.ec2_snapshot_get_by_ec2_id, 123456) @@ -6284,26 +6282,6 @@ def test_ec2_snapshot_create(self): self.assertIsNotNone(snap['id']) self.assertEqual(snap['uuid'], 'fake-uuid') - def test_get_ec2_snapshot_id_by_uuid(self): - snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid') - snap_id = db.get_ec2_snapshot_id_by_uuid(self.ctxt, 'fake-uuid') - self.assertEqual(snap['id'], snap_id) - - def test_get_snapshot_uuid_by_ec2_id(self): - snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid') - snap_uuid = db.get_snapshot_uuid_by_ec2_id(self.ctxt, snap['id']) - self.assertEqual(snap_uuid, 'fake-uuid') - - def test_get_ec2_snapshot_id_by_uuid_not_found(self): - self.assertRaises(exception.SnapshotNotFound, - db.get_ec2_snapshot_id_by_uuid, - self.ctxt, 'uuid-not-present') - - def test_get_snapshot_uuid_by_ec2_id_not_found(self): - self.assertRaises(exception.SnapshotNotFound, - db.get_snapshot_uuid_by_ec2_id, - self.ctxt, 100500) - def test_ec2_snapshot_get_by_ec2_id(self): snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid') snap2 = db.ec2_snapshot_get_by_ec2_id(self.ctxt, snap['id']) From aee9dd00f84247a1f0d22caadcae8d1b396f0a5e Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 12 Aug 2014 22:20:10 +0000 Subject: [PATCH 363/486] ec2: Add S3ImageMapping object Add a nova object for another ID-UUID mapping used in the EC2 API. This object will allow the removal of the last bit of direct db api usage by the EC2 API code. 
Part of blueprint convert-ec2-api-to-use-nova-objects Change-Id: Icf84f2d5a50ce4c78ee7f51db8e6a2b0276afbb2 --- nova/objects/ec2.py | 38 ++++++++++++++++++++++++++++ nova/tests/objects/test_ec2.py | 40 ++++++++++++++++++++++++++++++ nova/tests/objects/test_objects.py | 1 + 3 files changed, 79 insertions(+) diff --git a/nova/objects/ec2.py b/nova/objects/ec2.py index d9d9250796..7556bd57c9 100644 --- a/nova/objects/ec2.py +++ b/nova/objects/ec2.py @@ -130,3 +130,41 @@ def get_by_id(cls, context, ec2_id): db_smap = db.ec2_snapshot_get_by_ec2_id(context, ec2_id) if db_smap: return cls._from_db_object(context, cls(context), db_smap) + + +class S3ImageMapping(base.NovaPersistentObject, base.NovaObject): + # Version 1.0: Initial version + VERSION = '1.0' + + fields = { + 'id': fields.IntegerField(read_only=True), + 'uuid': fields.UUIDField(), + } + + @staticmethod + def _from_db_object(context, s3imap, db_s3imap): + for field in s3imap.fields: + s3imap[field] = db_s3imap[field] + s3imap._context = context + s3imap.obj_reset_changes() + return s3imap + + @base.remotable + def create(self, context): + if self.obj_attr_is_set('id'): + raise exception.ObjectActionError(action='create', + reason='already created') + db_s3imap = db.s3_image_create(context, self.uuid) + self._from_db_object(context, self, db_s3imap) + + @base.remotable_classmethod + def get_by_uuid(cls, context, s3_image_uuid): + db_s3imap = db.s3_image_get_by_uuid(context, s3_image_uuid) + if db_s3imap: + return cls._from_db_object(context, cls(context), db_s3imap) + + @base.remotable_classmethod + def get_by_id(cls, context, s3_id): + db_s3imap = db.s3_image_get(context, s3_id) + if db_s3imap: + return cls._from_db_object(context, cls(context), db_s3imap) diff --git a/nova/tests/objects/test_ec2.py b/nova/tests/objects/test_ec2.py index 8066004845..9b3dc38b18 100644 --- a/nova/tests/objects/test_ec2.py +++ b/nova/tests/objects/test_ec2.py @@ -150,3 +150,43 @@ class 
TestEC2SnapshotMapping(test_objects._LocalTest, _TestEC2SnapshotMapping): class TestRemoteEC2SnapshotMapping(test_objects._RemoteTest, _TestEC2SnapshotMapping): pass + + +class _TestS3ImageMapping(object): + @staticmethod + def _compare(test, db, obj): + for field, value in db.items(): + test.assertEqual(db[field], obj[field]) + + def test_create(self): + s3imap = ec2_obj.S3ImageMapping() + s3imap.uuid = 'fake-uuid-2' + + with mock.patch.object(db, 's3_image_create') as create: + create.return_value = fake_map + s3imap.create(self.context) + + self.assertEqual(self.context, s3imap._context) + s3imap._context = None + self._compare(self, fake_map, s3imap) + + def test_get_by_uuid(self): + with mock.patch.object(db, 's3_image_get_by_uuid') as get: + get.return_value = fake_map + s3imap = ec2_obj.S3ImageMapping.get_by_uuid(self.context, + 'fake-uuid-2') + self._compare(self, fake_map, s3imap) + + def test_get_by_s3_id(self): + with mock.patch.object(db, 's3_image_get') as get: + get.return_value = fake_map + s3imap = ec2_obj.S3ImageMapping.get_by_id(self.context, 1) + self._compare(self, fake_map, s3imap) + + +class TestS3ImageMapping(test_objects._LocalTest, _TestS3ImageMapping): + pass + + +class TestRemoteS3ImageMapping(test_objects._RemoteTest, _TestS3ImageMapping): + pass diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index 3295d72f12..167965ac03 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -940,6 +940,7 @@ def test_object_serialization_iterables(self): 'PciDeviceList': '1.0-5da7b4748a5a2594bae2cd0bd211cca2', 'Quotas': '1.1-7897deef00e6cd3095c8916f68d24418', 'QuotasNoOp': '1.1-4b06fd721c586b907ddd6543a00d6c2f', + 'S3ImageMapping': '1.0-9225943a44a91ad0349b9fd8bd3f3ce2', 'SecurityGroup': '1.1-bba0e72865e0953793e796571692453b', 'SecurityGroupList': '1.0-9513387aabf08c2a7961ac4da4315ed4', 'SecurityGroupRule': '1.0-fdd020bdd7eb8bac744ad6f9a4ef8165', From 
749c5e1d9d0ffa11d632e2ec2d21b7c75949577f Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Wed, 13 Aug 2014 14:42:12 +0000 Subject: [PATCH 364/486] ec2: Use S3ImageMapping object Use the S3ImageMapping in the EC2 API to remove the last bit of direct db API usage. Part of blueprint convert-ec2-api-to-use-nova-objects. Change-Id: I7d66a4d05f42ffd854afe21024c88b096aeaaeaa --- nova/api/ec2/ec2utils.py | 9 +++++---- nova/tests/api/ec2/test_ec2utils.py | 17 +++++++++++++++++ 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py index 80ea9c7994..963acb892c 100644 --- a/nova/api/ec2/ec2utils.py +++ b/nova/api/ec2/ec2utils.py @@ -19,7 +19,6 @@ from nova import availability_zones from nova import context -from nova import db from nova import exception from nova.i18n import _ from nova.network import model as network_model @@ -102,7 +101,7 @@ def resource_type_from_id(context, resource_id): @memoize def id_to_glance_id(context, image_id): """Convert an internal (db) id to a glance id.""" - return db.s3_image_get(context, image_id)['uuid'] + return objects.S3ImageMapping.get_by_id(context, image_id).uuid @memoize @@ -111,9 +110,11 @@ def glance_id_to_id(context, glance_id): if not glance_id: return try: - return db.s3_image_get_by_uuid(context, glance_id)['id'] + return objects.S3ImageMapping.get_by_uuid(context, glance_id).id except exception.NotFound: - return db.s3_image_create(context, glance_id)['id'] + s3imap = objects.S3ImageMapping(context, uuid=glance_id) + s3imap.create() + return s3imap.id def ec2_id_to_glance_id(context, ec2_id): diff --git a/nova/tests/api/ec2/test_ec2utils.py b/nova/tests/api/ec2/test_ec2utils.py index 529a6ef72f..9dceb7de12 100644 --- a/nova/tests/api/ec2/test_ec2utils.py +++ b/nova/tests/api/ec2/test_ec2utils.py @@ -42,3 +42,20 @@ def test_get_snapshot_uuid_from_int_id(self): smap.create() smap_uuid = ec2utils.get_snapshot_uuid_from_int_id(self.ctxt, smap.id) 
self.assertEqual(smap.uuid, smap_uuid) + + def test_id_to_glance_id(self): + s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid') + s3imap.create() + uuid = ec2utils.id_to_glance_id(self.ctxt, s3imap.id) + self.assertEqual(uuid, s3imap.uuid) + + def test_glance_id_to_id(self): + s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid') + s3imap.create() + s3imap_id = ec2utils.glance_id_to_id(self.ctxt, s3imap.uuid) + self.assertEqual(s3imap_id, s3imap.id) + + def test_glance_id_to_id_creates_mapping(self): + s3imap_id = ec2utils.glance_id_to_id(self.ctxt, 'fake-uuid') + s3imap = objects.S3ImageMapping.get_by_id(self.ctxt, s3imap_id) + self.assertEqual('fake-uuid', s3imap.uuid) From 52de9395e5fe4f328f6dab0b35d660a700787c76 Mon Sep 17 00:00:00 2001 From: Dorin Paslaru Date: Fri, 18 Jul 2014 15:43:16 +0300 Subject: [PATCH 365/486] Fixes Hyper-V agent force_hyperv_utils_v1 flag issue WMI root\virtualization namespace v1 (in Hyper-V) has been removed from Windows Server / Hyper-V Server 2012 R2. Hyper-V compute agent now creates instances which uses root\virtualization\v2 namespace if the agent's OS is Windows Server / Hyper-V Server 2012 R2 or newer. 
Closes-Bug: #1344036 Change-Id: I874ade4456b92a63959a765c7851bcd001befa32 --- nova/tests/virt/hyperv/test_hypervapi.py | 2 + nova/tests/virt/hyperv/test_migrationops.py | 10 +++- nova/tests/virt/hyperv/test_utilsfactory.py | 61 +++++++++++++++++++++ nova/tests/virt/hyperv/test_vmops.py | 12 +++- nova/virt/hyperv/utilsfactory.py | 30 ++++++---- 5 files changed, 101 insertions(+), 14 deletions(-) create mode 100644 nova/tests/virt/hyperv/test_utilsfactory.py diff --git a/nova/tests/virt/hyperv/test_hypervapi.py b/nova/tests/virt/hyperv/test_hypervapi.py index 8a318cfbb3..84a1802364 100644 --- a/nova/tests/virt/hyperv/test_hypervapi.py +++ b/nova/tests/virt/hyperv/test_hypervapi.py @@ -126,6 +126,8 @@ def update(self_fake, context, image_id, image_metadata, f): fake_get_remote_image_service) def fake_check_min_windows_version(fake_self, major, minor): + if [major, minor] >= [6, 3]: + return False return self._check_min_windows_version_satisfied self.stubs.Set(hostutils.HostUtils, 'check_min_windows_version', fake_check_min_windows_version) diff --git a/nova/tests/virt/hyperv/test_migrationops.py b/nova/tests/virt/hyperv/test_migrationops.py index cfd8777103..0af56d55fe 100644 --- a/nova/tests/virt/hyperv/test_migrationops.py +++ b/nova/tests/virt/hyperv/test_migrationops.py @@ -26,8 +26,14 @@ class MigrationOpsTestCase(test.NoDBTestCase): def setUp(self): super(MigrationOpsTestCase, self).setUp() self.context = 'fake-context' - self.flags(force_hyperv_utils_v1=True, group='hyperv') - self.flags(force_volumeutils_v1=True, group='hyperv') + + # utilsfactory will check the host OS version via get_hostutils, + # in order to return the proper Utils Class, so it must be mocked. 
+ patched_func = mock.patch.object(migrationops.utilsfactory, + "get_hostutils") + patched_func.start() + self.addCleanup(patched_func.stop) + self._migrationops = migrationops.MigrationOps() def test_check_and_attach_config_drive_unknown_path(self): diff --git a/nova/tests/virt/hyperv/test_utilsfactory.py b/nova/tests/virt/hyperv/test_utilsfactory.py new file mode 100644 index 0000000000..58e2b2988c --- /dev/null +++ b/nova/tests/virt/hyperv/test_utilsfactory.py @@ -0,0 +1,61 @@ +# Copyright 2014 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit tests for the Hyper-V utils factory. 
+""" + +import mock +from oslo.config import cfg + +from nova import test +from nova.virt.hyperv import hostutils +from nova.virt.hyperv import utilsfactory +from nova.virt.hyperv import vmutils +from nova.virt.hyperv import vmutilsv2 + +CONF = cfg.CONF + + +class TestHyperVUtilsFactory(test.NoDBTestCase): + + def setUp(self): + super(TestHyperVUtilsFactory, self).setUp() + + def test_get_vmutils_force_v1_and_min_version(self): + self._test_returned_class(None, True, True) + + def test_get_vmutils_v2(self): + self._test_returned_class(vmutilsv2.VMUtilsV2, False, True) + + def test_get_vmutils_v2_r2(self): + self._test_returned_class(vmutils.VMUtils, False, False) + + def test_get_vmutils_force_v1_and_not_min_version(self): + self._test_returned_class(vmutils.VMUtils, True, False) + + def _test_returned_class(self, expected_class, force_v1, os_supports_v2): + CONF.set_override('force_hyperv_utils_v1', force_v1, 'hyperv') + with mock.patch.object( + hostutils.HostUtils, + 'check_min_windows_version') as mock_check_min_windows_version: + mock_check_min_windows_version.return_value = os_supports_v2 + + if os_supports_v2 and force_v1: + self.assertRaises(vmutils.HyperVException, + utilsfactory.get_vmutils) + else: + actual_class = type(utilsfactory.get_vmutils()) + self.assertEqual(actual_class, expected_class) diff --git a/nova/tests/virt/hyperv/test_vmops.py b/nova/tests/virt/hyperv/test_vmops.py index b8f095b944..518fca8ece 100644 --- a/nova/tests/virt/hyperv/test_vmops.py +++ b/nova/tests/virt/hyperv/test_vmops.py @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. 
+import mock + from nova import exception from nova import test from nova.tests import fake_instance @@ -27,8 +29,14 @@ def __init__(self, test_case_name): def setUp(self): super(VMOpsTestCase, self).setUp() self.context = 'fake-context' - self.flags(force_hyperv_utils_v1=True, group='hyperv') - self.flags(force_volumeutils_v1=True, group='hyperv') + + # utilsfactory will check the host OS version via get_hostutils, + # in order to return the proper Utils Class, so it must be mocked. + patched_func = mock.patch.object(vmops.utilsfactory, + "get_hostutils") + patched_func.start() + self.addCleanup(patched_func.stop) + self._vmops = vmops.VMOps() def test_attach_config_drive(self): diff --git a/nova/virt/hyperv/utilsfactory.py b/nova/virt/hyperv/utilsfactory.py index 6259586160..88778008f8 100644 --- a/nova/virt/hyperv/utilsfactory.py +++ b/nova/virt/hyperv/utilsfactory.py @@ -15,6 +15,7 @@ from oslo.config import cfg +from nova.i18n import _ from nova.openstack.common import log as logging from nova.virt.hyperv import hostutils from nova.virt.hyperv import livemigrationutils @@ -57,20 +58,30 @@ def _get_class(v1_class, v2_class, force_v1_flag): return cls +def _get_virt_utils_class(v1_class, v2_class): + # The "root/virtualization" WMI namespace is no longer supported on + # Windows Server / Hyper-V Server 2012 R2 / Windows 8.1 + # (kernel version 6.3) or above. 
+ if (CONF.hyperv.force_hyperv_utils_v1 and + get_hostutils().check_min_windows_version(6, 3)): + raise vmutils.HyperVException( + _('The "force_hyperv_utils_v1" option cannot be set to "True" ' + 'on Windows Server / Hyper-V Server 2012 R2 or above as the WMI ' + '"root/virtualization" namespace is no longer supported.')) + return _get_class(v1_class, v2_class, CONF.hyperv.force_hyperv_utils_v1) + + def get_vmutils(host='.'): - return _get_class(vmutils.VMUtils, vmutilsv2.VMUtilsV2, - CONF.hyperv.force_hyperv_utils_v1)(host) + return _get_virt_utils_class(vmutils.VMUtils, vmutilsv2.VMUtilsV2)(host) def get_vhdutils(): - return _get_class(vhdutils.VHDUtils, vhdutilsv2.VHDUtilsV2, - CONF.hyperv.force_hyperv_utils_v1)() + return _get_virt_utils_class(vhdutils.VHDUtils, vhdutilsv2.VHDUtilsV2)() def get_networkutils(): - return _get_class(networkutils.NetworkUtils, - networkutilsv2.NetworkUtilsV2, - CONF.hyperv.force_hyperv_utils_v1)() + return _get_virt_utils_class(networkutils.NetworkUtils, + networkutilsv2.NetworkUtilsV2)() def get_hostutils(): @@ -91,6 +102,5 @@ def get_livemigrationutils(): def get_rdpconsoleutils(): - return _get_class(rdpconsoleutils.RDPConsoleUtils, - rdpconsoleutilsv2.RDPConsoleUtilsV2, - CONF.hyperv.force_hyperv_utils_v1)() + return _get_virt_utils_class(rdpconsoleutils.RDPConsoleUtils, + rdpconsoleutilsv2.RDPConsoleUtilsV2)() From 6b796597d57ebbd1b0a0c31195dd7b5aa53fce7a Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 14 Aug 2014 10:27:28 -0700 Subject: [PATCH 366/486] Add graphviz to list of distro packages to install Commit a507d42cf5d9912c2b3622e84afb8b7d3278595b makes the doc builds fail on warnings, so people are probably going to be running 'tox -e docs' more often. To run that you need the graphviz package from the distro, so add it to the list of packages needed when setting up a development environment. 
Change-Id: I47ffe63fddfbbd73c3fc698490ad07235caaf459 Closes-Bug: #1356983 --- doc/source/devref/development.environment.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst index 1e3209e148..652e71a208 100644 --- a/doc/source/devref/development.environment.rst +++ b/doc/source/devref/development.environment.rst @@ -60,7 +60,7 @@ Install the prerequisite packages. On Ubuntu:: - sudo apt-get install python-dev libssl-dev python-pip git-core libxml2-dev libxslt-dev pkg-config libffi-dev libpq-dev libmysqlclient-dev libvirt-dev + sudo apt-get install python-dev libssl-dev python-pip git-core libxml2-dev libxslt-dev pkg-config libffi-dev libpq-dev libmysqlclient-dev libvirt-dev graphviz On Ubuntu Precise (12.04) you may also need to add the following packages:: @@ -72,7 +72,7 @@ On Ubuntu Precise (12.04) you may also need to add the following packages:: On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux):: - sudo yum install python-devel openssl-devel python-pip git gcc libxslt-devel mysql-devel postgresql-devel libffi-devel libvirt-devel + sudo yum install python-devel openssl-devel python-pip git gcc libxslt-devel mysql-devel postgresql-devel libffi-devel libvirt-devel graphviz sudo pip-python install tox From 65d8c81ddaa0419c2273674de84f80ee1354b2f0 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 12 Aug 2014 18:06:01 -0700 Subject: [PATCH 367/486] Treat instance like an object in _start_building The _start_building method gets an instance object but was treating it like a dict, so this change uses dot notation on the instance object fields. Adds a unit test since this method wasn't directly unit tested before. 
Part of blueprint compute-manager-objects-juno Change-Id: I8d6b23c70e82fbc7740f3d6af1362905b895addb --- nova/compute/manager.py | 2 +- nova/tests/compute/test_compute_mgr.py | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index b3064f6e49..893aced63b 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1523,7 +1523,7 @@ def _start_building(self, context, instance): """Save the host and launched_on fields and log appropriately.""" LOG.audit(_('Starting instance...'), context=context, instance=instance) - self._instance_update(context, instance['uuid'], + self._instance_update(context, instance.uuid, vm_state=vm_states.BUILDING, task_state=None, expected_task_state=(task_states.SCHEDULING, diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 19f17d1b08..5ad29b4d9b 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -1792,6 +1792,15 @@ def test_cleanup_volumes_exception_raise(self): calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms] self.assertEqual(calls, volume_delete.call_args_list) + def test_start_building(self): + instance = fake_instance.fake_instance_obj(self.context) + with mock.patch.object(self.compute, '_instance_update') as update: + self.compute._start_building(self.context, instance) + update.assert_called_once_with( + self.context, instance.uuid, vm_state=vm_states.BUILDING, + task_state=None, expected_task_state=(task_states.SCHEDULING, + None)) + class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): def setUp(self): From 5876827d92bad3c408651184d9fea835639d8db8 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 12 Aug 2014 18:31:54 -0700 Subject: [PATCH 368/486] Treat instance like an object in _prebuild_instance The _prebuild_instance method is given an instance object but was treating it like a primitive dict, so this change uses dot 
notation to enforce the object usage. Adds unit tests since this wasn't directly unit tested before. Part of blueprint compute-manager-objects-juno Change-Id: Id22eb46ed3b4bd8fb48189a40f9df9bb114cd3d4 --- nova/compute/manager.py | 2 +- nova/tests/compute/test_compute_mgr.py | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 893aced63b..ecc5982a35 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1258,7 +1258,7 @@ def _prebuild_instance(self, context, instance): exception.UnexpectedDeletingTaskStateError): msg = _("Instance disappeared before we could start it") # Quickly bail out of here - raise exception.BuildAbortException(instance_uuid=instance['uuid'], + raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) def _validate_instance_group_policy(self, context, instance, diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 5ad29b4d9b..463feff8cd 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -1801,6 +1801,32 @@ def test_start_building(self): task_state=None, expected_task_state=(task_states.SCHEDULING, None)) + def _test_prebuild_instance_build_abort_exception(self, exc): + instance = fake_instance.fake_instance_obj(self.context) + with contextlib.nested( + mock.patch.object(self.compute, '_check_instance_exists'), + mock.patch.object(self.compute, '_start_building', + side_effect=exc) + ) as ( + check, start + ): + # run the code + self.assertRaises(exception.BuildAbortException, + self.compute._prebuild_instance, + self.context, instance) + # assert the calls + check.assert_called_once_with(self.context, instance) + start.assert_called_once_with(self.context, instance) + + def test_prebuild_instance_instance_not_found(self): + self._test_prebuild_instance_build_abort_exception( + 
exception.InstanceNotFound(instance_id='fake')) + + def test_prebuild_instance_unexpected_deleting_task_state_err(self): + self._test_prebuild_instance_build_abort_exception( + exception.UnexpectedDeletingTaskStateError(expected='foo', + actual='bar')) + class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): def setUp(self): From 26aeee429e77d518f39fb6057c5768e7edc0b281 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 12 Aug 2014 19:24:13 -0700 Subject: [PATCH 369/486] Treat instance like object in _validate_instance_group_policy The _validate_instance_group_policy method gets passed an instance object but was treating it like a primitive dict, so this changes it to use dot notation when accessing fields on the instance object. Updates the existing test to pass an instance object. Part of blueprint compute-manager-objects-juno Change-Id: Icbe9fa06ad8f1b32a556241ec7b3269b1fcd5cd2 --- nova/compute/manager.py | 4 ++-- nova/tests/compute/test_compute.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ecc5982a35..3831f6211f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1281,11 +1281,11 @@ def _do_validation(context, instance, group_hint): if 'anti-affinity' not in group.policies: return - group_hosts = group.get_hosts(context, exclude=[instance['uuid']]) + group_hosts = group.get_hosts(context, exclude=[instance.uuid]) if self.host in group_hosts: msg = _("Anti-affinity instance group policy was violated.") raise exception.RescheduledException( - instance_uuid=instance['uuid'], + instance_uuid=instance.uuid, reason=msg) _do_validation(context, instance, group_hint) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index d20002fb0e..e921f91570 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -3753,7 +3753,7 @@ def _create_server_group(self): def 
_run_instance_reschedules_on_anti_affinity_violation(self, group, hint): - instance = jsonutils.to_primitive(self._create_fake_instance()) + instance = self._create_fake_instance_obj() filter_properties = {'scheduler_hints': {'group': hint}} self.assertRaises(exception.RescheduledException, self.compute._build_instance, From f6f904a994851bc0abfd2e01a019fdccc21032d0 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 13 Aug 2014 08:51:01 -0700 Subject: [PATCH 370/486] nova-network: treat instance like object in allocate_for_instance The allocate_for_instance method gets an instance object from the compute manager so treat it like an object by using dot notation. This change updates the docstrings for the base API and nova-network API methods and updates the nova-network API unit tests to use instance objects when testing allocate_for_instance. Part of blueprint compute-manager-objects-juno Change-Id: Ia6d1d8dd46820d48cd9a3f3d4e98fb48e85d1188 --- nova/network/api.py | 8 ++++---- nova/network/base_api.py | 2 +- nova/tests/network/test_api.py | 4 +++- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/nova/network/api.py b/nova/network/api.py index 13cd114d54..15c1656a92 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -219,7 +219,7 @@ def allocate_for_instance(self, context, instance, vpn, """Allocates all network structures for an instance. :param context: The request context. - :param instance: An Instance dict. + :param instance: nova.objects.instance.Instance object. :param vpn: A boolean, if True, indicate a vpn to access the instance. :param requested_networks: A dictionary of requested_networks, Optional value containing network_id, fixed_ip, and port_id. 
@@ -243,9 +243,9 @@ def allocate_for_instance(self, context, instance, vpn, args = {} args['vpn'] = vpn args['requested_networks'] = requested_networks - args['instance_id'] = instance['uuid'] - args['project_id'] = instance['project_id'] - args['host'] = instance['host'] + args['instance_id'] = instance.uuid + args['project_id'] = instance.project_id + args['host'] = instance.host args['rxtx_factor'] = flavor['rxtx_factor'] args['macs'] = macs args['dhcp_options'] = dhcp_options diff --git a/nova/network/base_api.py b/nova/network/base_api.py index dd712ac419..0ac93e287a 100644 --- a/nova/network/base_api.py +++ b/nova/network/base_api.py @@ -171,7 +171,7 @@ def allocate_for_instance(self, context, instance, vpn, """Allocates all network structures for an instance. :param context: The request context. - :param instance: An Instance dict. + :param instance: nova.objects.instance.Instance object. :param vpn: A boolean, if True, indicate a vpn to access the instance. :param requested_networks: A dictionary of requested_networks, Optional value containing network_id, fixed_ip, and port_id. 
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py index 273b2d7415..ff6a3772ee 100644 --- a/nova/tests/network/test_api.py +++ b/nova/tests/network/test_api.py @@ -154,8 +154,10 @@ def test_allocate_for_instance_handles_macs_passed(self): flavor = flavors.get_default_flavor() flavor['rxtx_factor'] = 0 sys_meta = flavors.save_flavor_info({}, flavor) - instance = dict(id='id', uuid='uuid', project_id='project_id', + instance = dict(id=1, uuid='uuid', project_id='project_id', host='host', system_metadata=utils.dict_to_metadata(sys_meta)) + instance = fake_instance.fake_instance_obj( + self.context, expected_attrs=['system_metadata'], **instance) self.network_api.allocate_for_instance( self.context, instance, 'vpn', 'requested_networks', macs=macs) From dad0dedb32a1e6b900192288316af50dd13663a1 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 13 Aug 2014 10:46:25 -0700 Subject: [PATCH 371/486] neutronv2: treat instance like object in allocate_for_instance The allocate_for_instance method gets an instance object from the compute manager so treat it like an object by using dot notation. Updates the existing unit tests to use an instance object. Part of blueprint compute-manager-objects-juno Change-Id: Ia9305a67d1b9bd8601443da3288f710ab02b3f37 --- nova/network/neutronv2/api.py | 16 ++++---- .../contrib/test_neutron_security_groups.py | 19 ++++----- nova/tests/network/test_neutronv2.py | 39 +++++++++++++------ 3 files changed, 46 insertions(+), 28 deletions(-) diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index f8be2d9dd3..971b612166 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -237,6 +237,8 @@ def _create_port(self, port_client, instance, network_id, port_req_body, def allocate_for_instance(self, context, instance, **kwargs): """Allocate network resources for the instance. + :param context: The request context. + :param instance: nova.objects.instance.Instance object. 
:param requested_networks: optional value containing network_id, fixed_ip, and port_id :param security_groups: security groups to allocate for instance @@ -263,10 +265,10 @@ def allocate_for_instance(self, context, instance, **kwargs): available_macs = set(hypervisor_macs) neutron = neutronv2.get_client(context) LOG.debug('allocate_for_instance()', instance=instance) - if not instance['project_id']: + if not instance.project_id: msg = _('empty project id for instance %s') raise exception.InvalidInput( - reason=msg % instance['uuid']) + reason=msg % instance.uuid) requested_networks = kwargs.get('requested_networks') dhcp_opts = kwargs.get('dhcp_options', None) ports = {} @@ -281,7 +283,7 @@ def allocate_for_instance(self, context, instance, **kwargs): if hypervisor_macs is not None: if port['mac_address'] not in hypervisor_macs: raise exception.PortNotUsable(port_id=port_id, - instance=instance['uuid']) + instance=instance.uuid) else: # Don't try to use this MAC if we need to create a # port on the fly later. 
Identical MACs may be @@ -294,7 +296,7 @@ def allocate_for_instance(self, context, instance, **kwargs): net_ids.append(network_id) ordered_networks.append((network_id, fixed_ip, port_id)) - nets = self._get_available_networks(context, instance['project_id'], + nets = self._get_available_networks(context, instance.project_id, net_ids) if not nets: LOG.warn(_LW("No network configured!"), instance=instance) @@ -319,7 +321,7 @@ def allocate_for_instance(self, context, instance, **kwargs): # TODO(arosen) Should optimize more to do direct query for security # group if len(security_groups) == 1 if len(security_groups): - search_opts = {'tenant_id': instance['project_id']} + search_opts = {'tenant_id': instance.project_id} user_security_groups = neutron.list_security_groups( **search_opts).get('security_groups') @@ -377,8 +379,8 @@ def allocate_for_instance(self, context, instance, **kwargs): raise exception.SecurityGroupCannotBeApplied() network_id = network['id'] - zone = 'compute:%s' % instance['availability_zone'] - port_req_body = {'port': {'device_id': instance['uuid'], + zone = 'compute:%s' % instance.availability_zone + port_req_body = {'port': {'device_id': instance.uuid, 'device_owner': zone}} try: self._populate_neutron_extension_values(context, instance, diff --git a/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py index 0fffeeaeb0..50a786e8bd 100644 --- a/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py +++ b/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py @@ -30,6 +30,7 @@ from nova.network import neutronv2 from nova.network.neutronv2 import api as neutron_api from nova.network.security_group import neutron_driver +from nova.objects import instance as instance_obj from nova.openstack.common import jsonutils from nova import test from nova.tests.api.openstack.compute.contrib import test_security_groups @@ -174,16 
+175,16 @@ def test_delete_security_group_by_admin(self): def test_delete_security_group_in_use(self): sg = self._create_sg_template().get('security_group') self._create_network() - fake_instance = {'project_id': 'fake_tenant', - 'availability_zone': 'zone_one', - 'info_cache': {'network_info': []}, - 'security_groups': [], - 'uuid': str(uuid.uuid4()), - 'display_name': 'test_instance'} + db_inst = fakes.stub_instance(id=1, nw_cache=[], security_groups=[]) + _context = context.get_admin_context() + instance = instance_obj.Instance._from_db_object( + _context, instance_obj.Instance(), db_inst, + expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS) neutron = neutron_api.API() - neutron.allocate_for_instance(context.get_admin_context(), - fake_instance, - security_groups=[sg['id']]) + with mock.patch.object(nova.db, 'instance_get_by_uuid', + return_value=db_inst): + neutron.allocate_for_instance(_context, instance, + security_groups=[sg['id']]) req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' % sg['id']) diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index 3f33c5c216..d3eb08f2b6 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -339,6 +339,13 @@ def setUp(self): self.addCleanup(self.stubs.UnsetAll) def _stub_allocate_for_instance(self, net_idx=1, **kwargs): + # TODO(mriedem): Remove this conversion when all neutronv2 APIs are + # converted to handling instance objects. 
+ self.instance = fake_instance.fake_instance_obj(self.context, + **self.instance) + self.instance2 = fake_instance.fake_instance_obj(self.context, + **self.instance2) + api = neutronapi.API() self.mox.StubOutWithMock(api, 'get_instance_nw_info') has_portbinding = False @@ -379,7 +386,7 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs): 'network_id': 'my_netid1', 'mac_address': 'my_mac1', 'device_id': kwargs.get('_device') and - self.instance2['uuid'] or + self.instance2.uuid or ''}}) ports['my_netid1'] = [self.port_data1[0], self.port_data3[0]] @@ -393,7 +400,7 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs): 'network_id': 'my_netid1', 'mac_address': 'my_mac1', 'device_id': kwargs.get('_device') and - self.instance2['uuid'] or + self.instance2.uuid or ''}}) ports[port_id] = self.port_data1[0] n_id = 'my_netid1' @@ -417,7 +424,7 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs): self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': nets}) else: - mox_list_params = {'tenant_id': self.instance['project_id'], + mox_list_params = {'tenant_id': self.instance.project_id, 'shared': False} self.moxed_client.list_networks( **mox_list_params).AndReturn({'networks': nets}) @@ -436,7 +443,7 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs): for net_id, fixed_ip, port_id in ordered_networks: port_req_body = { 'port': { - 'device_id': self.instance['uuid'], + 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', }, } @@ -478,7 +485,7 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs): port_req_body['port']['network_id'] = net_id port_req_body['port']['admin_state_up'] = True port_req_body['port']['tenant_id'] = \ - self.instance['project_id'] + self.instance.project_id if macs: port_req_body['port']['mac_address'] = macs.pop() if has_portbinding: @@ -948,9 +955,11 @@ def test_allocate_for_instance_with_requested_networks_with_port(self): def test_allocate_for_instance_no_networks(self): 
"""verify the exception thrown when there are no networks defined.""" + self.instance = fake_instance.fake_instance_obj(self.context, + **self.instance) api = neutronapi.API() self.moxed_client.list_networks( - tenant_id=self.instance['project_id'], + tenant_id=self.instance.project_id, shared=False).AndReturn( {'networks': model.NetworkInfo([])}) self.moxed_client.list_networks(shared=True).AndReturn( @@ -966,6 +975,8 @@ def test_allocate_for_instance_ex1(self): Mox to raise exception when creating a second port. In this case, the code should delete the first created port. """ + self.instance = fake_instance.fake_instance_obj(self.context, + **self.instance) api = neutronapi.API() self.mox.StubOutWithMock(api, '_populate_neutron_extension_values') self.mox.StubOutWithMock(api, '_has_port_binding_extension') @@ -980,7 +991,7 @@ def test_allocate_for_instance_ex1(self): for network in self.nets2: binding_port_req_body = { 'port': { - 'device_id': self.instance['uuid'], + 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', }, } @@ -988,7 +999,7 @@ def test_allocate_for_instance_ex1(self): 'port': { 'network_id': network['id'], 'admin_state_up': True, - 'tenant_id': self.instance['project_id'], + 'tenant_id': self.instance.project_id, }, } port_req_body['port'].update(binding_port_req_body['port']) @@ -1018,6 +1029,8 @@ def test_allocate_for_instance_ex2(self): Mox to raise exception when creating the first port. In this case, the code should not delete any ports. 
""" + self.instance = fake_instance.fake_instance_obj(self.context, + **self.instance) api = neutronapi.API() self.mox.StubOutWithMock(api, '_populate_neutron_extension_values') self.mox.StubOutWithMock(api, '_has_port_binding_extension') @@ -1030,7 +1043,7 @@ def test_allocate_for_instance_ex2(self): id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2}) binding_port_req_body = { 'port': { - 'device_id': self.instance['uuid'], + 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', }, } @@ -1038,8 +1051,8 @@ def test_allocate_for_instance_ex2(self): 'port': { 'network_id': self.nets2[0]['id'], 'admin_state_up': True, - 'device_id': self.instance['uuid'], - 'tenant_id': self.instance['project_id'], + 'device_id': self.instance.uuid, + 'tenant_id': self.instance.project_id, }, } api._populate_neutron_extension_values(self.context, @@ -1055,11 +1068,13 @@ def test_allocate_for_instance_ex2(self): def test_allocate_for_instance_no_port_or_network(self): class BailOutEarly(Exception): pass + self.instance = fake_instance.fake_instance_obj(self.context, + **self.instance) api = neutronapi.API() self.mox.StubOutWithMock(api, '_get_available_networks') # Make sure we get an empty list and then bail out of the rest # of the function - api._get_available_networks(self.context, self.instance['project_id'], + api._get_available_networks(self.context, self.instance.project_id, []).AndRaise(BailOutEarly) self.mox.ReplayAll() self.assertRaises(BailOutEarly, From 85511ad9a5b101455feffdbd889a6211eb388cc1 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 14 Aug 2014 19:20:48 +0000 Subject: [PATCH 372/486] Updated from global requirements Change-Id: I21afa9b0b3465ef24246a5c20bc7ebfcd2f6b780 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a26949c6d8..7fb504cc45 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,7 @@ python-glanceclient>=0.13.1 
python-keystoneclient>=0.10.0 six>=1.7.0 stevedore>=0.14 -websockify>=0.5.1,<0.6 +websockify>=0.5.1,<0.7 wsgiref>=0.1.2 oslo.config>=1.4.0.0a3 oslo.rootwrap>=1.3.0.0a1 From e36aca4f199d08daead7500abcb4e0f58f01679d Mon Sep 17 00:00:00 2001 From: Nikola Dipanov Date: Thu, 5 Jun 2014 16:31:00 +0200 Subject: [PATCH 373/486] Make NovaObjectSerializer work with dicts Previously we would only look at dicts when deserializing and only to make sure if it is in fact a serialized version of an object. This patch makes the NovaObjectSerializer treat dicts as iterables and attempt to serialize/deserialize them recursively. This behaviour is needed for some of the cells RPC proxy calls that would make a dict out of all the kwargs to a standard RPC call and then pass it to the client like that (likely done to avoid having to bump versions of cell RPC calls whenever we do it for the corresponding non-cell one). Without this change tho - we can't have objects in any of such calls as they would not get serialized. We also change how @remotable decorator treats update dicts it gets back from the object_action rpc call, as any fields that are ObjectFields, and were updated during the remote call in the conductor, would have already been hydrated by the NovaObjectSerializer's new feature. 
Part of blueprint: compute-manager-objects-juno Change-Id: I8560b692f7150bb01bc2a3ca49b264e587a1e5d1 --- nova/objects/base.py | 22 +++++++++++++----- nova/tests/objects/test_objects.py | 37 ++++++++++++++++++++++++++---- 2 files changed, 48 insertions(+), 11 deletions(-) diff --git a/nova/objects/base.py b/nova/objects/base.py index 7830601ec8..45b258c64e 100644 --- a/nova/objects/base.py +++ b/nova/objects/base.py @@ -188,7 +188,13 @@ def wrapper(self, *args, **kwargs): for key, value in updates.iteritems(): if key in self.fields: field = self.fields[key] - self[key] = field.from_primitive(self, key, value) + # NOTE(ndipanov): Since NovaObjectSerializer will have + # deserialized any object fields into objects already, + # we do not try to deserialize them again here. + if isinstance(value, NovaObject): + self[key] = value + else: + self[key] = field.from_primitive(self, key, value) self.obj_reset_changes() self._changed_fields = set(updates.get('obj_what_changed', [])) return result @@ -616,15 +622,19 @@ def _process_iterable(self, context, action_fn, values): items from values having had action applied. """ iterable = values.__class__ - if iterable == set: + if issubclass(iterable, dict): + return iterable(**dict((k, action_fn(context, v)) + for k, v in six.iteritems(values))) + else: # NOTE(danms): A set can't have an unhashable value inside, such as # a dict. Convert sets to tuples, which is fine, since we can't # send them over RPC anyway. 
- iterable = tuple - return iterable([action_fn(context, value) for value in values]) + if iterable == set: + iterable = tuple + return iterable([action_fn(context, value) for value in values]) def serialize_entity(self, context, entity): - if isinstance(entity, (tuple, list, set)): + if isinstance(entity, (tuple, list, set, dict)): entity = self._process_iterable(context, self.serialize_entity, entity) elif (hasattr(entity, 'obj_to_primitive') and @@ -635,7 +645,7 @@ def serialize_entity(self, context, entity): def deserialize_entity(self, context, entity): if isinstance(entity, dict) and 'nova_object.name' in entity: entity = self._process_object(context, entity) - elif isinstance(entity, (tuple, list, set)): + elif isinstance(entity, (tuple, list, set, dict)): entity = self._process_iterable(context, self.deserialize_entity, entity) return entity diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index 59fe895d27..3768c65527 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -36,12 +36,18 @@ from nova.tests import fake_notifier +class MyOwnedObject(base.NovaPersistentObject, base.NovaObject): + VERSION = '1.0' + fields = {'baz': fields.Field(fields.Integer())} + + class MyObj(base.NovaPersistentObject, base.NovaObject): VERSION = '1.6' fields = {'foo': fields.Field(fields.Integer()), 'bar': fields.Field(fields.String()), 'missing': fields.Field(fields.String()), 'readonly': fields.Field(fields.Integer(), read_only=True), + 'rel_object': fields.ObjectField('MyOwnedObject', nullable=True) } @staticmethod @@ -88,6 +94,7 @@ def modify_save_modify(self, context): self.bar = 'meow' self.save() self.foo = 42 + self.rel_object = MyOwnedObject(baz=42) def obj_make_compatible(self, primitive, target_version): # NOTE(danms): Simulate an older version that had a different @@ -578,9 +585,10 @@ def test_changed_4(self): obj.bar = 'something' self.assertEqual(obj.obj_what_changed(), set(['bar'])) 
obj.modify_save_modify(self.context) - self.assertEqual(obj.obj_what_changed(), set(['foo'])) + self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object'])) self.assertEqual(obj.foo, 42) self.assertEqual(obj.bar, 'meow') + self.assertIsInstance(obj.rel_object, MyOwnedObject) self.assertRemotes() def test_changed_with_sub_object(self): @@ -664,7 +672,8 @@ def test_get(self): def test_object_inheritance(self): base_fields = base.NovaPersistentObject.fields.keys() - myobj_fields = ['foo', 'bar', 'missing', 'readonly'] + base_fields + myobj_fields = ['foo', 'bar', 'missing', + 'readonly', 'rel_object'] + base_fields myobj3_fields = ['new_field'] self.assertTrue(issubclass(TestSubclassedObject, MyObj)) self.assertEqual(len(myobj_fields), len(MyObj.fields)) @@ -712,7 +721,7 @@ def test_obj_repr(self): obj = MyObj(foo=123) self.assertEqual('MyObj(bar=,created_at=,deleted=,' 'deleted_at=,foo=123,missing=,readonly=,' - 'updated_at=)', repr(obj)) + 'rel_object=,updated_at=)', repr(obj)) class TestObject(_LocalTest, _TestObject): @@ -892,6 +901,23 @@ def test_object_serialization_iterables(self): self.assertEqual(1, len(thing2)) for item in thing2: self.assertIsInstance(item, MyObj) + # dict case + thing = {'key': obj} + primitive = ser.serialize_entity(self.context, thing) + self.assertEqual(1, len(primitive)) + for item in primitive.itervalues(): + self.assertNotIsInstance(item, base.NovaObject) + thing2 = ser.deserialize_entity(self.context, primitive) + self.assertEqual(1, len(thing2)) + for item in thing2.itervalues(): + self.assertIsInstance(item, MyObj) + + # object-action updates dict case + thing = {'foo': obj.obj_to_primitive()} + primitive = ser.serialize_entity(self.context, thing) + self.assertEqual(thing, primitive) + thing2 = ser.deserialize_entity(self.context, thing) + self.assertIsInstance(thing2['foo'], base.NovaObject) # NOTE(danms): The hashes in this list should only be changed if @@ -932,7 +958,8 @@ def test_object_serialization_iterables(self): 
'KeyPairList': '1.0-854cfff138dac9d5925c89cf805d1a70', 'Migration': '1.1-67c47726c2c71422058cd9d149d6d3ed', 'MigrationList': '1.1-6ca2ebb822ebfe1a660bace824b378c6', - 'MyObj': '1.6-9039bc29de1c08943771407697c83076', + 'MyObj': '1.6-55bfc22259fd3df239e4a49fa3552c93', + 'MyOwnedObject': '1.0-0f3d6c028543d7f3715d121db5b8e298', 'Network': '1.2-2ea21ede5e45bb80e7b7ac7106915c4e', 'NetworkList': '1.2-16510568c6e64cb8b358cb2b11333196', 'PciDevice': '1.1-523c46f960d93f78db55f0280b09441e', @@ -945,7 +972,7 @@ def test_object_serialization_iterables(self): 'SecurityGroupRuleList': '1.0-af4deeea8699ee90fb217f77d711d781', 'Service': '1.2-5a3df338c669e1148251431370b440ef', 'ServiceList': '1.0-ae64b4922df28d7cd11c59cddddf926c', - 'TestSubclassedObject': '1.6-1629421d83f474b7fadc41d3fc0e4998', + 'TestSubclassedObject': '1.6-c63feb2f2533b7d075490c04a2cc10dd', 'VirtualInterface': '1.0-10fdac4c704102b6d57d6936d6d790d2', 'VirtualInterfaceList': '1.0-dc9e9d5bce522d28f96092c49119b3e0', } From d2f5e06988646fed178ae281cebd2bf9ee19f4bc Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Fri, 15 Aug 2014 16:24:27 +0900 Subject: [PATCH 374/486] Raise Not Implemented error from V2 diagnostics API In case any driver does not implement functionality of providing VM diagnostics, HTTPNotImplemented error will be raised from driver. Nova V2 API should catch the same and pass it with proper error message for better debugging. This patch catch & raise HTTPNotImplemented exception for V2 "os-server-diagnostics" API. Adding UT also for the same. 
Change-Id: I485d3487d01eba07e3c34f6ef9b5c707af54ebb7 --- .../openstack/compute/contrib/server_diagnostics.py | 4 ++++ .../compute/contrib/test_server_diagnostics.py | 10 ++++++++++ 2 files changed, 14 insertions(+) diff --git a/nova/api/openstack/compute/contrib/server_diagnostics.py b/nova/api/openstack/compute/contrib/server_diagnostics.py index 215f6f4fac..96f5c68627 100644 --- a/nova/api/openstack/compute/contrib/server_diagnostics.py +++ b/nova/api/openstack/compute/contrib/server_diagnostics.py @@ -21,6 +21,7 @@ from nova.api.openstack import xmlutil from nova import compute from nova import exception +from nova.i18n import _ authorize = extensions.extension_authorizer('compute', 'server_diagnostics') @@ -52,6 +53,9 @@ def index(self, req, server_id): except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'get_diagnostics') + except NotImplementedError: + msg = _("Unable to get diagnostics, functionality not implemented") + raise webob.exc.HTTPNotImplemented(explanation=msg) class Server_diagnostics(extensions.ExtensionDescriptor): diff --git a/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py b/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py index ad0a133987..6ef2400467 100644 --- a/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py +++ b/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py @@ -80,6 +80,16 @@ def test_get_diagnostics_raise_conflict_on_invalid_state(self, res = req.get_response(self.router) self.assertEqual(409, res.status_int) + @mock.patch.object(compute_api.API, 'get_diagnostics', + side_effect=NotImplementedError) + @mock.patch.object(compute_api.API, 'get', fake_instance_get) + def test_get_diagnostics_raise_no_notimplementederror(self, + mock_get_diagnostics): + req = fakes.HTTPRequest.blank( + '/fake/servers/%s/diagnostics' % UUID) + res = req.get_response(self.router) + self.assertEqual(501, 
res.status_int) + class TestServerDiagnosticsXMLSerializer(test.NoDBTestCase): namespace = wsgi.XMLNS_V11 From 9513c4d8c9bc5cad7188c350971cc6c656730733 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Wed, 9 Jul 2014 09:58:21 +0100 Subject: [PATCH 375/486] virt: helper for processing NUMA topology configuration Add helper class / methods to nova/virt/hardware.py to assist in handling the various NUMA related image & flavor extra spec properties and calculating host usage. Blueprint: virt-driver-numa-placement Change-Id: I46c92166d5252f3bf9867dc08935df8c636d9b7d --- nova/exception.py | 32 ++++ nova/tests/virt/test_hardware.py | 319 +++++++++++++++++++++++++++++-- nova/virt/hardware.py | 244 +++++++++++++++++++++++ 3 files changed, 577 insertions(+), 18 deletions(-) diff --git a/nova/exception.py b/nova/exception.py index 53281a4be9..52302469a4 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -1639,3 +1639,35 @@ class ImageVCPULimitsRangeImpossible(Invalid): class InvalidArchitectureName(Invalid): msg_fmt = _("Architecture name '%(arch)s' is not recognised") + + +class ImageNUMATopologyIncomplete(Invalid): + msg_fmt = _("CPU and memory allocation must be provided for all " + "NUMA nodes") + + +class ImageNUMATopologyForbidden(Invalid): + msg_fmt = _("Image property '%(name)s' is not permitted to override " + "NUMA configuration set against the flavor") + + +class ImageNUMATopologyAsymmetric(Invalid): + msg_fmt = _("Asymmetric NUMA topologies require explicit assignment " + "of CPUs and memory to nodes in image or flavor") + + +class ImageNUMATopologyCPUOutOfRange(Invalid): + msg_fmt = _("CPU number %(cpunum)d is larger than max %(cpumax)d") + + +class ImageNUMATopologyCPUDuplicates(Invalid): + msg_fmt = _("CPU number %(cpunum)d is assigned to two nodes") + + +class ImageNUMATopologyCPUsUnassigned(Invalid): + msg_fmt = _("CPU number %(cpuset)s is not assigned to any node") + + +class ImageNUMATopologyMemoryOutOfRange(Invalid): + msg_fmt = 
_("%(memsize)d MB of memory assigned, but expected " + "%(memtotal)d MB") diff --git a/nova/tests/virt/test_hardware.py b/nova/tests/virt/test_hardware.py index 6436556e0a..050c60ea09 100644 --- a/nova/tests/virt/test_hardware.py +++ b/nova/tests/virt/test_hardware.py @@ -18,10 +18,23 @@ class FakeFlavor(): - def __init__(self, vcpus, extra_specs): + def __init__(self, vcpus, memory, extra_specs): self.vcpus = vcpus + self.memory_mb = memory self.extra_specs = extra_specs + def __getitem__(self, item): + try: + return getattr(self, item) + except AttributeError: + raise KeyError(item) + + def get(self, item, default=None): + try: + return getattr(self, item) + except AttributeError: + return default + class CpuSetTestCase(test.NoDBTestCase): def test_get_vcpu_pin_set(self): @@ -169,7 +182,7 @@ class VCPUTopologyTest(test.NoDBTestCase): def test_validate_config(self): testdata = [ { # Flavor sets preferred topology only - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", @@ -182,7 +195,7 @@ def test_validate_config(self): ) }, { # Image topology overrides flavor - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", @@ -200,7 +213,7 @@ def test_validate_config(self): ) }, { # Partial image topology overrides flavor - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", @@ -215,7 +228,7 @@ def test_validate_config(self): ) }, { # Restrict use of threads - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_max_threads": "2", }), "image": { @@ -228,7 +241,7 @@ def test_validate_config(self): ) }, { # Force use of at least two sockets - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), @@ -240,7 +253,7 @@ def test_validate_config(self): ) }, { # Image limits 
reduce flavor - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), @@ -254,7 +267,7 @@ def test_validate_config(self): ) }, { # Image limits kill flavor preferred - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_sockets": "2", "hw:cpu_cores": "8", "hw:cpu_threads": "1", @@ -269,7 +282,7 @@ def test_validate_config(self): ) }, { # Image limits cannot exceed flavor - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), @@ -281,7 +294,7 @@ def test_validate_config(self): "expect": exception.ImageVCPULimitsRangeExceeded, }, { # Image preferred cannot exceed flavor - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), @@ -517,7 +530,7 @@ def test_best_config(self): testdata = [ { # Flavor sets preferred topology only "allow_threads": True, - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1" @@ -529,7 +542,7 @@ def test_best_config(self): }, { # Image topology overrides flavor "allow_threads": True, - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", @@ -546,7 +559,7 @@ def test_best_config(self): }, { # Image topology overrides flavor "allow_threads": False, - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1", @@ -563,7 +576,7 @@ def test_best_config(self): }, { # Partial image topology overrides flavor "allow_threads": True, - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_sockets": "8", "hw:cpu_cores": "2", "hw:cpu_threads": "1" @@ -577,7 +590,7 @@ def test_best_config(self): }, { # Restrict use of threads "allow_threads": True, - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { 
"hw:cpu_max_threads": "1" }), "image": { @@ -587,7 +600,7 @@ def test_best_config(self): }, { # Force use of at least two sockets "allow_threads": True, - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", }), @@ -598,7 +611,7 @@ def test_best_config(self): }, { # Image limits reduce flavor "allow_threads": True, - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_max_sockets": "8", "hw:cpu_max_cores": "8", "hw:cpu_max_threads": "1", @@ -612,7 +625,7 @@ def test_best_config(self): }, { # Image limits kill flavor preferred "allow_threads": True, - "flavor": FakeFlavor(16, { + "flavor": FakeFlavor(16, 2048, { "hw:cpu_sockets": "2", "hw:cpu_cores": "8", "hw:cpu_threads": "1", @@ -635,3 +648,273 @@ def test_best_config(self): self.assertEqual(topo_test["expect"][0], topology.sockets) self.assertEqual(topo_test["expect"][1], topology.cores) self.assertEqual(topo_test["expect"][2], topology.threads) + + +class NUMATopologyTest(test.NoDBTestCase): + + def test_topology_constraints(self): + testdata = [ + { + "flavor": FakeFlavor(8, 2048, { + }), + "image": { + }, + "expect": None, + }, + { + "flavor": FakeFlavor(8, 2048, { + "hw:numa_nodes": 2 + }), + "image": { + }, + "expect": hw.VirtNUMAInstanceTopology( + [ + hw.VirtNUMATopologyCell(0, set([0, 1, 2, 3]), 1024), + hw.VirtNUMATopologyCell(1, set([4, 5, 6, 7]), 1024), + ]), + }, + { + # vcpus is not a multiple of nodes, so it + # is an error to not provide cpu/mem mapping + "flavor": FakeFlavor(8, 2048, { + "hw:numa_nodes": 3 + }), + "image": { + }, + "expect": exception.ImageNUMATopologyAsymmetric, + }, + { + "flavor": FakeFlavor(8, 2048, { + "hw:numa_nodes": 3, + "hw:numa_cpus.0": "0-3", + "hw:numa_mem.0": "1024", + "hw:numa_cpus.1": "4,6", + "hw:numa_mem.1": "512", + "hw:numa_cpus.2": "5,7", + "hw:numa_mem.2": "512", + }), + "image": { + }, + "expect": hw.VirtNUMAInstanceTopology( + [ + hw.VirtNUMATopologyCell(0, set([0, 1, 2, 3]), 
1024), + hw.VirtNUMATopologyCell(1, set([4, 6]), 512), + hw.VirtNUMATopologyCell(2, set([5, 7]), 512), + ]), + }, + { + # Request a CPU that is out of range + # wrt vCPU count + "flavor": FakeFlavor(8, 2048, { + "hw:numa_nodes": 1, + "hw:numa_cpus.0": "0-16", + "hw:numa_mem.0": "2048", + }), + "image": { + }, + "expect": exception.ImageNUMATopologyCPUOutOfRange, + }, + { + # Request the same CPU in two nodes + "flavor": FakeFlavor(8, 2048, { + "hw:numa_nodes": 2, + "hw:numa_cpus.0": "0-7", + "hw:numa_mem.0": "1024", + "hw:numa_cpus.1": "0-7", + "hw:numa_mem.1": "1024", + }), + "image": { + }, + "expect": exception.ImageNUMATopologyCPUDuplicates, + }, + { + # Request with some CPUs not assigned + "flavor": FakeFlavor(8, 2048, { + "hw:numa_nodes": 2, + "hw:numa_cpus.0": "0-2", + "hw:numa_mem.0": "1024", + "hw:numa_cpus.1": "3-4", + "hw:numa_mem.1": "1024", + }), + "image": { + }, + "expect": exception.ImageNUMATopologyCPUsUnassigned, + }, + { + # Request too little memory vs flavor total + "flavor": FakeFlavor(8, 2048, { + "hw:numa_nodes": 2, + "hw:numa_cpus.0": "0-3", + "hw:numa_mem.0": "512", + "hw:numa_cpus.1": "4-7", + "hw:numa_mem.1": "512", + }), + "image": { + }, + "expect": exception.ImageNUMATopologyMemoryOutOfRange, + }, + { + # Request too much memory vs flavor total + "flavor": FakeFlavor(8, 2048, { + "hw:numa_nodes": 2, + "hw:numa_cpus.0": "0-3", + "hw:numa_mem.0": "1576", + "hw:numa_cpus.1": "4-7", + "hw:numa_mem.1": "1576", + }), + "image": { + }, + "expect": exception.ImageNUMATopologyMemoryOutOfRange, + }, + { + # Request missing mem.0 + "flavor": FakeFlavor(8, 2048, { + "hw:numa_nodes": 2, + "hw:numa_cpus.0": "0-3", + "hw:numa_mem.1": "1576", + }), + "image": { + }, + "expect": exception.ImageNUMATopologyIncomplete, + }, + { + # Request missing cpu.0 + "flavor": FakeFlavor(8, 2048, { + "hw:numa_nodes": 2, + "hw:numa_mem.0": "1576", + "hw:numa_cpus.1": "4-7", + }), + "image": { + }, + "expect": exception.ImageNUMATopologyIncomplete, + }, + { + # 
Image attempts to override flavor + "flavor": FakeFlavor(8, 2048, { + "hw:numa_nodes": 2, + }), + "image": { + "hw_numa_nodes": 4, + }, + "expect": exception.ImageNUMATopologyForbidden, + }, + ] + + for testitem in testdata: + if testitem["expect"] is None: + topology = hw.VirtNUMAInstanceTopology.get_constraints( + testitem["flavor"], testitem["image"]) + self.assertIsNone(topology) + elif type(testitem["expect"]) == type: + self.assertRaises(testitem["expect"], + hw.VirtNUMAInstanceTopology.get_constraints, + testitem["flavor"], + testitem["image"]) + else: + topology = hw.VirtNUMAInstanceTopology.get_constraints( + testitem["flavor"], testitem["image"]) + self.assertEqual(len(testitem["expect"].cells), + len(topology.cells)) + for i in range(len(topology.cells)): + self.assertEqual(testitem["expect"].cells[i].cpuset, + topology.cells[i].cpuset) + self.assertEqual(testitem["expect"].cells[i].memory, + topology.cells[i].memory) + + def test_host_usage_contiguous(self): + hosttopo = hw.VirtNUMAHostTopology([ + hw.VirtNUMATopologyCell(0, set([0, 1, 2, 3]), 1024), + hw.VirtNUMATopologyCell(1, set([4, 6]), 512), + hw.VirtNUMATopologyCell(2, set([5, 7]), 512), + ]) + instance1 = hw.VirtNUMAInstanceTopology([ + hw.VirtNUMATopologyCell(0, set([0, 1, 2]), 256), + hw.VirtNUMATopologyCell(1, set([4]), 256), + ]) + instance2 = hw.VirtNUMAInstanceTopology([ + hw.VirtNUMATopologyCell(0, set([0, 1]), 256), + hw.VirtNUMATopologyCell(1, set([5, 7]), 256), + ]) + + hostusage = hw.VirtNUMAHostTopology.usage_from_instances( + hosttopo, [instance1, instance2]) + + self.assertEqual(len(hosttopo), len(hostusage)) + + self.assertIsInstance(hostusage.cells[0], + hw.VirtNUMATopologyCellUsage) + self.assertEqual(hosttopo.cells[0].cpuset, + hostusage.cells[0].cpuset) + self.assertEqual(hosttopo.cells[0].memory, + hostusage.cells[0].memory) + self.assertEqual(hostusage.cells[0].cpu_usage, 5) + self.assertEqual(hostusage.cells[0].memory_usage, 512) + + 
self.assertIsInstance(hostusage.cells[1], + hw.VirtNUMATopologyCellUsage) + self.assertEqual(hosttopo.cells[1].cpuset, + hostusage.cells[1].cpuset) + self.assertEqual(hosttopo.cells[1].memory, + hostusage.cells[1].memory) + self.assertEqual(hostusage.cells[1].cpu_usage, 3) + self.assertEqual(hostusage.cells[1].memory_usage, 512) + + self.assertIsInstance(hostusage.cells[2], + hw.VirtNUMATopologyCellUsage) + self.assertEqual(hosttopo.cells[2].cpuset, + hostusage.cells[2].cpuset) + self.assertEqual(hosttopo.cells[2].memory, + hostusage.cells[2].memory) + self.assertEqual(hostusage.cells[2].cpu_usage, 0) + self.assertEqual(hostusage.cells[2].memory_usage, 0) + + def test_host_usage_sparse(self): + hosttopo = hw.VirtNUMAHostTopology([ + hw.VirtNUMATopologyCell(0, set([0, 1, 2, 3]), 1024), + hw.VirtNUMATopologyCell(5, set([4, 6]), 512), + hw.VirtNUMATopologyCell(6, set([5, 7]), 512), + ]) + instance1 = hw.VirtNUMAInstanceTopology([ + hw.VirtNUMATopologyCell(0, set([0, 1, 2]), 256), + hw.VirtNUMATopologyCell(6, set([4]), 256), + ]) + instance2 = hw.VirtNUMAInstanceTopology([ + hw.VirtNUMATopologyCell(0, set([0, 1]), 256), + hw.VirtNUMATopologyCell(5, set([5, 7]), 256), + ]) + + hostusage = hw.VirtNUMAHostTopology.usage_from_instances( + hosttopo, [instance1, instance2]) + + self.assertEqual(len(hosttopo), len(hostusage)) + + self.assertIsInstance(hostusage.cells[0], + hw.VirtNUMATopologyCellUsage) + self.assertEqual(hosttopo.cells[0].id, + hostusage.cells[0].id) + self.assertEqual(hosttopo.cells[0].cpuset, + hostusage.cells[0].cpuset) + self.assertEqual(hosttopo.cells[0].memory, + hostusage.cells[0].memory) + self.assertEqual(hostusage.cells[0].cpu_usage, 5) + self.assertEqual(hostusage.cells[0].memory_usage, 512) + + self.assertIsInstance(hostusage.cells[1], + hw.VirtNUMATopologyCellUsage) + self.assertEqual(hosttopo.cells[1].id, + hostusage.cells[1].id) + self.assertEqual(hosttopo.cells[1].cpuset, + hostusage.cells[1].cpuset) + 
self.assertEqual(hosttopo.cells[1].memory, + hostusage.cells[1].memory) + self.assertEqual(hostusage.cells[1].cpu_usage, 2) + self.assertEqual(hostusage.cells[1].memory_usage, 256) + + self.assertIsInstance(hostusage.cells[2], + hw.VirtNUMATopologyCellUsage) + self.assertEqual(hosttopo.cells[2].cpuset, + hostusage.cells[2].cpuset) + self.assertEqual(hosttopo.cells[2].memory, + hostusage.cells[2].memory) + self.assertEqual(hostusage.cells[2].cpu_usage, 1) + self.assertEqual(hostusage.cells[2].memory_usage, 256) diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py index 5670fd5bd1..d1864915af 100644 --- a/nova/virt/hardware.py +++ b/nova/virt/hardware.py @@ -480,3 +480,247 @@ def get_best_config(flavor, image_meta, allow_threads=True): return VirtCPUTopology.get_desirable_configs(flavor, image_meta, allow_threads)[0] + + +class VirtNUMATopologyCell(object): + """Class for reporting NUMA resources in a cell + + The VirtNUMATopologyCell class represents the + hardware resources present in a NUMA cell. + """ + + def __init__(self, id, cpuset, memory): + """Create a new NUMA Cell + + :param id: integer identifier of cell + :param cpuset: set containing list of CPU indexes + :param memory: RAM measured in KiB + + Creates a new NUMA cell object to record the hardware + resources. + + :returns: a new NUMA cell object + """ + + super(VirtNUMATopologyCell, self).__init__() + + self.id = id + self.cpuset = cpuset + self.memory = memory + + +class VirtNUMATopologyCellUsage(VirtNUMATopologyCell): + """Class for reporting NUMA resources and usage in a cell + + The VirtNUMATopologyCellUsage class specializes + VirtNUMATopologyCell to include information about the + utilization of hardware resources in a NUMA cell. 
+ """ + + def __init__(self, id, cpuset, memory, cpu_usage, memory_usage): + """Create a new NUMA Cell with usage + + :param id: integer identifier of cell + :param cpuset: set containing list of CPU indexes + :param memory: RAM measured in KiB + :param cpu_usage: number of CPUs allocated + :param memory_usage: RAM allocated in KiB + + Creates a new NUMA cell object to record the hardware + resources and utilization. The number of CPUs specified + by the @cpu_usage parameter may be larger than the number + of bits set in @cpuset if CPU overcommit is used. Likewise + the amount of RAM specified by the @memory_usage parameter + may be larger than the available RAM in @memory if RAM + overcommit is used. + + :returns: a new NUMA cell object + """ + + super(VirtNUMATopologyCellUsage, self).__init__( + id, cpuset, memory) + + self.cpu_usage = cpu_usage + self.memory_usage = memory_usage + + +class VirtNUMATopology(object): + """Base class for tracking NUMA topology information + + The VirtNUMATopology class represents the NUMA hardware + topology for memory and CPUs in any machine. It is + later specialized for handling either guest instance + or compute host NUMA topology. + """ + + def __init__(self, cells=None): + """Create a new NUMA topology object + + :param cells: list of VirtNUMATopologyCell instances + + """ + + super(VirtNUMATopology, self).__init__() + + self.cells = cells or [] + + def __len__(self): + """Defined so that boolean testing works the same as for lists.""" + return len(self.cells) + + +class VirtNUMAInstanceTopology(VirtNUMATopology): + """Class to represent the topology configured for a guest + instance. 
It provides helper APIs to determine configuration + from the metadata specified against the flavour and or + disk image + """ + + @staticmethod + def _get_flavor_or_image_prop(flavor, image_meta, propname): + flavor_val = flavor.get('extra_specs', {}).get("hw:" + propname) + image_val = image_meta.get("hw_" + propname) + + if flavor_val is not None: + if image_val is not None: + raise exception.ImageNUMATopologyForbidden( + name='hw_' + propname) + + return flavor_val + else: + return image_val + + @classmethod + def _get_constraints_manual(cls, nodes, flavor, image_meta): + cells = [] + totalmem = 0 + + availcpus = set(range(flavor.vcpus)) + + for node in range(nodes): + cpus = cls._get_flavor_or_image_prop( + flavor, image_meta, "numa_cpus.%d" % node) + mem = cls._get_flavor_or_image_prop( + flavor, image_meta, "numa_mem.%d" % node) + + # We're expecting both properties set, so + # raise an error if either is missing + if cpus is None or mem is None: + raise exception.ImageNUMATopologyIncomplete() + + mem = int(mem) + cpuset = parse_cpu_spec(cpus) + + for cpu in cpuset: + if cpu > (flavor.vcpus - 1): + raise exception.ImageNUMATopologyCPUOutOfRange( + cpunum=cpu, cpumax=(flavor.vcpus - 1)) + + if cpu not in availcpus: + raise exception.ImageNUMATopologyCPUDuplicates( + cpunum=cpu) + + availcpus.remove(cpu) + + cells.append(VirtNUMATopologyCell(node, cpuset, mem)) + totalmem = totalmem + mem + + if availcpus: + raise exception.ImageNUMATopologyCPUsUnassigned( + cpuset=str(availcpus)) + + if totalmem != flavor.memory_mb: + raise exception.ImageNUMATopologyMemoryOutOfRange( + memsize=totalmem, + memtotal=flavor.memory_mb) + + return cls(cells) + + @classmethod + def _get_constraints_auto(cls, nodes, flavor, image_meta): + if ((flavor.vcpus % nodes) > 0 or + (flavor.memory_mb % nodes) > 0): + raise exception.ImageNUMATopologyAsymmetric() + + cells = [] + for node in range(nodes): + cpus = cls._get_flavor_or_image_prop( + flavor, image_meta, "numa_cpus.%d" % node) + 
mem = cls._get_flavor_or_image_prop( + flavor, image_meta, "numa_mem.%d" % node) + + # We're not expecting any properties set, so + # raise an error if there are any + if cpus is not None or mem is not None: + raise exception.ImageNUMATopologyIncomplete() + + ncpus = int(flavor.vcpus / nodes) + mem = int(flavor.memory_mb / nodes) + start = node * ncpus + cpuset = set(range(start, start + ncpus)) + + cells.append(VirtNUMATopologyCell(node, cpuset, mem)) + + return cls(cells) + + @classmethod + def get_constraints(cls, flavor, image_meta): + nodes = cls._get_flavor_or_image_prop( + flavor, image_meta, "numa_nodes") + + if nodes is None: + return None + + nodes = int(nodes) + + # We'll pick what path to go down based on whether + # anything is set for the first node. Both paths + # have logic to cope with inconsistent property usage + auto = cls._get_flavor_or_image_prop( + flavor, image_meta, "numa_cpus.0") is None + + if auto: + return cls._get_constraints_auto( + nodes, flavor, image_meta) + else: + return cls._get_constraints_manual( + nodes, flavor, image_meta) + + +class VirtNUMAHostTopology(VirtNUMATopology): + + """Class represents the NUMA configuration and utilization + of a compute node. 
As well as exposing the overall topology + it tracks the utilization of the resources by guest instances + """ + + @classmethod + def usage_from_instances(cls, host, instances): + """Get host topology usage + + :param host: VirtNUMAHostTopology without usage information + :param instances: list of VirtNUMAInstanceTopology + + Sum the usage from all @instances to report the overall + host topology usage + + :returns: VirtNUMAHostTopology including usage information + """ + + cells = [] + for hostcell in host.cells: + memory_usage = 0 + cpu_usage = 0 + for instance in instances: + for instancecell in instance.cells: + if instancecell.id == hostcell.id: + memory_usage = memory_usage + instancecell.memory + cpu_usage = cpu_usage + len(instancecell.cpuset) + + cell = VirtNUMATopologyCellUsage( + hostcell.id, hostcell.cpuset, hostcell.memory, + cpu_usage, memory_usage) + + cells.append(cell) + + return cls(cells) From 4a0b1dfcf49cd3d24ce1a37911851e1ef4596923 Mon Sep 17 00:00:00 2001 From: Nikola Dipanov Date: Fri, 11 Jul 2014 18:03:13 +0200 Subject: [PATCH 376/486] Add dict and json methods to VirtNUMATopology classes We will need to serialize and de-serialise these, so we add a method to base Cell and Topology classes that will handle transformations to and from JSON. Internally this uses (also newly added) _{to,from}_dict() methods, which are handy to have (especially for testing), but should not be used outside of the serialization logic. We also add defaults to the VirtNUMATopologyCellUsage for the usage bits, and add a class attribute 'cell_class' to VirtNUMATopology that makes it possible to write most of the serialization methods in a more general manner. 
Blueprint: virt-driver-numa-placement Change-Id: I552d642994f273215c307da7e0f11d77ee410acb --- nova/tests/virt/test_hardware.py | 101 +++++++++++++++++++++++++++++-- nova/virt/hardware.py | 51 +++++++++++++++- 2 files changed, 144 insertions(+), 8 deletions(-) diff --git a/nova/tests/virt/test_hardware.py b/nova/tests/virt/test_hardware.py index 050c60ea09..228f4ca871 100644 --- a/nova/tests/virt/test_hardware.py +++ b/nova/tests/virt/test_hardware.py @@ -14,6 +14,7 @@ from nova import exception from nova import test +from nova.tests import matchers from nova.virt import hardware as hw @@ -823,9 +824,9 @@ def test_topology_constraints(self): def test_host_usage_contiguous(self): hosttopo = hw.VirtNUMAHostTopology([ - hw.VirtNUMATopologyCell(0, set([0, 1, 2, 3]), 1024), - hw.VirtNUMATopologyCell(1, set([4, 6]), 512), - hw.VirtNUMATopologyCell(2, set([5, 7]), 512), + hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024), + hw.VirtNUMATopologyCellUsage(1, set([4, 6]), 512), + hw.VirtNUMATopologyCellUsage(2, set([5, 7]), 512), ]) instance1 = hw.VirtNUMAInstanceTopology([ hw.VirtNUMATopologyCell(0, set([0, 1, 2]), 256), @@ -870,9 +871,9 @@ def test_host_usage_contiguous(self): def test_host_usage_sparse(self): hosttopo = hw.VirtNUMAHostTopology([ - hw.VirtNUMATopologyCell(0, set([0, 1, 2, 3]), 1024), - hw.VirtNUMATopologyCell(5, set([4, 6]), 512), - hw.VirtNUMATopologyCell(6, set([5, 7]), 512), + hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024), + hw.VirtNUMATopologyCellUsage(5, set([4, 6]), 512), + hw.VirtNUMATopologyCellUsage(6, set([5, 7]), 512), ]) instance1 = hw.VirtNUMAInstanceTopology([ hw.VirtNUMATopologyCell(0, set([0, 1, 2]), 256), @@ -918,3 +919,91 @@ def test_host_usage_sparse(self): hostusage.cells[2].memory) self.assertEqual(hostusage.cells[2].cpu_usage, 1) self.assertEqual(hostusage.cells[2].memory_usage, 256) + + def _test_to_dict(self, cell_or_topo, expected): + got = cell_or_topo._to_dict() + self.assertThat(expected, 
matchers.DictMatches(got)) + + def assertNUMACellMatches(self, expected_cell, got_cell): + attrs = ('cpuset', 'memory', 'id') + if isinstance(expected_cell, hw.VirtNUMAHostTopology): + attrs += ('cpu_usage', 'memory_usage') + + for attr in attrs: + self.assertEqual(getattr(expected_cell, attr), + getattr(got_cell, attr)) + + def _test_cell_from_dict(self, data_dict, expected_cell): + cell_class = expected_cell.__class__ + got_cell = cell_class._from_dict(data_dict) + self.assertNUMACellMatches(expected_cell, got_cell) + + def _test_topo_from_dict(self, data_dict, expected_topo, with_usage=False): + topology_class = ( + hw.VirtNUMAHostTopology + if with_usage else hw.VirtNUMAInstanceTopology) + got_topo = topology_class._from_dict( + data_dict) + for got_cell, expected_cell in zip( + got_topo.cells, expected_topo.cells): + self.assertNUMACellMatches(expected_cell, got_cell) + + def test_numa_cell_dict(self): + cell = hw.VirtNUMATopologyCell(1, set([1, 2]), 512) + cell_dict = {'cpus': '1,2', + 'mem': {'total': 512}, + 'id': 1} + self._test_to_dict(cell, cell_dict) + self._test_cell_from_dict(cell_dict, cell) + + def test_numa_cell_usage_dict(self): + cell = hw.VirtNUMATopologyCellUsage(1, set([1, 2]), 512) + cell_dict = {'cpus': '1,2', 'cpu_usage': 0, + 'mem': {'total': 512, 'used': 0}, + 'id': 1} + self._test_to_dict(cell, cell_dict) + self._test_cell_from_dict(cell_dict, cell) + + def test_numa_instance_topo_dict(self): + topo = hw.VirtNUMAInstanceTopology( + cells=[ + hw.VirtNUMATopologyCell(1, set([1, 2]), 1024), + hw.VirtNUMATopologyCell(2, set([3, 4]), 1024)]) + topo_dict = {'cells': [ + {'cpus': '1,2', + 'mem': {'total': 1024}, + 'id': 1}, + {'cpus': '3,4', + 'mem': {'total': 1024}, + 'id': 2}]} + self._test_to_dict(topo, topo_dict) + self._test_topo_from_dict(topo_dict, topo, with_usage=False) + + def test_numa_topo_dict_with_usage(self): + topo = hw.VirtNUMAHostTopology( + cells=[ + hw.VirtNUMATopologyCellUsage( + 1, set([1, 2]), 1024), + 
hw.VirtNUMATopologyCellUsage( + 2, set([3, 4]), 1024)]) + topo_dict = {'cells': [ + {'cpus': '1,2', 'cpu_usage': 0, + 'mem': {'total': 1024, 'used': 0}, + 'id': 1}, + {'cpus': '3,4', 'cpu_usage': 0, + 'mem': {'total': 1024, 'used': 0}, + 'id': 2}]} + self._test_to_dict(topo, topo_dict) + self._test_topo_from_dict(topo_dict, topo, with_usage=True) + + def test_json(self): + expected = hw.VirtNUMAHostTopology( + cells=[ + hw.VirtNUMATopologyCellUsage( + 1, set([1, 2]), 1024), + hw.VirtNUMATopologyCellUsage( + 2, set([3, 4]), 1024)]) + got = hw.VirtNUMAHostTopology.from_json(expected.to_json()) + + for exp_cell, got_cell in zip(expected.cells, got.cells): + self.assertNUMACellMatches(exp_cell, got_cell) diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py index d1864915af..da38cdd24b 100644 --- a/nova/virt/hardware.py +++ b/nova/virt/hardware.py @@ -18,6 +18,7 @@ from nova import exception from nova.i18n import _ +from nova.openstack.common import jsonutils from nova.openstack.common import log as logging virt_cpu_opts = [ @@ -508,6 +509,18 @@ def __init__(self, id, cpuset, memory): self.cpuset = cpuset self.memory = memory + def _to_dict(self): + return {'cpus': format_cpu_spec(self.cpuset, allow_ranges=False), + 'mem': {'total': self.memory}, + 'id': self.id} + + @classmethod + def _from_dict(cls, data_dict): + cpuset = parse_cpu_spec(data_dict.get('cpus', '')) + memory = data_dict.get('mem', {}).get('total', 0) + cell_id = data_dict.get('id') + return cls(cell_id, cpuset, memory) + class VirtNUMATopologyCellUsage(VirtNUMATopologyCell): """Class for reporting NUMA resources and usage in a cell @@ -517,7 +530,7 @@ class VirtNUMATopologyCellUsage(VirtNUMATopologyCell): utilization of hardware resources in a NUMA cell. 
""" - def __init__(self, id, cpuset, memory, cpu_usage, memory_usage): + def __init__(self, id, cpuset, memory, cpu_usage=0, memory_usage=0): """Create a new NUMA Cell with usage :param id: integer identifier of cell @@ -543,6 +556,21 @@ def __init__(self, id, cpuset, memory, cpu_usage, memory_usage): self.cpu_usage = cpu_usage self.memory_usage = memory_usage + def _to_dict(self): + data_dict = super(VirtNUMATopologyCellUsage, self)._to_dict() + data_dict['mem']['used'] = self.memory_usage + data_dict['cpu_usage'] = self.cpu_usage + return data_dict + + @classmethod + def _from_dict(cls, data_dict): + cpuset = parse_cpu_spec(data_dict.get('cpus', '')) + cpu_usage = data_dict.get('cpu_usage', 0) + memory = data_dict.get('mem', {}).get('total', 0) + memory_usage = data_dict.get('mem', {}).get('used', 0) + cell_id = data_dict.get('id') + return cls(cell_id, cpuset, memory, cpu_usage, memory_usage) + class VirtNUMATopology(object): """Base class for tracking NUMA topology information @@ -568,6 +596,21 @@ def __len__(self): """Defined so that boolean testing works the same as for lists.""" return len(self.cells) + def _to_dict(self): + return {'cells': [cell._to_dict() for cell in self.cells]} + + @classmethod + def _from_dict(cls, data_dict): + return cls(cells=[cls.cell_class._from_dict(cell_dict) + for cell_dict in data_dict.get('cells', [])]) + + def to_json(self): + return jsonutils.dumps(self._to_dict()) + + @classmethod + def from_json(cls, json_string): + return cls._from_dict(jsonutils.loads(json_string)) + class VirtNUMAInstanceTopology(VirtNUMATopology): """Class to represent the topology configured for a guest @@ -576,6 +619,8 @@ class VirtNUMAInstanceTopology(VirtNUMATopology): disk image """ + cell_class = VirtNUMATopologyCell + @staticmethod def _get_flavor_or_image_prop(flavor, image_meta, propname): flavor_val = flavor.get('extra_specs', {}).get("hw:" + propname) @@ -694,6 +739,8 @@ class VirtNUMAHostTopology(VirtNUMATopology): it tracks the 
utilization of the resources by guest instances """ + cell_class = VirtNUMATopologyCellUsage + @classmethod def usage_from_instances(cls, host, instances): """Get host topology usage @@ -717,7 +764,7 @@ def usage_from_instances(cls, host, instances): memory_usage = memory_usage + instancecell.memory cpu_usage = cpu_usage + len(instancecell.cpuset) - cell = VirtNUMATopologyCellUsage( + cell = cls.cell_class( hostcell.id, hostcell.cpuset, hostcell.memory, cpu_usage, memory_usage) From 54da43b29ca444e316714e2bc402425bd5832d96 Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Tue, 12 Aug 2014 17:46:46 +0900 Subject: [PATCH 377/486] Raise HTTPNotFound error from V2 cert show API There might be possibility of not having the CA file while requesting certificate show API. API should raise HTTPNotFound for clear error. This patch raise this exception for V2 API. This has been fixed in V3 API. Adding UT also for the same. Change-Id: Iafb33fce6433a31f9767067bbc45b95acd4ee511 --- nova/api/openstack/compute/contrib/certificates.py | 8 ++++++-- .../openstack/compute/contrib/test_certificates.py | 12 ++++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/compute/contrib/certificates.py b/nova/api/openstack/compute/contrib/certificates.py index 9f1131aae6..dd6b7d3e71 100644 --- a/nova/api/openstack/compute/contrib/certificates.py +++ b/nova/api/openstack/compute/contrib/certificates.py @@ -18,6 +18,7 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil import nova.cert.rpcapi +from nova import exception from nova.i18n import _ authorize = extensions.extension_authorizer('compute', 'certificates') @@ -58,8 +59,11 @@ def show(self, req, id): if id != 'root': msg = _("Only root certificate can be retrieved.") raise webob.exc.HTTPNotImplemented(explanation=msg) - cert = self.cert_rpcapi.fetch_ca(context, - project_id=context.project_id) + try: + cert = self.cert_rpcapi.fetch_ca(context, + project_id=context.project_id) + 
except exception.CryptoCAFileNotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) return {'certificate': _translate_certificate_view(cert)} @wsgi.serializers(xml=CertificateTemplate) diff --git a/nova/tests/api/openstack/compute/contrib/test_certificates.py b/nova/tests/api/openstack/compute/contrib/test_certificates.py index af0ea6060c..367a48aad6 100644 --- a/nova/tests/api/openstack/compute/contrib/test_certificates.py +++ b/nova/tests/api/openstack/compute/contrib/test_certificates.py @@ -15,9 +15,12 @@ # under the License. from lxml import etree +import mock import mox +from webob import exc from nova.api.openstack.compute.contrib import certificates +from nova.cert import rpcapi from nova import context from nova import exception from nova.openstack.common import policy as common_policy @@ -96,6 +99,15 @@ def test_certificates_create_policy_failed(self): self.assertIn("compute_extension:certificates", exc.format_message()) + @mock.patch.object(rpcapi.CertAPI, 'fetch_ca', + side_effect=exception.CryptoCAFileNotFound(project='fake')) + def test_non_exist_certificates_show(self, mock_fetch_ca): + req = fakes.HTTPRequest.blank('/v2/fake/os-certificates/root') + self.assertRaises( + exc.HTTPNotFound, + self.controller.show, + req, 'root') + class CertificatesSerializerTest(test.NoDBTestCase): def test_index_serializer(self): From 257183a8a9130f2b444f7f96ec8582da89684528 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 13 Aug 2014 09:03:49 -0700 Subject: [PATCH 378/486] Direct-load Instance.fault when lazy-loading This breaks out instance.fault lazy-loading from the other attributes, since we have a direct and more efficient way of fetching the fault than re-querying the entire instance with the fault attached. This also should help address fault-related races in the API where a list of instances is queried, one of those is deleted, and the fault attribute later triggers an InstanceNotFound whilst trying to do the lazy-load. 
Change-Id: Iceb552663db93fa2a01fb90ece0c1eebecdb783f Closes-bug: #1352659 --- nova/objects/instance.py | 33 ++++++++++++++------- nova/tests/objects/test_instance.py | 45 +++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 10 deletions(-) diff --git a/nova/objects/instance.py b/nova/objects/instance.py index 136e98eb1e..263b327731 100644 --- a/nova/objects/instance.py +++ b/nova/objects/instance.py @@ -502,6 +502,23 @@ def refresh(self, context, use_slave=False): self[field] = current[field] self.obj_reset_changes() + def _load_generic(self, attrname): + instance = self.__class__.get_by_uuid(self._context, + uuid=self.uuid, + expected_attrs=[attrname]) + + # NOTE(danms): Never allow us to recursively-load + if instance.obj_attr_is_set(attrname): + self[attrname] = instance[attrname] + else: + raise exception.ObjectActionError( + action='obj_load_attr', + reason='loading %s requires recursion' % attrname) + + def _load_fault(self): + self.fault = objects.InstanceFault.get_latest_for_instance( + self._context, self.uuid) + def obj_load_attr(self, attrname): if attrname not in INSTANCE_OPTIONAL_ATTRS: raise exception.ObjectActionError( @@ -517,17 +534,13 @@ def obj_load_attr(self, attrname): 'uuid': self.uuid, }) # FIXME(comstud): This should be optimized to only load the attr. 
- instance = self.__class__.get_by_uuid(self._context, - uuid=self.uuid, - expected_attrs=[attrname]) - - # NOTE(danms): Never allow us to recursively-load - if instance.obj_attr_is_set(attrname): - self[attrname] = instance[attrname] + if attrname == 'fault': + # NOTE(danms): We handle fault differently here so that we + # can be more efficient + self._load_fault() else: - raise exception.ObjectActionError( - action='obj_load_attr', - reason='loading %s requires recursion' % attrname) + self._load_generic(attrname) + self.obj_reset_changes([attrname]) def get_flavor(self, namespace=None): prefix = ('%s_' % namespace) if namespace is not None else '' diff --git a/nova/tests/objects/test_instance.py b/nova/tests/objects/test_instance.py index 2d35fd9386..270fe7641c 100644 --- a/nova/tests/objects/test_instance.py +++ b/nova/tests/objects/test_instance.py @@ -836,6 +836,51 @@ def test_reset_changes(self): self.assertEqual({'1985': 'present'}, inst._orig_metadata) self.assertEqual({}, inst._orig_system_metadata) + def test_load_generic_calls_handler(self): + inst = instance.Instance(context=self.context, + uuid='fake-uuid') + with mock.patch.object(inst, '_load_generic') as mock_load: + def fake_load(name): + inst.system_metadata = {} + + mock_load.side_effect = fake_load + inst.system_metadata + mock_load.assert_called_once_with('system_metadata') + + def test_load_fault_calls_handler(self): + inst = instance.Instance(context=self.context, + uuid='fake-uuid') + with mock.patch.object(inst, '_load_fault') as mock_load: + def fake_load(): + inst.fault = None + + mock_load.side_effect = fake_load + inst.fault + mock_load.assert_called_once_with() + + @mock.patch('nova.objects.Instance.get_by_uuid') + def test_load_generic(self, mock_get): + inst2 = instance.Instance(metadata={'foo': 'bar'}) + mock_get.return_value = inst2 + inst = instance.Instance(context=self.context, + uuid='fake-uuid') + inst.metadata + self.assertEqual({'foo': 'bar'}, inst.metadata) + 
mock_get.assert_called_once_with(self.context, + uuid='fake-uuid', + expected_attrs=['metadata']) + self.assertNotIn('metadata', inst.obj_what_changed()) + + @mock.patch('nova.db.instance_fault_get_by_instance_uuids') + def test_load_fault(self, mock_get): + fake_fault = test_instance_fault.fake_faults['fake-uuid'][0] + mock_get.return_value = {'fake': [fake_fault]} + inst = instance.Instance(context=self.context, uuid='fake') + fault = inst.fault + mock_get.assert_called_once_with(self.context, ['fake']) + self.assertEqual(fake_fault['id'], fault.id) + self.assertNotIn('metadata', inst.obj_what_changed()) + class TestInstanceObject(test_objects._LocalTest, _TestInstanceObject): From bf8f7c90fcb0f1db60e778dc85fedfcf9d7a9338 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 14 Aug 2014 07:19:19 -0700 Subject: [PATCH 379/486] Include child_versions in object hashes This adds the child_versions array to the object hash test. This helps remind contributors and reviewers that changing that mapping also requires a bump to the list object version. 
Change-Id: Ieb9c6ebe0dc1ca65d0541187fcbba4e73fe0f8d1 --- nova/tests/objects/test_objects.py | 47 ++++++++++++++++-------------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index 9d1519a05b..a4c256b141 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -925,58 +925,58 @@ def test_object_serialization_iterables(self): # objects object_data = { 'Agent': '1.0-c4ff8a833aee8ae44ab8aed1a171273d', - 'AgentList': '1.0-f8b860e1f2ce80e676ba1a37ddf86e4f', + 'AgentList': '1.0-31f07426a729311a42ff7f6246e76e25', 'Aggregate': '1.1-f5d477be06150529a9b2d27cc49030b5', - 'AggregateList': '1.2-504137b7ec3855b00d01f165dcebc23e', + 'AggregateList': '1.2-4b02a285b8612bfb86a96ff80052fb0a', 'BlockDeviceMapping': '1.1-9968ffe513e7672484b0f528b034cd0f', - 'BlockDeviceMappingList': '1.2-d6d7df540ca149dda78b22b4b10bdef3', + 'BlockDeviceMappingList': '1.2-a6df0a8ef84d6bbaba51143499e9bed2', 'ComputeNode': '1.4-ed20e7a7c1a4612fe7d2836d5887c726', - 'ComputeNodeList': '1.3-ff59187056eaa96f6fd3fb70693d818c', + 'ComputeNodeList': '1.3-1c9c281e02182eabffa6b63ee349996a', 'DNSDomain': '1.0-5bdc288d7c3b723ce86ede998fd5c9ba', - 'DNSDomainList': '1.0-6e3cc498d89dd7e90f9beb021644221c', + 'DNSDomainList': '1.0-cfb3e7e82be661501c31099523154db4', 'EC2InstanceMapping': '1.0-627baaf4b12c9067200979bdc4558a99', 'EC2SnapshotMapping': '1.0-26cf315be1f8abab4289d4147671c836', 'EC2VolumeMapping': '1.0-2f8c3bf077c65a425294ec2b361c9143', 'FixedIP': '1.1-082fb26772ce2db783ce4934edca4652', - 'FixedIPList': '1.1-8ea5cfca611598f1242fd4095e49e58b', + 'FixedIPList': '1.1-c12d1165c88fa721ab8abcf502fa1b29', 'Flavor': '1.1-096cfd023c35d07542cf732fb29b45e4', - 'FlavorList': '1.1-d559595f55936a6d602721c3bdff6fff', + 'FlavorList': '1.1-a3d5551267cb8f62ff38ded125900721', 'FloatingIP': '1.1-27eb68b7c9c620dd5f0561b5a3be0e82', - 'FloatingIPList': '1.2-1b77acb3523d16e3282624f51fee60d8', + 'FloatingIPList': 
'1.2-6c5b0b4d4a4c17575f4d91bae14e5237', 'Instance': '1.13-c9cfd71ddc9d6e7e7c72879f4d5982ee', 'InstanceAction': '1.1-6b1d0a6dbd522b5a83c20757ec659663', 'InstanceActionEvent': '1.1-f144eaa9fb22f248fc41ed8401a3a1be', - 'InstanceActionEventList': '1.0-937f4ed414ff2354de416834b948fbd6', - 'InstanceActionList': '1.0-d46ade45deeba63c55821e22c164bd1b', + 'InstanceActionEventList': '1.0-1d5cc958171d6ce07383c2ad6208318e', + 'InstanceActionList': '1.0-368410fdb8d69ae20c495308535d6266', 'InstanceExternalEvent': '1.0-f1134523654407a875fd59b80f759ee7', 'InstanceFault': '1.2-313438e37e9d358f3566c85f6ddb2d3e', - 'InstanceFaultList': '1.1-bd578be60d045629ca7b3ce1a2493ae4', + 'InstanceFaultList': '1.1-aeb598ffd0cd6aa61fca7adf0f5e900d', 'InstanceGroup': '1.7-b31ea31fdb452ab7810adbe789244f91', - 'InstanceGroupList': '1.2-bebd07052779ae3b47311efe85428a8b', + 'InstanceGroupList': '1.2-a474822eebc3e090012e581adcc1fa09', 'InstanceInfoCache': '1.5-ef64b604498bfa505a8c93747a9d8b2f', - 'InstanceList': '1.6-78800140a5f9818ab00f8c052437655f', + 'InstanceList': '1.6-6891f6f61f8eb0b55c0cefac3f734c24', 'KeyPair': '1.1-3410f51950d052d861c11946a6ae621a', - 'KeyPairList': '1.0-854cfff138dac9d5925c89cf805d1a70', + 'KeyPairList': '1.0-71132a568cc5d078ba1748a9c02c87b8', 'Migration': '1.1-67c47726c2c71422058cd9d149d6d3ed', - 'MigrationList': '1.1-6ca2ebb822ebfe1a660bace824b378c6', + 'MigrationList': '1.1-8c5f678edc72a592d591a13b35e54353', 'MyObj': '1.6-55bfc22259fd3df239e4a49fa3552c93', 'MyOwnedObject': '1.0-0f3d6c028543d7f3715d121db5b8e298', 'Network': '1.2-2ea21ede5e45bb80e7b7ac7106915c4e', - 'NetworkList': '1.2-16510568c6e64cb8b358cb2b11333196', + 'NetworkList': '1.2-aa4ad23f035b97a41732ea8b3445fc5e', 'PciDevice': '1.1-523c46f960d93f78db55f0280b09441e', - 'PciDeviceList': '1.0-5da7b4748a5a2594bae2cd0bd211cca2', + 'PciDeviceList': '1.0-43d6c4ea0dd77955e97b23d937a3f925', 'Quotas': '1.1-7897deef00e6cd3095c8916f68d24418', 'QuotasNoOp': '1.1-4b06fd721c586b907ddd6543a00d6c2f', 'S3ImageMapping': 
'1.0-9225943a44a91ad0349b9fd8bd3f3ce2', 'SecurityGroup': '1.1-bba0e72865e0953793e796571692453b', - 'SecurityGroupList': '1.0-9513387aabf08c2a7961ac4da4315ed4', + 'SecurityGroupList': '1.0-528e6448adfeeb78921ebeda499ab72f', 'SecurityGroupRule': '1.0-fdd020bdd7eb8bac744ad6f9a4ef8165', - 'SecurityGroupRuleList': '1.0-af4deeea8699ee90fb217f77d711d781', + 'SecurityGroupRuleList': '1.0-1052b37dc59a1957ee5b0b9268d03af3', 'Service': '1.2-5a3df338c669e1148251431370b440ef', - 'ServiceList': '1.0-ae64b4922df28d7cd11c59cddddf926c', + 'ServiceList': '1.0-2c960ac9bc56a12c65b9118bb3a58b44', 'TestSubclassedObject': '1.6-c63feb2f2533b7d075490c04a2cc10dd', 'VirtualInterface': '1.0-10fdac4c704102b6d57d6936d6d790d2', - 'VirtualInterfaceList': '1.0-dc9e9d5bce522d28f96092c49119b3e0', + 'VirtualInterfaceList': '1.0-accbf02628a8063c1d885077a2bf49b6', } @@ -999,7 +999,10 @@ def _get_fingerprint(self, obj_class): # Of course, these are just the mechanical changes we can detect, # but many other things may require a version bump (method behavior # and return value changes, for example). - relevant_data = (fields, methods) + if hasattr(obj_class, 'child_versions'): + relevant_data = (fields, methods, obj_class.child_versions) + else: + relevant_data = (fields, methods) return '%s-%s' % (obj_class.VERSION, hashlib.md5(str(relevant_data)).hexdigest()) From 98ee523cfb3e788bda3ca3f5ebd918bdc67e00cc Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 14 Aug 2014 07:37:00 -0700 Subject: [PATCH 380/486] Report all objects with hash mismatches in a single go The object hash test previously stopped when it hit the first object with a mismatched hash. This meant that if you changed a couple objects, you had to run the test, fix the first, run the test, fix the next, etc. This patch collects all the data and then asserts the delta set, if there were mismatches. 
Change-Id: Ie35a4dbd6941a1f31e0cef7ea56fa2cc5e9a8ef0 --- nova/tests/objects/test_objects.py | 41 +++++++++++++++--------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index a4c256b141..a4eadcfce8 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -983,9 +983,9 @@ def test_object_serialization_iterables(self): class TestObjectVersions(test.TestCase): def setUp(self): super(TestObjectVersions, self).setUp() - self._fingerprints = {} - def _get_fingerprint(self, obj_class): + def _get_fingerprint(self, obj_name): + obj_class = base.NovaObject._obj_classes[obj_name][0] fields = obj_class.fields.items() fields.sort() methods = [] @@ -1003,30 +1003,31 @@ def _get_fingerprint(self, obj_class): relevant_data = (fields, methods, obj_class.child_versions) else: relevant_data = (fields, methods) - return '%s-%s' % (obj_class.VERSION, - hashlib.md5(str(relevant_data)).hexdigest()) - - def _test_versions_cls(self, obj_name): - obj_class = base.NovaObject._obj_classes[obj_name][0] - expected_fingerprint = object_data.get(obj_name, 'unknown') - actual_fingerprint = self._get_fingerprint(obj_class) - - self._fingerprints[obj_name] = actual_fingerprint - - if os.getenv('GENERATE_HASHES'): - return - - self.assertEqual( - expected_fingerprint, actual_fingerprint, - ('%s object has changed; please make sure the version ' - 'has been bumped, and then update this hash') % obj_name) + fingerprint = '%s-%s' % (obj_class.VERSION, + hashlib.md5(str(relevant_data)).hexdigest()) + return fingerprint def test_versions(self): + fingerprints = {} for obj_name in base.NovaObject._obj_classes: - self._test_versions_cls(obj_name) + fingerprints[obj_name] = self._get_fingerprint(obj_name) if os.getenv('GENERATE_HASHES'): file('object_hashes.txt', 'w').write( pprint.pformat(self._fingerprints)) raise test.TestingException( 'Generated hashes in object_hashes.txt') + 
+ stored = set(object_data.items()) + computed = set(fingerprints.items()) + changed = stored - computed + expected = {} + actual = {} + for name, hash in changed: + expected[name] = object_data.get(name) + actual[name] = fingerprints.get(name) + + self.assertEqual(expected, actual, + 'Some objects have changed; please make sure the ' + 'versions have been bumped, and then update their ' + 'hashes here.') From 23340b49b1adee5cb9592b8e6a8471969b9341c7 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 5 Aug 2014 05:11:17 -0700 Subject: [PATCH 381/486] Image caching: update image caching to use objects Commit 1023e703bd41c2a42b1159af0d9e907e94440b34 added support for objects. This patch ensures that the parsing of the instances is correct. The patch does the following: 1. In the method _list_running_instances it treats all instances as an object 2. Fixes the bug mentioned below. The code was making a string from None. This would lead to invalid image details. 3. Ensures that the unit tests actually pass instance objects instead of an instance dict 4. 
In relevant tests, update assertEquals calls to have the expected parameter first
'task_state': ''}] + instances = [{'image_ref': '1', + 'host': CONF.host, + 'name': 'instance-1', + 'uuid': '123', + 'vm_state': '', + 'task_state': ''}, + {'image_ref': '1', + 'host': CONF.host, + 'name': 'instance-2', + 'uuid': '456', + 'vm_state': '', + 'task_state': ''}] + + all_instances = [] + for instance in instances: + all_instances.append(fake_instance.fake_instance_obj( + None, **instance)) def touch(filename): f = open(filename, 'w') diff --git a/nova/tests/virt/test_imagecache.py b/nova/tests/virt/test_imagecache.py index 42058e9124..a2b9ff9f59 100644 --- a/nova/tests/virt/test_imagecache.py +++ b/nova/tests/virt/test_imagecache.py @@ -16,6 +16,7 @@ from nova.compute import vm_states from nova import test +from nova.tests import fake_instance from nova.virt import imagecache CONF = cfg.CONF @@ -47,26 +48,29 @@ def test_cache_manager(self): None, [], None) def test_list_running_instances(self): - all_instances = [{'image_ref': '1', - 'host': CONF.host, - 'name': 'inst-1', - 'uuid': '123', - 'vm_state': '', - 'task_state': ''}, - {'image_ref': '2', - 'host': CONF.host, - 'name': 'inst-2', - 'uuid': '456', - 'vm_state': '', - 'task_state': ''}, - {'image_ref': '2', - 'kernel_id': '21', - 'ramdisk_id': '22', - 'host': 'remotehost', - 'name': 'inst-3', - 'uuid': '789', - 'vm_state': '', - 'task_state': ''}] + instances = [{'image_ref': '1', + 'host': CONF.host, + 'id': '1', + 'uuid': '123', + 'vm_state': '', + 'task_state': ''}, + {'image_ref': '2', + 'host': CONF.host, + 'id': '2', + 'uuid': '456', + 'vm_state': '', + 'task_state': ''}, + {'image_ref': '2', + 'kernel_id': '21', + 'ramdisk_id': '22', + 'host': 'remotehost', + 'id': '3', + 'uuid': '789', + 'vm_state': '', + 'task_state': ''}] + + all_instances = [fake_instance.fake_instance_obj(None, **instance) + for instance in instances] image_cache_manager = imagecache.ImageCacheManager() @@ -75,13 +79,17 @@ def test_list_running_instances(self): all_instances) 
self.assertEqual(len(running['used_images']), 4) - self.assertEqual(running['used_images']['1'], (1, 0, ['inst-1'])) - self.assertEqual(running['used_images']['2'], (1, 1, ['inst-2', - 'inst-3'])) - self.assertEqual(running['used_images']['21'], (0, 1, ['inst-3'])) - self.assertEqual(running['used_images']['22'], (0, 1, ['inst-3'])) - - self.assertIn('inst-1', running['instance_names']) + self.assertEqual((1, 0, ['instance-00000001']), + running['used_images']['1']) + self.assertEqual((1, 1, ['instance-00000002', + 'instance-00000003']), + running['used_images']['2']) + self.assertEqual((0, 1, ['instance-00000003']), + running['used_images']['21']) + self.assertEqual((0, 1, ['instance-00000003']), + running['used_images']['22']) + + self.assertIn('instance-00000001', running['instance_names']) self.assertIn('123', running['instance_names']) self.assertEqual(len(running['image_popularity']), 4) @@ -91,20 +99,25 @@ def test_list_running_instances(self): self.assertEqual(running['image_popularity']['22'], 1) def test_list_resizing_instances(self): - all_instances = [{'image_ref': '1', - 'host': CONF.host, - 'name': 'inst-1', - 'uuid': '123', - 'vm_state': vm_states.RESIZED, - 'task_state': None}] + instances = [{'image_ref': '1', + 'host': CONF.host, + 'id': '1', + 'uuid': '123', + 'vm_state': vm_states.RESIZED, + 'task_state': None}] + + all_instances = [fake_instance.fake_instance_obj(None, **instance) + for instance in instances] image_cache_manager = imagecache.ImageCacheManager() running = image_cache_manager._list_running_instances(None, all_instances) self.assertEqual(len(running['used_images']), 1) - self.assertEqual((1, 0, ['inst-1']), running['used_images']['1']) - self.assertEqual(set(['inst-1', '123', 'inst-1_resize', '123_resize']), + self.assertEqual((1, 0, ['instance-00000001']), + running['used_images']['1']) + self.assertEqual(set(['instance-00000001', '123', + 'instance-00000001_resize', '123_resize']), running['instance_names']) 
self.assertEqual(len(running['image_popularity']), 1) diff --git a/nova/tests/virt/vmwareapi/test_imagecache.py b/nova/tests/virt/vmwareapi/test_imagecache.py index 3c3108d7f0..671c3ac9e1 100644 --- a/nova/tests/virt/vmwareapi/test_imagecache.py +++ b/nova/tests/virt/vmwareapi/test_imagecache.py @@ -20,6 +20,7 @@ from nova.openstack.common import timeutils from nova import test +from nova.tests import fake_instance from nova.tests.virt.vmwareapi import fake from nova.virt.vmwareapi import ds_util from nova.virt.vmwareapi import imagecache @@ -233,9 +234,14 @@ def fake_age_cached_images(context, datastore, 'uuid': '456', 'vm_state': '', 'task_state': ''}] + all_instances = [] + for instance in instances: + all_instances.append(fake_instance.fake_instance_obj( + None, **instance)) + self.images = set(['1', '2']) datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref') dc_info = vmops.DcInfo(ref='dc_ref', name='name', vmFolder='vmFolder') datastores_info = [(datastore, dc_info)] - self._imagecache.update('context', instances, datastores_info) + self._imagecache.update('context', all_instances, datastores_info) diff --git a/nova/virt/imagecache.py b/nova/virt/imagecache.py index eb551ed609..4045683a3c 100644 --- a/nova/virt/imagecache.py +++ b/nova/virt/imagecache.py @@ -78,26 +78,24 @@ def _list_running_instances(self, context, all_instances): # NOTE(mikal): "instance name" here means "the name of a directory # which might contain an instance" and therefore needs to include # historical permutations as well as the current one. 
- instance_names.add(instance['name']) - instance_names.add(instance['uuid']) - - if (instance['task_state'] in self.resize_states or - instance['vm_state'] == vm_states.RESIZED): - instance_names.add(instance['name'] + '_resize') - instance_names.add(instance['uuid'] + '_resize') + instance_names.add(instance.name) + instance_names.add(instance.uuid) + if (instance.task_state in self.resize_states or + instance.vm_state == vm_states.RESIZED): + instance_names.add(instance.name + '_resize') + instance_names.add(instance.uuid + '_resize') for image_key in ['image_ref', 'kernel_id', 'ramdisk_id']: - try: - image_ref_str = str(instance[image_key]) - except KeyError: + image_ref_str = getattr(instance, image_key) + if image_ref_str is None: continue local, remote, insts = used_images.get(image_ref_str, (0, 0, [])) - if instance['host'] == CONF.host: + if instance.host == CONF.host: local += 1 else: remote += 1 - insts.append(instance['name']) + insts.append(instance.name) used_images[image_ref_str] = (local, remote, insts) image_popularity.setdefault(image_ref_str, 0) From d7f097d69e86d8a6cdf957364998637cdd139907 Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Mon, 30 Jun 2014 11:58:46 +0100 Subject: [PATCH 382/486] VMware: Create VMwareImage object for image metadata This change pulls the nested function _get_image_properties() out of spawn() and makes it a factory method for the new VMwareImage object. Linked clone logic is moved from a separate function in vmops.py to the new get_image_properties() function. vmware_images.get_vmdk_size_and_properties() is no longer used, and is removed. Tests have been updated and/or moved as appropriate. No tests have been removed. This change has been split out of https://review.openstack.org/#/c/87002/, which was written by Shawn Hartsock. 
partial blueprint vmware-spawn-refactor Co-authored-by: Shawn Hartsock Change-Id: I580f173da798318d2675c7c70bbdd19b266259f4 --- nova/tests/virt/vmwareapi/fake.py | 7 - nova/tests/virt/vmwareapi/stubs.py | 3 - nova/tests/virt/vmwareapi/test_driver_api.py | 212 ++++++++++-------- nova/tests/virt/vmwareapi/test_vmops.py | 82 +++---- .../virt/vmwareapi/test_vmware_images.py | 135 +++++++++++ nova/virt/vmwareapi/constants.py | 9 +- nova/virt/vmwareapi/vmops.py | 198 +++++----------- nova/virt/vmwareapi/vmware_images.py | 131 +++++++++-- 8 files changed, 470 insertions(+), 307 deletions(-) diff --git a/nova/tests/virt/vmwareapi/fake.py b/nova/tests/virt/vmwareapi/fake.py index bfa9002324..e12d917258 100644 --- a/nova/tests/virt/vmwareapi/fake.py +++ b/nova/tests/virt/vmwareapi/fake.py @@ -972,13 +972,6 @@ def fake_upload_image(context, image, instance, **kwargs): pass -def fake_get_vmdk_size_and_properties(context, image_id, instance): - """Fakes the file size and properties fetch for the image file.""" - props = {"vmware_ostype": constants.DEFAULT_OS_TYPE, - "vmware_adaptertype": constants.DEFAULT_ADAPTER_TYPE} - return _FAKE_FILE_SIZE, props - - def _get_vm_mdo(vm_ref): """Gets the Virtual Machine with the ref from the db.""" if _db_content.get("VirtualMachine", None) is None: diff --git a/nova/tests/virt/vmwareapi/stubs.py b/nova/tests/virt/vmwareapi/stubs.py index 440fff3dff..44bbe26363 100644 --- a/nova/tests/virt/vmwareapi/stubs.py +++ b/nova/tests/virt/vmwareapi/stubs.py @@ -25,7 +25,6 @@ from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import error_util from nova.virt.vmwareapi import network_util -from nova.virt.vmwareapi import vmware_images def fake_get_vim_object(arg): @@ -65,8 +64,6 @@ def set_stubs(stubs): """Set the stubs.""" stubs.Set(network_util, 'get_network_with_the_name', fake.fake_get_network) - stubs.Set(vmware_images, 'get_vmdk_size_and_properties', - fake.fake_get_vmdk_size_and_properties) stubs.Set(driver.VMwareAPISession, 
"_get_vim_object", fake_get_vim_object) stubs.Set(driver.VMwareAPISession, "_is_vim_object", diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index 58a7c2765b..2188c88af7 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -54,6 +54,7 @@ from nova.tests.virt.vmwareapi import stubs from nova import utils as nova_utils from nova.virt import driver as v_driver +from nova.virt.vmwareapi import constants from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import ds_util from nova.virt.vmwareapi import error_util @@ -693,19 +694,35 @@ def _cached_files_exist(self, exists=True): else: self.assertFalse(vmwareapi_fake.get_file(str(cache))) - def test_instance_dir_disk_created(self): + @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage, + 'from_image') + def test_instance_dir_disk_created(self, mock_from_image): """Test image file is cached when even when use_linked_clone is False """ + img_props = vmware_images.VMwareImage( + image_id=self.fake_image_uuid, + linked_clone=False) + mock_from_image.return_value = img_props self._create_vm() path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid) self.assertTrue(vmwareapi_fake.get_file(str(path))) self._cached_files_exist() - def test_cache_dir_disk_created(self): + @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage, + 'from_image') + def test_cache_dir_disk_created(self, mock_from_image): """Test image disk is cached when use_linked_clone is True.""" self.flags(use_linked_clone=True, group='vmware') + + img_props = vmware_images.VMwareImage( + image_id=self.fake_image_uuid, + file_size=1 * units.Ki, + disk_type=constants.DISK_TYPE_SPARSE) + + mock_from_image.return_value = img_props + self._create_vm() path = ds_util.DatastorePath(self.ds, 'vmware_base', self.fake_image_uuid, @@ -748,7 +765,18 @@ def fake_attach_cdrom(vm_ref, instance, data_store_ref, 
self.image['disk_format'] = 'iso' self._create_vm() - def test_iso_disk_cdrom_attach_with_config_drive(self): + @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage, + 'from_image') + def test_iso_disk_cdrom_attach_with_config_drive(self, + mock_from_image): + img_props = vmware_images.VMwareImage( + image_id=self.fake_image_uuid, + file_size=80 * units.Gi, + file_type='iso', + linked_clone=False) + + mock_from_image.return_value = img_props + self.flags(force_config_drive=True) iso_path = [ ds_util.DatastorePath(self.ds, 'vmware_base', @@ -920,57 +948,32 @@ def _fake_extend(instance, requested_size, name, dc_ref): self._check_vm_info(info, power_state.RUNNING) self.assertTrue(vmwareapi_fake.get_file(str(root))) - def test_spawn_disk_extend_sparse(self): - self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties') - result = [1024, {"vmware_ostype": "otherGuest", - "vmware_adaptertype": "lsiLogic", - "vmware_disktype": "sparse"}] - vmware_images.get_vmdk_size_and_properties( - mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(result) - self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk') - requested_size = 80 * units.Mi - self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(), - requested_size, mox.IgnoreArg(), mox.IgnoreArg()) - self.mox.ReplayAll() - self._create_vm() - info = self.conn.get_info({'uuid': self.uuid, - 'node': self.instance_node}) - self._check_vm_info(info, power_state.RUNNING) - - def test_spawn_disk_extend_insufficient_disk_space(self): - self.flags(use_linked_clone=True, group='vmware') - self.wait_task = self.conn._session._wait_for_task - self.call_method = self.conn._session._call_method - self.task_ref = None - id = self.fake_image_uuid - cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds, - id, id) - tmp_file = '[%s] vmware_base/%s/%s.80-flat.vmdk' % (self.ds, - id, id) - - def fake_wait_for_task(task_ref): - if task_ref == self.task_ref: - self.task_ref = None - 
self.assertTrue(vmwareapi_fake.get_file(cached_image)) - self.assertTrue(vmwareapi_fake.get_file(tmp_file)) - raise exception.NovaException('No space!') - return self.wait_task(task_ref) + @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage, + 'from_image') + def test_spawn_disk_extend_sparse(self, mock_from_image): + img_props = vmware_images.VMwareImage( + image_id=self.fake_image_uuid, + file_size=units.Ki, + disk_type=constants.DISK_TYPE_SPARSE, + linked_clone=True) - def fake_call_method(module, method, *args, **kwargs): - task_ref = self.call_method(module, method, *args, **kwargs) - if method == "ExtendVirtualDisk_Task": - self.task_ref = task_ref - return task_ref - - self.stubs.Set(self.conn._session, "_call_method", fake_call_method) - self.stubs.Set(self.conn._session, "_wait_for_task", - fake_wait_for_task) + mock_from_image.return_value = img_props - self.assertRaises(exception.NovaException, - self._create_vm) - self.assertFalse(vmwareapi_fake.get_file(cached_image)) - self.assertFalse(vmwareapi_fake.get_file(tmp_file)) + with contextlib.nested( + mock.patch.object(self.conn._vmops, '_extend_virtual_disk'), + mock.patch.object(self.conn._vmops, 'get_datacenter_ref_and_name'), + ) as (mock_extend, mock_get_dc): + dc_val = mock.Mock() + dc_val.ref = "fake_dc_ref" + dc_val.name = "dc1" + mock_get_dc.return_value = dc_val + self._create_vm() + iid = img_props.image_id + cached_image = ds_util.DatastorePath(self.ds, 'vmware_base', + iid, '%s.80.vmdk' % iid) + mock_extend.assert_called_once_with( + self.instance, self.instance.root_gb * units.Mi, + str(cached_image), "fake_dc_ref") def test_spawn_disk_extend_failed_copy(self): # Spawn instance @@ -1086,28 +1089,63 @@ def fake_call_method(module, method, *args, **kwargs): self.assertRaises(DeleteError, self._create_vm) self.assertTrue(vmwareapi_fake.get_file(cached_image)) - def test_spawn_disk_invalid_disk_size(self): - self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties') - 
result = [82 * units.Gi, - {"vmware_ostype": "otherGuest", - "vmware_adaptertype": "lsiLogic", - "vmware_disktype": "sparse"}] - vmware_images.get_vmdk_size_and_properties( - mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(result) - self.mox.ReplayAll() + @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage, + 'from_image') + def test_spawn_disk_invalid_disk_size(self, mock_from_image): + img_props = vmware_images.VMwareImage( + image_id=self.fake_image_uuid, + file_size=82 * units.Gi, + disk_type=constants.DISK_TYPE_SPARSE, + linked_clone=True) + + mock_from_image.return_value = img_props + self.assertRaises(exception.InstanceUnacceptable, self._create_vm) - def test_spawn_invalid_disk_format(self): - self._create_instance() - self.image['disk_format'] = 'invalid' - self.assertRaises(exception.InvalidDiskFormat, - self.conn.spawn, self.context, - self.instance, self.image, - injected_files=[], admin_password=None, - network_info=self.network_info, - block_device_info=None) + @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage, + 'from_image') + def test_spawn_disk_extend_insufficient_disk_space(self, mock_from_image): + img_props = vmware_images.VMwareImage( + image_id=self.fake_image_uuid, + file_size=1024, + disk_type=constants.DISK_TYPE_SPARSE, + linked_clone=True) + + mock_from_image.return_value = img_props + + cached_image = ds_util.DatastorePath(self.ds, 'vmware_base', + self.fake_image_uuid, + '%s.80.vmdk' % + self.fake_image_uuid) + tmp_file = ds_util.DatastorePath(self.ds, 'vmware_base', + self.fake_image_uuid, + '%s.80-flat.vmdk' % + self.fake_image_uuid) + + NoDiskSpace = error_util.get_fault_class('NoDiskSpace') + + def fake_wait_for_task(task_ref): + if task_ref == self.task_ref: + self.task_ref = None + raise NoDiskSpace() + return self.wait_task(task_ref) + + def fake_call_method(module, method, *args, **kwargs): + task_ref = self.call_method(module, method, *args, **kwargs) + if method == 
'ExtendVirtualDisk_Task': + self.task_ref = task_ref + return task_ref + + with contextlib.nested( + mock.patch.object(self.conn._session, '_wait_for_task', + fake_wait_for_task), + mock.patch.object(self.conn._session, '_call_method', + fake_call_method) + ) as (mock_wait_for_task, mock_call_method): + self.assertRaises(NoDiskSpace, self._create_vm) + self.assertFalse(vmwareapi_fake.get_file(str(cached_image))) + self.assertFalse(vmwareapi_fake.get_file(str(tmp_file))) def test_spawn_with_move_file_exists_exception(self): # The test will validate that the spawn completes @@ -2300,28 +2338,20 @@ def test_spawn_invalid_node(self): network_info=self.network_info, block_device_info=None) - def test_spawn_with_sparse_image(self): - # Only a sparse disk image triggers the copy - self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties') - result = [1024, {"vmware_ostype": "otherGuest", - "vmware_adaptertype": "lsiLogic", - "vmware_disktype": "sparse"}] - vmware_images.get_vmdk_size_and_properties( - mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(result) + @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage, + 'from_image') + @mock.patch.object(vmops.VMwareVCVMOps, 'get_copy_virtual_disk_spec') + def test_spawn_with_sparse_image(self, mock_get_copy_virtual_disk_spec, + mock_from_image): + img_info = vmware_images.VMwareImage( + image_id=self.fake_image_uuid, + file_size=1024, + disk_type=constants.DISK_TYPE_SPARSE, + linked_clone=False) - # Ensure VMwareVCVMOps's get_copy_virtual_disk_spec is getting called - # two times - self.mox.StubOutWithMock(vmops.VMwareVCVMOps, - 'get_copy_virtual_disk_spec') - self.conn._vmops.get_copy_virtual_disk_spec( - mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(None) - self.conn._vmops.get_copy_virtual_disk_spec( - mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg()).AndReturn(None) + mock_from_image.return_value = img_info + mock_get_copy_virtual_disk_spec.return_value = None - 
self.mox.ReplayAll() self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) diff --git a/nova/tests/virt/vmwareapi/test_vmops.py b/nova/tests/virt/vmwareapi/test_vmops.py index b77ffe1860..4f92ccacfa 100644 --- a/nova/tests/virt/vmwareapi/test_vmops.py +++ b/nova/tests/virt/vmwareapi/test_vmops.py @@ -29,6 +29,7 @@ import nova.tests.image.fake from nova.tests.virt.vmwareapi import fake as vmwareapi_fake from nova.tests.virt.vmwareapi import stubs +from nova.virt.vmwareapi import constants from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import ds_util from nova.virt.vmwareapi import error_util @@ -38,6 +39,39 @@ from nova.virt.vmwareapi import vmware_images +class VMwareVMOpsSimpleTestCase(test.NoDBTestCase): + @mock.patch.object(vm_util, 'get_res_pool_ref') + @mock.patch.object(ds_util, 'get_datastore') + @mock.patch.object(vmops.VMwareVMOps, 'get_datacenter_ref_and_name') + def test_spawn_disk_invalid_disk_size(self, + mock_get_datacenter_ref_and_name, + mock_get_datastore, + mock_get_res_pool_ref): + image = { + 'id': 'c1c8ce3d-c2e0-4247-890c-ccf5cc1c004c', + 'disk_format': 'vmdk', + 'size': 999999999 * units.Gi, + } + self._context = context.RequestContext('fake_user', 'fake_project') + instance = fake_instance.fake_instance_obj(self._context, + image_ref=nova.tests.image.fake.get_valid_image_id(), + uuid='fake_uuid', + root_gb=1, + node='respool-1001(MyResPoolName)' + ) + + ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock()) + self.assertRaises(exception.InstanceUnacceptable, + ops.spawn, + mock.Mock(), + instance, + image, + injected_files=[], + admin_password=None, + network_info=None, + block_device_info=None) + + class VMwareVMOpsTestCase(test.NoDBTestCase): def setUp(self): super(VMwareVMOpsTestCase, self).setUp() @@ -60,7 +94,7 @@ def setUp(self): 'vcpus': 1, 'memory_mb': 512, 'image_ref': self._image_id, - 'root_gb': 1, + 'root_gb': 10, 'node': 'respool-1001(MyResPoolName)' } self._instance 
= fake_instance.fake_instance_obj( @@ -122,21 +156,6 @@ def setUp(self): rxtx_cap=3) ]) - def test_get_disk_format_none(self): - format, is_iso = self._vmops._get_disk_format({'disk_format': None}) - self.assertIsNone(format) - self.assertFalse(is_iso) - - def test_get_disk_format_iso(self): - format, is_iso = self._vmops._get_disk_format({'disk_format': 'iso'}) - self.assertEqual('iso', format) - self.assertTrue(is_iso) - - def test_get_disk_format_bad(self): - self.assertRaises(exception.InvalidDiskFormat, - self._vmops._get_disk_format, - {'disk_format': 'foo'}) - def test_get_machine_id_str(self): result = vmops.VMwareVMOps._get_machine_id_str(self.network_info) self.assertEqual(result, @@ -146,33 +165,6 @@ def test_get_machine_id_str(self): self.pure_IPv6_network_info) self.assertEqual('DE:AD:BE:EF:00:00;;;;;#', result) - def test_use_linked_clone_override_nf(self): - value = vmops.VMwareVMOps.decide_linked_clone(None, False) - self.assertFalse(value, "No overrides present but still overridden!") - - def test_use_linked_clone_override_none_true(self): - value = vmops.VMwareVMOps.decide_linked_clone(None, True) - self.assertTrue(value, "No overrides present but still overridden!") - - def test_use_linked_clone_override_ny(self): - value = vmops.VMwareVMOps.decide_linked_clone(None, "yes") - self.assertTrue(value, "No overrides present but still overridden!") - - def test_use_linked_clone_override_ft(self): - value = vmops.VMwareVMOps.decide_linked_clone(False, True) - self.assertFalse(value, - "image level metadata failed to override global") - - def test_use_linked_clone_override_no_true(self): - value = vmops.VMwareVMOps.decide_linked_clone("no", True) - self.assertFalse(value, - "image level metadata failed to override global") - - def test_use_linked_clone_override_yf(self): - value = vmops.VMwareVMOps.decide_linked_clone("yes", False) - self.assertTrue(value, - "image level metadata failed to override global") - def _setup_create_folder_mocks(self): ops = 
vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock()) base_name = 'folder' @@ -670,8 +662,8 @@ def _test_spawn(self, mock_get_res_pool_ref.assert_called_once_with( self._session, None, 'fake_node_mo_id') mock_get_vif_info.assert_called_once_with( - self._session, None, False, network_model.VIF_MODEL_E1000, - network_info) + self._session, None, False, + constants.DEFAULT_VIF_MODEL, network_info) mock_get_create_spec.assert_called_once_with( self._session._get_vim().client.factory, self._instance, diff --git a/nova/tests/virt/vmwareapi/test_vmware_images.py b/nova/tests/virt/vmwareapi/test_vmware_images.py index 5e451646fe..8b55c39987 100644 --- a/nova/tests/virt/vmwareapi/test_vmware_images.py +++ b/nova/tests/virt/vmwareapi/test_vmware_images.py @@ -19,8 +19,11 @@ import mock +from nova import exception +from nova.openstack.common import units from nova import test import nova.tests.image.fake +from nova.virt.vmwareapi import constants from nova.virt.vmwareapi import read_write_util from nova.virt.vmwareapi import vmware_images @@ -81,3 +84,135 @@ def fake_write_handle(host, dc_name, ds_name, cookies, write_file_handle=write_file_handle) image_download.assert_called_once_with(context, instance['image_ref']) image_show.assert_called_once_with(context, instance['image_ref']) + + def _setup_mock_get_remote_image_service(self, + mock_get_remote_image_service, + metadata): + mock_image_service = mock.MagicMock() + mock_image_service.show.return_value = metadata + mock_get_remote_image_service.return_value = [mock_image_service, 'i'] + + def test_from_image_with_image_ref(self): + raw_disk_size_in_gb = 83 + raw_disk_size_in_bytes = raw_disk_size_in_gb * units.Gi + image_id = nova.tests.image.fake.get_valid_image_id() + mdata = {'size': raw_disk_size_in_bytes, + 'disk_format': 'vmdk', + 'properties': { + "vmware_ostype": constants.DEFAULT_OS_TYPE, + "vmware_adaptertype": constants.DEFAULT_ADAPTER_TYPE, + "vmware_disktype": constants.DEFAULT_DISK_TYPE, + 
"hw_vif_model": constants.DEFAULT_VIF_MODEL, + vmware_images.LINKED_CLONE_PROPERTY: True}} + + img_props = vmware_images.VMwareImage.from_image(image_id, mdata) + + image_size_in_kb = raw_disk_size_in_bytes / units.Ki + image_size_in_gb = raw_disk_size_in_bytes / units.Gi + + # assert that defaults are set and no value returned is left empty + self.assertEqual(constants.DEFAULT_OS_TYPE, img_props.os_type) + self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, + img_props.adapter_type) + self.assertEqual(constants.DEFAULT_DISK_TYPE, img_props.disk_type) + self.assertEqual(constants.DEFAULT_VIF_MODEL, img_props.vif_model) + self.assertTrue(img_props.linked_clone) + self.assertEqual(image_size_in_kb, img_props.file_size_in_kb) + self.assertEqual(image_size_in_gb, img_props.file_size_in_gb) + + def _image_build(self, image_lc_setting, global_lc_setting, + disk_format=constants.DEFAULT_DISK_FORMAT, + os_type=constants.DEFAULT_OS_TYPE, + adapter_type=constants.DEFAULT_ADAPTER_TYPE, + disk_type=constants.DEFAULT_DISK_TYPE, + vif_model=constants.DEFAULT_VIF_MODEL): + self.flags(use_linked_clone=global_lc_setting, group='vmware') + raw_disk_size_in_gb = 93 + raw_disk_size_in_btyes = raw_disk_size_in_gb * units.Gi + + image_id = nova.tests.image.fake.get_valid_image_id() + mdata = {'size': raw_disk_size_in_btyes, + 'disk_format': disk_format, + 'properties': { + "vmware_ostype": os_type, + "vmware_adaptertype": adapter_type, + "vmware_disktype": disk_type, + "hw_vif_model": vif_model}} + + if image_lc_setting is not None: + mdata['properties'][ + vmware_images.LINKED_CLONE_PROPERTY] = image_lc_setting + + return vmware_images.VMwareImage.from_image(image_id, mdata) + + def test_use_linked_clone_override_nf(self): + image_props = self._image_build(None, False) + self.assertFalse(image_props.linked_clone, + "No overrides present but still overridden!") + + def test_use_linked_clone_override_nt(self): + image_props = self._image_build(None, True) + 
self.assertTrue(image_props.linked_clone, + "No overrides present but still overridden!") + + def test_use_linked_clone_override_ny(self): + image_props = self._image_build(None, "yes") + self.assertTrue(image_props.linked_clone, + "No overrides present but still overridden!") + + def test_use_linked_clone_override_ft(self): + image_props = self._image_build(False, True) + self.assertFalse(image_props.linked_clone, + "image level metadata failed to override global") + + def test_use_linked_clone_override_string_nt(self): + image_props = self._image_build("no", True) + self.assertFalse(image_props.linked_clone, + "image level metadata failed to override global") + + def test_use_linked_clone_override_string_yf(self): + image_props = self._image_build("yes", False) + self.assertTrue(image_props.linked_clone, + "image level metadata failed to override global") + + def test_use_disk_format_none(self): + image = self._image_build(None, True, disk_format=None) + self.assertIsNone(image.file_type) + self.assertFalse(image.is_iso) + + def test_use_disk_format_iso(self): + image = self._image_build(None, True, disk_format='iso') + self.assertEqual('iso', image.file_type) + self.assertTrue(image.is_iso) + + def test_use_bad_disk_format(self): + self.assertRaises(exception.InvalidDiskFormat, + self._image_build, + None, + True, + disk_format='bad_disk_format') + + def test_image_no_defaults(self): + image = self._image_build(False, False, + disk_format='iso', + os_type='fake-os-type', + adapter_type='fake-adapter-type', + disk_type='fake-disk-type', + vif_model='fake-vif-model') + self.assertEqual('iso', image.file_type) + self.assertEqual('fake-os-type', image.os_type) + self.assertEqual('fake-adapter-type', image.adapter_type) + self.assertEqual('fake-disk-type', image.disk_type) + self.assertEqual('fake-vif-model', image.vif_model) + self.assertFalse(image.linked_clone) + + def test_image_defaults(self): + image = vmware_images.VMwareImage(image_id='fake-image-id') + + # 
N.B. We intentially don't use the defined constants here. Amongst + # other potential failures, we're interested in changes to their + # values, which would not otherwise be picked up. + self.assertEqual('otherGuest', image.os_type) + self.assertEqual('lsiLogic', image.adapter_type) + self.assertEqual('preallocated', image.disk_type) + self.assertEqual('e1000', image.vif_model) diff --git a/nova/virt/vmwareapi/constants.py b/nova/virt/vmwareapi/constants.py index 6493c556e7..449618247c 100644 --- a/nova/virt/vmwareapi/constants.py +++ b/nova/virt/vmwareapi/constants.py @@ -18,8 +18,15 @@ from nova.network import model as network_model +DISK_FORMAT_ISO = 'iso' +DISK_FORMAT_VMDK = 'vmdk' +DISK_FORMATS_ALL = [DISK_FORMAT_ISO, DISK_FORMAT_VMDK] + +DISK_TYPE_SPARSE = 'sparse' +DISK_TYPE_PREALLOCATED = 'preallocated' DEFAULT_VIF_MODEL = network_model.VIF_MODEL_E1000 DEFAULT_OS_TYPE = "otherGuest" DEFAULT_ADAPTER_TYPE = "lsiLogic" -DEFAULT_DISK_TYPE = "preallocated" +DEFAULT_DISK_TYPE = DISK_TYPE_PREALLOCATED +DEFAULT_DISK_FORMAT = DISK_FORMAT_VMDK diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index b27f255ec5..1c1c468109 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -36,7 +36,6 @@ from nova.openstack.common import excutils from nova.openstack.common import lockutils from nova.openstack.common import log as logging -from nova.openstack.common import strutils from nova.openstack.common import units from nova.openstack.common import uuidutils from nova import utils @@ -67,8 +66,6 @@ 'poweredOn': power_state.RUNNING, 'suspended': power_state.SUSPENDED} -VMWARE_LINKED_CLONE = 'vmware_linked_clone' - RESIZE_TOTAL_STEPS = 4 DcInfo = collections.namedtuple('DcInfo', @@ -154,11 +151,13 @@ def _delete_datastore_file(self, instance, datastore_path, dc_ref): def _get_vmdk_path(self, ds_name, folder, name): return str(ds_util.DatastorePath(ds_name, folder, '%s.vmdk' % name)) - def _get_disk_format(self, image_meta): - 
disk_format = image_meta.get('disk_format') - if disk_format not in ['iso', 'vmdk', None]: - raise exception.InvalidDiskFormat(disk_format=disk_format) - return (disk_format, disk_format == 'iso') + def _extend_if_required(self, dc_info, image_info, instance, + root_vmdk_path): + """Increase the size of the root vmdk if necessary.""" + if instance.root_gb > image_info.file_size_in_gb: + size_in_kb = instance.root_gb * units.Mi + self._extend_virtual_disk(instance, size_in_kb, + root_vmdk_path, dc_info.ref) def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info, block_device_info=None, @@ -198,57 +197,16 @@ def spawn(self, context, instance, image_meta, injected_files, if block_device_mapping: ebs_root = True - (file_type, is_iso) = self._get_disk_format(image_meta) - client_factory = self._session._get_vim().client.factory datastore = ds_util.get_datastore( self._session, self._cluster, datastore_regex=self._datastore_regex) dc_info = self.get_datacenter_ref_and_name(datastore.ref) - # TODO(hartsocks): this pattern is confusing, reimplement as methods - # The use of nested functions in this file makes for a confusing and - # hard to maintain file. At some future date, refactor this method to - # be a full-fledged method. This will also make unit testing easier. - def _get_image_properties(root_size): - """Get the Size of the flat vmdk file that is there on the storage - repository. 
- """ - image_ref = instance.image_ref - if image_ref: - _image_info = vmware_images.get_vmdk_size_and_properties( - context, image_ref, instance) - else: - # The case that the image may be booted from a volume - _image_info = (root_size, {}) - - image_size, image_properties = _image_info - vmdk_file_size_in_kb = int(image_size) / 1024 - os_type = image_properties.get("vmware_ostype", - constants.DEFAULT_OS_TYPE) - adapter_type = image_properties.get("vmware_adaptertype", - constants.DEFAULT_ADAPTER_TYPE) - disk_type = image_properties.get("vmware_disktype", - constants.DEFAULT_DISK_TYPE) - # Get the network card type from the image properties. - vif_model = image_properties.get("hw_vif_model", - constants.DEFAULT_VIF_MODEL) - - # Fetch the image_linked_clone data here. It is retrieved - # with the above network based API call. To retrieve it - # later will necessitate additional network calls using the - # identical method. Consider this a cache. - image_linked_clone = image_properties.get(VMWARE_LINKED_CLONE) - - return (vmdk_file_size_in_kb, os_type, adapter_type, disk_type, - vif_model, image_linked_clone) - - root_gb_in_kb = instance.root_gb * units.Mi - - (vmdk_file_size_in_kb, os_type, adapter_type, disk_type, vif_model, - image_linked_clone) = _get_image_properties(root_gb_in_kb) - - if root_gb_in_kb and vmdk_file_size_in_kb > root_gb_in_kb: + image_info = vmware_images.VMwareImage.from_image(instance.image_ref, + image_meta) + if (instance.root_gb != 0 and + image_info.file_size_in_gb > instance.root_gb): reason = _("Image disk size greater than requested disk size") raise exception.InstanceUnacceptable(instance_id=instance.uuid, reason=reason) @@ -258,7 +216,8 @@ def _get_image_properties(root_size): self._cluster, node_mo_id) vif_infos = vmwarevif.get_vif_info(self._session, self._cluster, - utils.is_neutron(), vif_model, + utils.is_neutron(), + image_info.vif_model, network_info) # Get the instance name. 
In some cases this may differ from the 'uuid', @@ -269,7 +228,7 @@ def _get_image_properties(root_size): # Create the VM config_spec = vm_util.get_vm_create_spec( client_factory, instance, instance_name, - datastore.name, vif_infos, os_type) + datastore.name, vif_infos, image_info.os_type) vm_ref = vm_util.create_vm(self._session, instance, dc_info.vmFolder, config_spec, res_pool_ref) @@ -292,22 +251,19 @@ def _get_image_properties(root_size): # this logic allows for instances or images to decide # for themselves which strategy is best for them. - linked_clone = VMwareVMOps.decide_linked_clone( - image_linked_clone, - CONF.vmware.use_linked_clone - ) upload_name = instance.image_ref upload_folder = '%s/%s' % (self._base_folder, upload_name) # The vmdk meta-data file uploaded_file_path = str(datastore.build_path( - upload_folder, "%s.%s" % (upload_name, file_type))) + upload_folder, + "%s.%s" % (upload_name, image_info.file_type))) session_vim = self._session._get_vim() cookies = session_vim.client.options.transport.cookiejar ds_browser = self._get_ds_browser(datastore.ref) - upload_file_name = upload_name + ".%s" % file_type + upload_file_name = upload_name + ".%s" % image_info.file_type # Check if the timestamp file exists - if so then delete it. This # will ensure that the aging will not delete a cache image if it @@ -351,8 +307,8 @@ def _get_image_properties(root_size): upload_path_loc = datastore.build_path( upload_folder, upload_file_name) upload_rel_path = upload_path_loc.rel_path - if not is_iso: - if disk_type != "sparse": + if not image_info.is_iso: + if not image_info.is_sparse: # Create a flat virtual disk and retain the metadata # file. This will be done in the unique temporary # directory. 
@@ -364,10 +320,10 @@ def _get_image_properties(root_size): datastore.name, instance=instance) vm_util.create_virtual_disk(self._session, dc_info.ref, - adapter_type, - disk_type, + image_info.adapter_type, + image_info.disk_type, str(upload_path_loc), - vmdk_file_size_in_kb) + image_info.file_size_in_kb) LOG.debug("Virtual disk created on %s.", datastore.name, instance=instance) self._delete_datastore_file(instance, @@ -385,12 +341,13 @@ def _get_image_properties(root_size): upload_rel_path, cookies=cookies) - if not is_iso and disk_type == "sparse": + if not image_info.is_iso and image_info.is_sparse: # Copy the sparse virtual disk to a thin virtual disk. disk_type = "thin" - copy_spec = self.get_copy_virtual_disk_spec(client_factory, - adapter_type, - disk_type) + copy_spec = self.get_copy_virtual_disk_spec( + client_factory, + image_info.adapter_type, + disk_type) vm_util.copy_virtual_disk(self._session, dc_info.ref, str(sparse_ds_loc), str(upload_path_loc), @@ -416,13 +373,9 @@ def _get_image_properties(root_size): datastore.build_path( tmp_upload_folder), dc_info.ref) - else: - # linked clone base disk exists - if disk_type == "sparse": - disk_type = "thin" - if is_iso: - if root_gb_in_kb: + if image_info.is_iso: + if instance.root_gb != 0: dest_vmdk_path = self._get_vmdk_path(datastore.name, instance.uuid, instance_name) @@ -431,10 +384,10 @@ def _get_image_properties(root_size): datastore.name, instance=instance) vm_util.create_virtual_disk(self._session, dc_info.ref, - adapter_type, - disk_type, + image_info.adapter_type, + image_info.disk_type, dest_vmdk_path, - root_gb_in_kb) + image_info.file_size_in_kb) LOG.debug("Blank virtual disk created on %s.", datastore.name, instance=instance) root_vmdk_path = dest_vmdk_path @@ -442,24 +395,24 @@ def _get_image_properties(root_size): root_vmdk_path = None else: # Extend the disk size if necessary - if not linked_clone: + if not image_info.linked_clone: # If we are not using linked_clone, copy the image from # 
the cache into the instance directory. If we are using # linked clone it is references from the cache directory dest_vmdk_path = self._get_vmdk_path(datastore.name, instance_name, instance_name) - copy_spec = self.get_copy_virtual_disk_spec(client_factory, - adapter_type, - disk_type) + copy_spec = self.get_copy_virtual_disk_spec( + client_factory, + image_info.adapter_type, + image_info.disk_type) vm_util.copy_virtual_disk(self._session, dc_info.ref, uploaded_file_path, dest_vmdk_path, copy_spec) root_vmdk_path = dest_vmdk_path - if root_gb_in_kb > vmdk_file_size_in_kb: - self._extend_virtual_disk(instance, root_gb_in_kb, - root_vmdk_path, dc_info.ref) + self._extend_if_required(dc_info, image_info, instance, + root_vmdk_path) else: upload_folder = '%s/%s' % (self._base_folder, upload_name) if instance.root_gb: @@ -493,7 +446,9 @@ def _get_image_properties(root_size): instance.root_gb) copy_spec = self.get_copy_virtual_disk_spec( - client_factory, adapter_type, disk_type) + client_factory, + image_info.adapter_type, + image_info.disk_type) # Create a copy of the base image, ensuring we # clean up on failure @@ -524,20 +479,21 @@ def _get_image_properties(root_size): # Resize the copy to the appropriate size. No need # for cleanup up here, as _extend_virtual_disk # already does it - if root_gb_in_kb > vmdk_file_size_in_kb: - self._extend_virtual_disk(instance, - root_gb_in_kb, - root_vmdk_path, - dc_info.ref) + self._extend_if_required(dc_info, image_info, + instance, root_vmdk_path) # Attach the root disk to the VM. 
- if root_vmdk_path: + if root_vmdk_path is not None: self._volumeops.attach_disk_to_vm( - vm_ref, instance, - adapter_type, disk_type, root_vmdk_path, - root_gb_in_kb, linked_clone) - - if is_iso: + vm_ref, + instance, + image_info.adapter_type, + image_info.disk_type, + root_vmdk_path, + instance.root_gb * units.Mi, + image_info.linked_clone) + + if image_info.is_iso: self._attach_cdrom_to_vm( vm_ref, instance, datastore.ref, @@ -627,52 +583,6 @@ def _attach_cdrom_to_vm(self, vm_ref, instance, LOG.debug("Reconfigured VM instance to attach cdrom %s", file_path, instance=instance) - @staticmethod - def decide_linked_clone(image_linked_clone, global_linked_clone): - """Explicit decision logic: whether to use linked clone on a vmdk. - - This is *override* logic not boolean logic. - - 1. let the image over-ride if set at all - 2. default to the global setting - - In math terms, I need to allow: - glance image to override global config. - - That is g vs c. "g" for glance. "c" for Config. - - So, I need g=True vs c=False to be True. - And, I need g=False vs c=True to be False. - And, I need g=None vs c=True to be True. - - Some images maybe independently best tuned for use_linked_clone=True - saving datastorage space. Alternatively a whole OpenStack install may - be tuned to performance use_linked_clone=False but a single image - in this environment may be best configured to save storage space and - set use_linked_clone=True only for itself. - - The point is: let each layer of control override the layer beneath it. - - rationale: - For technical discussion on the clone strategies and their trade-offs - see: https://www.vmware.com/support/ws5/doc/ws_clone_typeofclone.html - - :param image_linked_clone: boolean or string or None - :param global_linked_clone: boolean or string or None - :return: Boolean - """ - - value = None - - # Consider the values in order of override. 
- if image_linked_clone is not None: - value = image_linked_clone - else: - # this will never be not-set by this point. - value = global_linked_clone - - return strutils.bool_from_string(value) - def get_copy_virtual_disk_spec(self, client_factory, adapter_type, disk_type): return vm_util.get_copy_virtual_disk_spec(client_factory, diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py index 65f6b7fed0..38ed03d039 100644 --- a/nova/virt/vmwareapi/vmware_images.py +++ b/nova/virt/vmwareapi/vmware_images.py @@ -14,22 +14,136 @@ # License for the specific language governing permissions and limitations # under the License. """ -Utility functions for Image transfer. +Utility functions for Image transfer and manipulation. """ import os +from oslo.config import cfg + from nova import exception from nova import image from nova.openstack.common import log as logging +from nova.openstack.common import strutils +from nova.openstack.common import units +from nova.virt.vmwareapi import constants from nova.virt.vmwareapi import io_util from nova.virt.vmwareapi import read_write_util +# NOTE(mdbooth): We use use_linked_clone below, but don't have to import it +# because nova.virt.vmwareapi.driver is imported first. In fact, it is not +# possible to import it here, as nova.virt.vmwareapi.driver calls +# CONF.register_opts() after the import chain which imports this module. This +# is not a problem as long as the import order doesn't change. +CONF = cfg.CONF + LOG = logging.getLogger(__name__) IMAGE_API = image.API() QUEUE_BUFFER_SIZE = 10 +LINKED_CLONE_PROPERTY = 'vmware_linked_clone' + + +class VMwareImage(object): + def __init__(self, image_id, + file_size=0, + os_type=constants.DEFAULT_OS_TYPE, + adapter_type=constants.DEFAULT_ADAPTER_TYPE, + disk_type=constants.DEFAULT_DISK_TYPE, + file_type=constants.DEFAULT_DISK_FORMAT, + linked_clone=None, + vif_model=constants.DEFAULT_VIF_MODEL): + """VMwareImage holds values for use in building VMs. 
+ + image_id (str): uuid of the image + file_size (int): size of file in bytes + os_type (str): name of guest os (use vSphere names only) + adapter_type (str): name of the adapter's type + disk_type (str): type of disk in thin, thick, etc + file_type (str): vmdk or iso + linked_clone(bool): use linked clone, or don't + """ + self.image_id = image_id + self.file_size = file_size + self.os_type = os_type + self.adapter_type = adapter_type + self.disk_type = disk_type + self.file_type = file_type + + # NOTE(vui): This should be removed when we restore the + # descriptor-based validation. + if (self.file_type is not None and + self.file_type not in constants.DISK_FORMATS_ALL): + raise exception.InvalidDiskFormat(disk_format=self.file_type) + + if linked_clone is not None: + self.linked_clone = linked_clone + else: + self.linked_clone = CONF.vmware.use_linked_clone + self.vif_model = vif_model + + @property + def file_size_in_kb(self): + return self.file_size / units.Ki + + @property + def file_size_in_gb(self): + return self.file_size / units.Gi + + @property + def is_sparse(self): + return self.disk_type == constants.DISK_TYPE_SPARSE + + @property + def is_iso(self): + return self.file_type == constants.DISK_FORMAT_ISO + + @classmethod + def from_image(cls, image_id, image_meta=None): + """Returns VMwareImage, the subset of properties the driver uses. + + :param image_id - image id of image + :param image_meta - image metadata we are working with + :return: vmware image object + :rtype: nova.virt.vmwareapi.vmware_images.VmwareImage + """ + if image_meta is None: + image_meta = {} + + properties = image_meta.get("properties", {}) + + # calculate linked_clone flag, allow image properties to override the + # global property set in the configurations. 
+ image_linked_clone = properties.get(LINKED_CLONE_PROPERTY, + CONF.vmware.use_linked_clone) + + # catch any string values that need to be interpreted as boolean values + linked_clone = strutils.bool_from_string(image_linked_clone) + + props = { + 'image_id': image_id, + 'linked_clone': linked_clone + } + + if 'size' in image_meta: + props['file_size'] = image_meta['size'] + if 'disk_format' in image_meta: + props['file_type'] = image_meta['disk_format'] + + props_map = { + 'vmware_ostype': 'os_type', + 'vmware_adaptertype': 'adapter_type', + 'vmware_disktype': 'disk_type', + 'hw_vif_model': 'vif_model' + } + + for k, v in props_map.iteritems(): + if k in properties: + props[v] = properties[k] + + return cls(**props) + def start_transfer(context, read_file_handle, data_size, write_file_handle=None, image_id=None, image_meta=None): @@ -171,18 +285,3 @@ def upload_image(context, image, instance, **kwargs): image_id=metadata['id'], image_meta=image_metadata) LOG.debug("Uploaded image %s to the Glance image server", image, instance=instance) - - -def get_vmdk_size_and_properties(context, image, instance): - """Get size of the vmdk file that is to be downloaded for attach in spawn. - Need this to create the dummy virtual disk for the meta-data file. The - geometry of the disk created depends on the size. - """ - - LOG.debug("Getting image size for the image %s", image, - instance=instance) - meta_data = IMAGE_API.get(context, image) - size, properties = meta_data["size"], meta_data["properties"] - LOG.debug("Got image size of %(size)s for the image %(image)s", - {'size': size, 'image': image}, instance=instance) - return size, properties From 942b9cd7f2ab77b7e790831cc985a9f6d2730318 Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Mon, 30 Jun 2014 15:52:25 +0100 Subject: [PATCH 383/486] VMware: Remove references to ebs_root from spawn() spawn() contained an important boolean called ebs_root. This was True for any spawn which contained a block device mapping. 
This no longer has any meaning, and was confusing. This change removes references to ebs_root, instead using block_device_info directly. N.B. The block device mapping behaviour of the VMware driver is still not correct, but fixing it is not the purpose of this patch. This patch simply makes the existing behaviour easier to see. N.B. test_spawn_mask_block_device_info_password in test_vmops.py assumes internal details of spawn in order to cause a failure after emitting a log message. In moving the log message we need to update the dance this test does to ensure that spawn fails after the log message, but not before. This change has been split out of https://review.openstack.org/#/c/87002/, which was written by Shawn Hartsock. partial blueprint vmware-spawn-refactor Co-authored-by: Shawn Hartsock Change-Id: I11b355a6b53194bc9f85f542b54c9fbb9c061e04 --- nova/tests/virt/vmwareapi/test_vmops.py | 39 ++++++++++++------ nova/virt/vmwareapi/vmops.py | 53 +++++++++++++++---------- 2 files changed, 60 insertions(+), 32 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_vmops.py b/nova/tests/virt/vmwareapi/test_vmops.py index 4f92ccacfa..034bef43a8 100644 --- a/nova/tests/virt/vmwareapi/test_vmops.py +++ b/nova/tests/virt/vmwareapi/test_vmops.py @@ -520,30 +520,45 @@ def test_finish_revert_migration_power_on(self): def test_finish_revert_migration_power_off(self): self._test_finish_revert_migration(power_on=False) - def test_spawn_mask_block_device_info_password(self): + @mock.patch.object(vmops.LOG, 'debug') + @mock.patch('nova.virt.vmwareapi.volumeops.VMwareVolumeOps' + '.attach_root_volume') + def test_spawn_mask_block_device_info_password(self, + mock_attach_root_volume, + mock_debug): # Very simple test that just ensures block_device_info auth_password # is masked when logged; the rest of the test just fails out early. 
data = {'auth_password': 'scrubme'} bdm = [{'connection_info': {'data': data}}] bdi = {'block_device_mapping': bdm} + self.password_logged = False + # Tests that the parameters to the to_xml method are sanitized for # passwords when logged. def fake_debug(*args, **kwargs): if 'auth_password' in args[0]: + self.password_logged = True self.assertNotIn('scrubme', args[0]) - with mock.patch.object(vmops.LOG, 'debug', - side_effect=fake_debug) as debug_mock: - # the invalid disk format will cause an exception - image_meta = {'disk_format': 'fake'} - self.assertRaises(exception.InvalidDiskFormat, self._vmops.spawn, - self._context, self._instance, image_meta, - injected_files=None, admin_password=None, - network_info=[], block_device_info=bdi) - # we don't care what the log message is, we just want to make sure - # our stub method is called which asserts the password is scrubbed - self.assertTrue(debug_mock.called) + mock_debug.side_effect = fake_debug + self.flags(flat_injected=False, vnc_enabled=False) + mock_attach_root_volume.side_effect = Exception + + # Call spawn(). We don't care what it does as long as it generates + # the log message, which we check below. + try: + self._vmops.spawn( + self._context, self._instance, {}, + injected_files=None, admin_password=None, + network_info=[], block_device_info=bdi + ) + except Exception: + pass + + # Check that the relevant log message was generated, and therefore + # that we checked it was scrubbed + self.assertTrue(self.password_logged) def test_get_ds_browser(self): cache = self._vmops._datastore_browser_mapping diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 1c1c468109..642d035945 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -186,16 +186,11 @@ def spawn(self, context, instance, image_meta, injected_files, #. Power on the VM. 
""" - ebs_root = False - if block_device_info: - msg = "Block device information present: %s" % block_device_info - # NOTE(mriedem): block_device_info can contain an auth_password - # so we have to scrub the message before logging it. - LOG.debug(logging.mask_password(msg), instance=instance) - block_device_mapping = driver.block_device_info_get_mapping( - block_device_info) - if block_device_mapping: - ebs_root = True + + # NOTE(hartsocks): some of the logic below relies on instance_name + # even when it is not set by the caller. + if instance_name is None: + instance_name = instance.uuid client_factory = self._session._get_vim().client.factory datastore = ds_util.get_datastore( @@ -247,9 +242,34 @@ def spawn(self, context, instance, image_meta, injected_files, vnc_port = vm_util.get_vnc_port(self._session) self._set_vnc_config(client_factory, instance, vnc_port) - if not ebs_root: - # this logic allows for instances or images to decide - # for themselves which strategy is best for them. + block_device_mapping = [] + if block_device_info is not None: + block_device_mapping = driver.block_device_info_get_mapping( + block_device_info) + + # NOTE(mdbooth): the logic here is that we ignore the image if there + # are block device mappings. This behaviour is incorrect, and a bug in + # the driver. We should be able to accept an image and block device + # mappings. + if len(block_device_mapping) > 0: + msg = "Block device information present: %s" % block_device_info + # NOTE(mriedem): block_device_info can contain an auth_password + # so we have to scrub the message before logging it. + LOG.debug(logging.mask_password(msg), instance=instance) + + for root_disk in block_device_mapping: + connection_info = root_disk['connection_info'] + # TODO(hartsocks): instance is unnecessary, remove it + # we still use instance in many locations for no other purpose + # than logging, can we simplify this? 
+ self._volumeops.attach_root_volume(connection_info, instance, + self._default_root_device, + datastore.ref) + else: + # TODO(hartsocks): Refactor this section image handling section. + # The next section handles manipulating various image types + # as well as preparing those image's virtual disks for mounting + # in our virtual machine. upload_name = instance.image_ref upload_folder = '%s/%s' % (self._base_folder, upload_name) @@ -514,13 +534,6 @@ def spawn(self, context, instance, image_meta, injected_files, datastore.ref, str(uploaded_iso_path)) - else: - # Attach the root disk to the VM. - for root_disk in block_device_mapping: - connection_info = root_disk['connection_info'] - self._volumeops.attach_root_volume(connection_info, instance, - self._default_root_device, - datastore.ref) if power_on: vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref) From 19d43793f5b7642dd6c681d04462c5b6c8a834c3 Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Wed, 9 Jul 2014 10:32:55 +0100 Subject: [PATCH 384/486] VMware: Create fake VM with given datastore FakeVim._create_vm() was creating a VM with a default datastore rather than the given one. This prevented testing that the correct datastore has been used. 
Change-Id: I640a21f23b2d5c1186772c8c5ce3ca8927e6cbc5 --- nova/tests/virt/vmwareapi/fake.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/nova/tests/virt/vmwareapi/fake.py b/nova/tests/virt/vmwareapi/fake.py index e12d917258..6ed22318af 100644 --- a/nova/tests/virt/vmwareapi/fake.py +++ b/nova/tests/virt/vmwareapi/fake.py @@ -29,6 +29,7 @@ from nova.openstack.common import units from nova.openstack.common import uuidutils from nova.virt.vmwareapi import constants +from nova.virt.vmwareapi import ds_util from nova.virt.vmwareapi import error_util _CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine', @@ -1130,7 +1131,15 @@ def _create_vm(self, method, *args, **kwargs): """Creates and registers a VM object with the Host System.""" config_spec = kwargs.get("config") pool = kwargs.get('pool') - ds = _db_content["Datastore"].keys()[0] + + vm_path = ds_util.DatastorePath.parse(config_spec.files.vmPathName) + for key, value in _db_content["Datastore"].iteritems(): + if value.get('summary.name') == vm_path.datastore: + ds = key + break + else: + ds = create_datastore(vm_path.datastore, 1024, 500) + host = _db_content["HostSystem"].keys()[0] vm_dict = {"name": config_spec.name, "ds": [ds], From 756bbbeeff4334a00b16d7f2ef6b440720ec45a1 Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Wed, 9 Jul 2014 13:42:04 +0100 Subject: [PATCH 385/486] VMware: Fix type of VM's config.hardware.device in fake config.hardware.device was handled inconsistently, and could be of various types. When created by _create_vm(), it was actually a VirtualDeviceConfigSpec[]. It should be a VirtualDevice[]. Fix tests which depended on the wrong type. Add _create_array_of_type function to fake to create an array type which works correctly when passed to code. A returned array of Foo must be an instance of class ArrayOfFoo, and contain a single property, Foo, which is a python array of objects of type Foo. 
Change-Id: Ic90e1d0d947941621606fded9e56f9a98d111cad --- nova/tests/virt/vmwareapi/fake.py | 42 +++++++++++++++++--- nova/tests/virt/vmwareapi/test_driver_api.py | 2 +- 2 files changed, 37 insertions(+), 7 deletions(-) diff --git a/nova/tests/virt/vmwareapi/fake.py b/nova/tests/virt/vmwareapi/fake.py index 6ed22318af..0aec35d2c6 100644 --- a/nova/tests/virt/vmwareapi/fake.py +++ b/nova/tests/virt/vmwareapi/fake.py @@ -39,6 +39,7 @@ _FAKE_FILE_SIZE = 1024 _db_content = {} +_array_types = {} LOG = logging.getLogger(__name__) @@ -109,6 +110,24 @@ def _convert_to_array_of_opt_val(optvals): return array_of_optv +def _create_array_of_type(t): + """Returns an array to contain objects of type t.""" + if t in _array_types: + return _array_types[t]() + + array_type_name = 'ArrayOf%s' % t + array_type = type(array_type_name, (DataObject,), {}) + + def __init__(self): + super(array_type, self).__init__(array_type_name) + setattr(self, t, []) + + setattr(array_type, '__init__', __init__) + + _array_types[t] = array_type + return array_type() + + class FakeRetrieveResult(object): """Object to retrieve a ObjectContent list.""" @@ -395,7 +414,11 @@ def __init__(self, **kwargs): self.set("summary.config.numCpu", kwargs.get("numCpu", 1)) self.set("summary.config.memorySizeMB", kwargs.get("mem", 1)) self.set("summary.config.instanceUuid", kwargs.get("instanceUuid")) - self.set("config.hardware.device", kwargs.get("virtual_device", None)) + + devices = _create_array_of_type('VirtualDevice') + devices.VirtualDevice = kwargs.get("virtual_device", []) + self.set("config.hardware.device", devices) + exconfig_do = kwargs.get("extra_config", None) self.set("config.extraConfig", _convert_to_array_of_opt_val(exconfig_do)) @@ -403,7 +426,7 @@ def __init__(self, **kwargs): for optval in exconfig_do: self.set('config.extraConfig["%s"]' % optval.key, optval) self.set('runtime.host', kwargs.get("runtime_host", None)) - self.device = kwargs.get("virtual_device") + self.device = 
kwargs.get("virtual_device", []) # Sample of diagnostics data is below. config = [ ('template', False), @@ -497,8 +520,9 @@ def reconfig(self, factory, val): controller = VirtualLsiLogicController() controller.key = controller_key - self.set("config.hardware.device", [disk, controller, - self.device[0]]) + devices = _create_array_of_type('VirtualDevice') + devices.VirtualDevice = [disk, controller, self.device[0]] + self.set("config.hardware.device", devices) except AttributeError: pass @@ -1140,6 +1164,11 @@ def _create_vm(self, method, *args, **kwargs): else: ds = create_datastore(vm_path.datastore, 1024, 500) + devices = [] + for device_change in config_spec.deviceChange: + if device_change.operation == 'add': + devices.append(device_change.device) + host = _db_content["HostSystem"].keys()[0] vm_dict = {"name": config_spec.name, "ds": [ds], @@ -1149,7 +1178,7 @@ def _create_vm(self, method, *args, **kwargs): "numCpu": config_spec.numCPUs, "mem": config_spec.memoryMB, "extra_config": config_spec.extraConfig, - "virtual_device": config_spec.deviceChange, + "virtual_device": devices, "instanceUuid": config_spec.instanceUuid} virtual_machine = VirtualMachine(**vm_dict) _create_object("VirtualMachine", virtual_machine) @@ -1230,7 +1259,8 @@ def _clone_vm(self, method, *args, **kwargs): "numCpu": source_vm_mdo.get("summary.config.numCpu"), "mem": source_vm_mdo.get("summary.config.memorySizeMB"), "extra_config": source_vm_mdo.get("config.extraConfig").OptionValue, - "virtual_device": source_vm_mdo.get("config.hardware.device"), + "virtual_device": + source_vm_mdo.get("config.hardware.device").VirtualDevice, "instanceUuid": source_vm_mdo.get("summary.config.instanceUuid")} if clone_spec.config is not None: diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index 2188c88af7..aef706ff90 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -623,7 +623,7 @@ def 
_check_vm_record(self, num_instances=1, powered_on=True): self.type_data['memory_mb']) self.assertEqual( - vm.get("config.hardware.device")[2].device.obj_name, + vm.get("config.hardware.device").VirtualDevice[2].obj_name, "ns0:VirtualE1000") if powered_on: # Check that the VM is running according to Nova From cdcd3734693f86f52e458bf8f72c5ce84a57a08e Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Mon, 30 Jun 2014 16:01:35 +0100 Subject: [PATCH 386/486] VMware: refactor spawn() code to build a new VM Move the spawn() code which builds a new VM into vmops.build_virtual_machine(). We remove the duplicate check of instance_name. Other than that this is straight code motion. In addition, we add a basic test of the function of build_virtual_machine, which tests that all given configuration is reflected in the resulting virtual machine. This change has been split out of https://review.openstack.org/#/c/87002/, which was written by Shawn Hartsock. partial blueprint vmware-spawn-refactor Co-authored-by: Shawn Hartsock Change-Id: Id9c95966eefb644fdaf36465e7b3d3583826314a --- nova/tests/virt/vmwareapi/test_vmops.py | 77 +++++++++++++++++++++---- nova/virt/vmwareapi/vmops.py | 53 ++++++++++------- 2 files changed, 99 insertions(+), 31 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_vmops.py b/nova/tests/virt/vmwareapi/test_vmops.py index 034bef43a8..01b50a8fe9 100644 --- a/nova/tests/virt/vmwareapi/test_vmops.py +++ b/nova/tests/virt/vmwareapi/test_vmops.py @@ -88,7 +88,7 @@ def setUp(self): self._vmops = vmops.VMwareVCVMOps(self._session, self._virtapi, None) self._image_id = nova.tests.image.fake.get_valid_image_id() - values = { + self._instance_values = { 'name': 'fake_name', 'uuid': 'fake_uuid', 'vcpus': 1, @@ -98,7 +98,7 @@ def setUp(self): 'node': 'respool-1001(MyResPoolName)' } self._instance = fake_instance.fake_instance_obj( - self._context, **values) + self._context, **self._instance_values) fake_ds_ref = vmwareapi_fake.ManagedObjectReference('fake-ds') 
self._ds = ds_util.Datastore( @@ -130,15 +130,18 @@ def setUp(self): vlan=None, bridge_interface=None, injected=True) + self._network_values = { + 'id': None, + 'address': 'DE:AD:BE:EF:00:00', + 'network': network, + 'type': None, + 'devname': None, + 'ovs_interfaceid': None, + 'rxtx_cap': 3 + } self.network_info = network_model.NetworkInfo([ - network_model.VIF(id=None, - address='DE:AD:BE:EF:00:00', - network=network, - type=None, - devname=None, - ovs_interfaceid=None, - rxtx_cap=3) - ]) + network_model.VIF(**self._network_values) + ]) pure_IPv6_network = network_model.Network(id=0, bridge='fa0', label='fake', @@ -754,3 +757,57 @@ def test_spawn_with_block_device_info(self): 'block_device_mapping': [{'connection_info': 'fake'}] } self._test_spawn(block_device_info=block_device_info) + + @mock.patch('nova.virt.vmwareapi.driver.VMwareAPISession._get_vim_object') + def test_build_virtual_machine(self, mock_get_vim_object): + mock_get_vim_object.return_value = vmwareapi_fake.FakeVim() + + fake_session = driver.VMwareAPISession() + fake_vmops = vmops.VMwareVCVMOps(fake_session, None, None) + + image_id = nova.tests.image.fake.get_valid_image_id() + image = vmware_images.VMwareImage(image_id=image_id) + + vm_ref = fake_vmops.build_virtual_machine(self._instance, + 'fake-instance-name', + image, self._dc_info, + self._ds, self.network_info) + + vm = vmwareapi_fake._get_object(vm_ref) + + # Test basic VM parameters + self.assertEqual('fake-instance-name', vm.name) + # NOTE(mdbooth): The instanceUuid behaviour below is apparently + # deliberate. 
+ self.assertEqual('fake-instance-name', + vm.get('summary.config.instanceUuid')) + self.assertEqual(self._instance_values['vcpus'], + vm.get('summary.config.numCpu')) + self.assertEqual(self._instance_values['memory_mb'], + vm.get('summary.config.memorySizeMB')) + + # Test NSX config + for optval in vm.get('config.extraConfig').OptionValue: + if optval.key == 'nvp.vm-uuid': + self.assertEqual(self._instance_values['uuid'], optval.value) + break + else: + self.fail('nvp.vm-uuid not found in extraConfig') + + # Test that the VM is associated with the specified datastore + datastores = vm.datastore.ManagedObjectReference + self.assertEqual(1, len(datastores)) + + datastore = vmwareapi_fake._get_object(datastores[0]) + self.assertEqual(self._ds.name, datastore.get('summary.name')) + + # Test that the VM's network is configured as specified + devices = vm.get('config.hardware.device').VirtualDevice + for device in devices: + if device.obj_name != 'ns0:VirtualE1000': + continue + self.assertEqual(self._network_values['address'], + device.macAddress) + break + else: + self.fail('NIC not configured') diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 642d035945..b1a8aacb5c 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -159,6 +159,30 @@ def _extend_if_required(self, dc_info, image_info, instance, self._extend_virtual_disk(instance, size_in_kb, root_vmdk_path, dc_info.ref) + def build_virtual_machine(self, instance, instance_name, image_info, + dc_info, datastore, network_info): + node_mo_id = vm_util.get_mo_id_from_instance(instance) + res_pool_ref = vm_util.get_res_pool_ref(self._session, + self._cluster, node_mo_id) + vif_infos = vmwarevif.get_vif_info(self._session, + self._cluster, + utils.is_neutron(), + image_info.vif_model, + network_info) + + # Get the create vm config spec + client_factory = self._session._get_vim().client.factory + config_spec = vm_util.get_vm_create_spec(client_factory, + instance, + 
instance_name, + datastore.name, + vif_infos, + image_info.os_type) + # Create the VM + vm_ref = vm_util.create_vm(self._session, instance, dc_info.vmFolder, + config_spec, res_pool_ref) + return vm_ref + def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info, block_device_info=None, instance_name=None, power_on=True): @@ -206,27 +230,14 @@ def spawn(self, context, instance, image_meta, injected_files, raise exception.InstanceUnacceptable(instance_id=instance.uuid, reason=reason) - node_mo_id = vm_util.get_mo_id_from_instance(instance) - res_pool_ref = vm_util.get_res_pool_ref(self._session, - self._cluster, node_mo_id) - - vif_infos = vmwarevif.get_vif_info(self._session, self._cluster, - utils.is_neutron(), - image_info.vif_model, - network_info) - - # Get the instance name. In some cases this may differ from the 'uuid', - # for example when the spawn of a rescue instance takes place. - if not instance_name: - instance_name = instance.uuid - - # Create the VM - config_spec = vm_util.get_vm_create_spec( - client_factory, instance, instance_name, - datastore.name, vif_infos, image_info.os_type) - - vm_ref = vm_util.create_vm(self._session, instance, dc_info.vmFolder, - config_spec, res_pool_ref) + # Creates the virtual machine. The virtual machine reference returned + # is unique within Virtual Center. + vm_ref = self.build_virtual_machine(instance, + instance_name, + image_info, + dc_info, + datastore, + network_info) # Cache the vm_ref. This saves a remote call to the VC. This uses the # instance_name. This covers all use cases including rescue and resize. From b5c80a04df03879a1639cc81bd1dacd95f566799 Mon Sep 17 00:00:00 2001 From: Vui Lam Date: Thu, 24 Apr 2014 11:59:42 -0700 Subject: [PATCH 387/486] VMware: spawn refactor _configure_config_drive Factor out the code to configure the virtual machine with a new config drive iso image. 
partial blueprint vmware-spawn-refactor Change-Id: Ie387445ae83e89fc436243a4c8ee9030d5221510 --- nova/tests/virt/vmwareapi/test_vmops.py | 20 +++++++++++++++ nova/virt/vmwareapi/vmops.py | 34 +++++++++++++++---------- 2 files changed, 41 insertions(+), 13 deletions(-) diff --git a/nova/tests/virt/vmwareapi/test_vmops.py b/nova/tests/virt/vmwareapi/test_vmops.py index 01b50a8fe9..e95bbcb26f 100644 --- a/nova/tests/virt/vmwareapi/test_vmops.py +++ b/nova/tests/virt/vmwareapi/test_vmops.py @@ -523,6 +523,26 @@ def test_finish_revert_migration_power_on(self): def test_finish_revert_migration_power_off(self): self._test_finish_revert_migration(power_on=False) + @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm') + @mock.patch.object(vmops.VMwareVMOps, '_create_config_drive') + def test_configure_config_drive(self, + mock_create_config_drive, + mock_attach_cdrom_to_vm): + injected_files = mock.Mock() + admin_password = mock.Mock() + vm_ref = mock.Mock() + mock_create_config_drive.return_value = "fake_iso_path" + self._vmops._configure_config_drive( + self._instance, vm_ref, self._dc_info, self._ds, + injected_files, admin_password) + + upload_iso_path = self._ds.build_path("fake_iso_path") + mock_create_config_drive.assert_called_once_with(self._instance, + injected_files, admin_password, self._ds.name, + self._dc_info.name, self._instance.uuid, "Fake-CookieJar") + mock_attach_cdrom_to_vm.assert_called_once_with( + vm_ref, self._instance, self._ds.ref, str(upload_iso_path)) + @mock.patch.object(vmops.LOG, 'debug') @mock.patch('nova.virt.vmwareapi.volumeops.VMwareVolumeOps' '.attach_root_volume') diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index b1a8aacb5c..c4b50e33e8 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -159,6 +159,24 @@ def _extend_if_required(self, dc_info, image_info, instance, self._extend_virtual_disk(instance, size_in_kb, root_vmdk_path, dc_info.ref) + def _configure_config_drive(self, 
instance, vm_ref, dc_info, datastore, + injected_files, admin_password): + session_vim = self._session._get_vim() + cookies = session_vim.client.options.transport.cookiejar + + uploaded_iso_path = self._create_config_drive(instance, + injected_files, + admin_password, + datastore.name, + dc_info.name, + instance['uuid'], + cookies) + uploaded_iso_path = datastore.build_path(uploaded_iso_path) + self._attach_cdrom_to_vm( + vm_ref, instance, + datastore.ref, + str(uploaded_iso_path)) + def build_virtual_machine(self, instance, instance_name, image_info, dc_info, datastore, network_info): node_mo_id = vm_util.get_mo_id_from_instance(instance) @@ -531,19 +549,9 @@ def spawn(self, context, instance, image_meta, injected_files, uploaded_file_path) if configdrive.required_by(instance): - uploaded_iso_path = self._create_config_drive(instance, - injected_files, - admin_password, - datastore.name, - dc_info.name, - instance.uuid, - cookies) - uploaded_iso_path = ds_util.DatastorePath(datastore.name, - uploaded_iso_path) - self._attach_cdrom_to_vm( - vm_ref, instance, - datastore.ref, - str(uploaded_iso_path)) + self._configure_config_drive( + instance, vm_ref, dc_info, datastore, injected_files, + admin_password) if power_on: vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref) From 0d9c76964b9826fff84cbd5a76025b4eed0b0fe1 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Fri, 15 Aug 2014 08:09:36 -0700 Subject: [PATCH 388/486] Image cache tests: ensure that assertEquals has expected param first Update the tests to ensure that the expected parameter is first. 
TrivialFix Change-Id: I5a860d7a217f69bc1d5e7e0e470e724e62a69372 --- nova/tests/virt/test_imagecache.py | 32 ++++++++++++++---------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/nova/tests/virt/test_imagecache.py b/nova/tests/virt/test_imagecache.py index a2b9ff9f59..693b0625d6 100644 --- a/nova/tests/virt/test_imagecache.py +++ b/nova/tests/virt/test_imagecache.py @@ -25,13 +25,11 @@ class ImageCacheManagerTests(test.NoDBTestCase): def test_configurationi_defaults(self): - self.assertEqual(CONF.image_cache_manager_interval, - 2400) - self.assertEqual(CONF.image_cache_subdirectory_name, - '_base') + self.assertEqual(2400, CONF.image_cache_manager_interval) + self.assertEqual('_base', CONF.image_cache_subdirectory_name) self.assertTrue(CONF.remove_unused_base_images) - self.assertEqual(CONF.remove_unused_original_minimum_age_seconds, - 24 * 3600) + self.assertEqual(24 * 3600, + CONF.remove_unused_original_minimum_age_seconds) def test_cache_manager(self): cache_manager = imagecache.ImageCacheManager() @@ -41,8 +39,8 @@ def test_cache_manager(self): self.assertRaises(NotImplementedError, cache_manager._get_base) base_images = cache_manager._list_base_images(None) - self.assertEqual(base_images['unexplained_images'], []) - self.assertEqual(base_images['originals'], []) + self.assertEqual([], base_images['unexplained_images']) + self.assertEqual([], base_images['originals']) self.assertRaises(NotImplementedError, cache_manager._age_and_verify_cached_images, None, [], None) @@ -78,7 +76,7 @@ def test_list_running_instances(self): running = image_cache_manager._list_running_instances(None, all_instances) - self.assertEqual(len(running['used_images']), 4) + self.assertEqual(4, len(running['used_images'])) self.assertEqual((1, 0, ['instance-00000001']), running['used_images']['1']) self.assertEqual((1, 1, ['instance-00000002', @@ -92,11 +90,11 @@ def test_list_running_instances(self): self.assertIn('instance-00000001', running['instance_names']) 
self.assertIn('123', running['instance_names']) - self.assertEqual(len(running['image_popularity']), 4) - self.assertEqual(running['image_popularity']['1'], 1) - self.assertEqual(running['image_popularity']['2'], 2) - self.assertEqual(running['image_popularity']['21'], 1) - self.assertEqual(running['image_popularity']['22'], 1) + self.assertEqual(4, len(running['image_popularity'])) + self.assertEqual(1, running['image_popularity']['1']) + self.assertEqual(2, running['image_popularity']['2']) + self.assertEqual(1, running['image_popularity']['21']) + self.assertEqual(1, running['image_popularity']['22']) def test_list_resizing_instances(self): instances = [{'image_ref': '1', @@ -113,12 +111,12 @@ def test_list_resizing_instances(self): running = image_cache_manager._list_running_instances(None, all_instances) - self.assertEqual(len(running['used_images']), 1) + self.assertEqual(1, len(running['used_images'])) self.assertEqual((1, 0, ['instance-00000001']), running['used_images']['1']) self.assertEqual(set(['instance-00000001', '123', 'instance-00000001_resize', '123_resize']), running['instance_names']) - self.assertEqual(len(running['image_popularity']), 1) - self.assertEqual(running['image_popularity']['1'], 1) + self.assertEqual(1, len(running['image_popularity'])) + self.assertEqual(1, running['image_popularity']['1']) From 2eacece42268b526c4ea8d8906cc646471818ba9 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sun, 10 Aug 2014 01:53:14 -0700 Subject: [PATCH 389/486] VMware: remove Host class The class was only used by the ESX driver which was remove in the commit 1deb31f85a8f5d1e261b2cf1eddc537a5da7f60b. 
Change-Id: I026e4c0f463e78e33592c6408d2e688c74a0714b --- nova/virt/vmwareapi/host.py | 51 ------------------------------------- 1 file changed, 51 deletions(-) diff --git a/nova/virt/vmwareapi/host.py b/nova/virt/vmwareapi/host.py index ba9a01de75..2213126ed1 100644 --- a/nova/virt/vmwareapi/host.py +++ b/nova/virt/vmwareapi/host.py @@ -28,57 +28,6 @@ LOG = logging.getLogger(__name__) -class Host(object): - """Implements host related operations.""" - def __init__(self, session): - self._session = session - - def host_power_action(self, host, action): - """Reboots or shuts down the host.""" - host_mor = vm_util.get_host_ref(self._session) - LOG.debug("%(action)s %(host)s", {'action': action, 'host': host}) - if action == "reboot": - host_task = self._session._call_method( - self._session._get_vim(), - "RebootHost_Task", host_mor, - force=False) - elif action == "shutdown": - host_task = self._session._call_method( - self._session._get_vim(), - "ShutdownHost_Task", host_mor, - force=False) - elif action == "startup": - host_task = self._session._call_method( - self._session._get_vim(), - "PowerUpHostFromStandBy_Task", host_mor, - timeoutSec=60) - self._session._wait_for_task(host_task) - - def host_maintenance_mode(self, host, mode): - """Start/Stop host maintenance window. On start, it triggers - guest VMs evacuation. 
- """ - host_mor = vm_util.get_host_ref(self._session) - LOG.debug("Set maintenance mod on %(host)s to %(mode)s", - {'host': host, 'mode': mode}) - if mode: - host_task = self._session._call_method( - self._session._get_vim(), - "EnterMaintenanceMode_Task", - host_mor, timeout=0, - evacuatePoweredOffVms=True) - else: - host_task = self._session._call_method( - self._session._get_vim(), - "ExitMaintenanceMode_Task", - host_mor, timeout=0) - self._session._wait_for_task(host_task) - - def set_host_enabled(self, _host, enabled): - """Sets the specified host's ability to accept new instances.""" - pass - - def _get_ds_capacity_and_freespace(session, cluster=None): try: ds = ds_util.get_datastore(session, cluster) From 55d813cc47eed27e5b43dff89ff525a50c4649a8 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sun, 10 Aug 2014 03:19:49 -0700 Subject: [PATCH 390/486] VMware: remove specific VC support from class VMwareVolumeOps This support is no longer needed as we only support the VC driver since commit 1deb31f85a8f5d1e261b2cf1eddc537a5da7f60b. 
Change-Id: Iad71fceb580620b1843ce3cf5d926343d8113945 --- nova/virt/vmwareapi/driver.py | 3 +-- nova/virt/vmwareapi/volumeops.py | 9 ++------- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index f0ba5a41f4..a815794a22 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -301,8 +301,7 @@ def _update_resources(self): added_nodes = set(self.dict_mors.keys()) - set(self._resource_keys) for node in added_nodes: _volumeops = volumeops.VMwareVolumeOps(self._session, - self.dict_mors[node]['cluster_mor'], - vc_support=True) + self.dict_mors[node]['cluster_mor']) _vmops = vmops.VMwareVCVMOps(self._session, self._virtapi, _volumeops, self.dict_mors[node]['cluster_mor'], diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py index 6a5e1c5790..1ca01f90da 100644 --- a/nova/virt/vmwareapi/volumeops.py +++ b/nova/virt/vmwareapi/volumeops.py @@ -33,10 +33,9 @@ class VMwareVolumeOps(object): """Management class for Volume-related tasks.""" - def __init__(self, session, cluster=None, vc_support=False): + def __init__(self, session, cluster=None): self._session = session self._cluster = cluster - self._vc_support = vc_support def attach_disk_to_vm(self, vm_ref, instance, adapter_type, disk_type, vmdk_path=None, @@ -439,10 +438,6 @@ def _consolidate_vmdk_volume(self, instance, vm_ref, device, volume_ref): is on the datastore of the instance. """ - # Consolidation only supported with VC driver - if not self._vc_support: - return - original_device = self._get_vmdk_base_volume_device(volume_ref) original_device_path = original_device.backing.fileName @@ -568,7 +563,7 @@ def attach_root_volume(self, connection_info, instance, mountpoint, driver_type = connection_info['driver_volume_type'] LOG.debug("Root volume attach. 
Driver type: %s", driver_type, instance=instance) - if self._vc_support and driver_type == 'vmdk': + if driver_type == 'vmdk': vm_ref = vm_util.get_vm_ref(self._session, instance) data = connection_info['data'] # Get the volume ref From cc5254bf483e1efea969ad0948a226a3ec270495 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Sun, 27 Jul 2014 20:42:55 -0400 Subject: [PATCH 391/486] Pull transfer module unit tests from glance tests Separates the unit tests of the file transfer download module (the only one that is implemented in nova.image.download) out into its own unit test file test_transfer_modules.py out of test_glance.py. In the process, properly limited the unit test boundaries of the transfer module unit tests to just the transfer module class' download() method, instead of testing all the way through the GlanceImageService class unnecessarily (since that is already tested thoroughly in test_glance.py). As an added benefit, the unit tests of the download and transfer module stuff went from around 4 seconds to less than a quarter-second for all the tests. 
Change-Id: I2ff6e31359d343b78dc4d03cf2beafd8a99b0a29 --- nova/tests/image/test_glance.py | 475 ++++++++++------------ nova/tests/image/test_transfer_modules.py | 101 +++++ 2 files changed, 322 insertions(+), 254 deletions(-) create mode 100644 nova/tests/image/test_transfer_modules.py diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 2effafa67a..431c82a320 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -15,11 +15,8 @@ import datetime -import filecmp -import os import random import sys -import tempfile import time import glanceclient.exc @@ -31,10 +28,8 @@ from nova import exception from nova.image import glance from nova import test -from nova.tests.api.openstack import fakes from nova.tests.glance import stubs as glance_stubs from nova import utils -import nova.virt.libvirt.utils as lv_utils CONF = cfg.CONF NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000" @@ -193,265 +188,237 @@ def test_get_remote_service_from_href(self, gcwi_mocked): use_ssl=False) -class NullWriter(object): - """Used to test ImageService.get which takes a writer object.""" - - def write(self, *arg, **kwargs): - pass - - -class TestGlanceImageService(test.NoDBTestCase): - """Tests the Glance image service. - - At a high level, the translations involved are: - - 1. Glance -> ImageService - This is needed so we can support - multple ImageServices (Glance, Local, etc) - - 2. ImageService -> API - This is needed so we can support multple - APIs (OpenStack, EC2) +class TestDownloadNoDirectUri(test.NoDBTestCase): + """Tests the download method of the GlanceImageService when the + default of not allowing direct URI transfers is set. 
""" - def setUp(self): - super(TestGlanceImageService, self).setUp() - fakes.stub_out_compute_api_snapshot(self.stubs) - - self.client = glance_stubs.StubGlanceClient() - self.service = self._create_image_service(self.client) - self.context = context.RequestContext('fake', 'fake', auth_token=True) - self.files_to_clean = [] - - def tearDown(self): - super(TestGlanceImageService, self).tearDown() - for f in self.files_to_clean: - try: - os.unlink(f) - except os.error: - pass - - def _get_tempfile(self): - (outfd, config_filename) = tempfile.mkstemp(prefix='nova_glance_tests') - self.files_to_clean.append(config_filename) - return (outfd, config_filename) - - def _create_image_service(self, client): - def _fake_create_glance_client(context, host, port, use_ssl, version): - return client - - self.stubs.Set(glance, '_create_glance_client', - _fake_create_glance_client) - - client_wrapper = glance.GlanceClientWrapper( - 'fake', 'fake_host', 9292) - return glance.GlanceImageService(client=client_wrapper) - - def test_download_with_retries(self): - tries = [0] - - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - """A client that fails the first time, then succeeds.""" - def get(self, image_id): - if tries[0] == 0: - tries[0] = 1 - raise glanceclient.exc.ServiceUnavailable('') - else: - return {} - - client = MyGlanceStubClient() - service = self._create_image_service(client) - image_id = 1 # doesn't matter - writer = NullWriter() + @mock.patch('__builtin__.open') + @mock.patch('nova.image.glance.GlanceImageService.show') + def test_download_no_data_no_dest_path(self, show_mock, open_mock): + client = mock.MagicMock() + client.call.return_value = mock.sentinel.image_chunks + ctx = mock.sentinel.ctx + service = glance.GlanceImageService(client) + res = service.download(ctx, mock.sentinel.image_id) - # When retries are disabled, we should get an exception - self.flags(num_retries=0, group='glance') - self.assertRaises(exception.GlanceConnectionFailed, - 
service.download, self.context, image_id, data=writer) + self.assertFalse(show_mock.called) + self.assertFalse(open_mock.called) + client.call.assert_called_once_with(ctx, 1, 'data', + mock.sentinel.image_id) + self.assertEqual(mock.sentinel.image_chunks, res) - # Now lets enable retries. No exception should happen now. - tries = [0] - self.flags(num_retries=1, group='glance') - service.download(self.context, image_id, data=writer) + @mock.patch('__builtin__.open') + @mock.patch('nova.image.glance.GlanceImageService.show') + def test_download_data_no_dest_path(self, show_mock, open_mock): + client = mock.MagicMock() + client.call.return_value = [1, 2, 3] + ctx = mock.sentinel.ctx + data = mock.MagicMock() + service = glance.GlanceImageService(client) + res = service.download(ctx, mock.sentinel.image_id, data=data) - def test_download_file_url(self): - self.flags(allowed_direct_url_schemes=['file'], group='glance') + self.assertFalse(show_mock.called) + self.assertFalse(open_mock.called) + client.call.assert_called_once_with(ctx, 1, 'data', + mock.sentinel.image_id) + self.assertIsNone(res) + data.write.assert_has_calls( + [ + mock.call(1), + mock.call(2), + mock.call(3) + ] + ) + self.assertFalse(data.close.called) + + @mock.patch('__builtin__.open') + @mock.patch('nova.image.glance.GlanceImageService.show') + def test_download_no_data_dest_path(self, show_mock, open_mock): + client = mock.MagicMock() + client.call.return_value = [1, 2, 3] + ctx = mock.sentinel.ctx + writer = mock.MagicMock() + open_mock.return_value = writer + service = glance.GlanceImageService(client) + res = service.download(ctx, mock.sentinel.image_id, + dst_path=mock.sentinel.dst_path) - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - """A client that returns a file url.""" - - (outfd, s_tmpfname) = tempfile.mkstemp(prefix='directURLsrc') - outf = os.fdopen(outfd, 'w') - inf = open('/dev/urandom', 'r') - for i in range(10): - _data = inf.read(1024) - outf.write(_data) - 
outf.close() - - def get(self, image_id): - return type('GlanceTestDirectUrlMeta', (object,), - {'status': 'active', - 'direct_url': 'file://%s' + self.s_tmpfname}) - - client = MyGlanceStubClient() - (outfd, tmpfname) = tempfile.mkstemp(prefix='directURLdst') - os.close(outfd) - - service = self._create_image_service(client) - image_id = 1 # doesn't matter - - service.download(self.context, image_id, dst_path=tmpfname) - - # compare the two files - rc = filecmp.cmp(tmpfname, client.s_tmpfname) - self.assertTrue(rc, "The file %s and %s should be the same" % - (tmpfname, client.s_tmpfname)) - os.remove(client.s_tmpfname) - os.remove(tmpfname) - - @mock.patch('nova.virt.libvirt.utils.copy_image') - def test_download_module_filesystem_match(self, mock_copy_image): - - mountpoint = '/' - fs_id = 'someid' - desc = {'id': fs_id, 'mountpoint': mountpoint} - - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - outer_test = self - - def get(self, image_id): - return type('GlanceLocations', (object,), - {'status': 'active', - 'locations': [ - {'url': 'file:///' + os.devnull, - 'metadata': desc}]}) - - def data(self, image_id): - self.outer_test.fail('This should not be called because the ' - 'transfer module should have intercepted ' - 'it.') - - image_id = 1 # doesn't matter - client = MyGlanceStubClient() - self.flags(allowed_direct_url_schemes=['file'], group='glance') - self.flags(group='image_file_url', filesystems=['gluster']) - service = self._create_image_service(client) - # NOTE(Jbresnah) The following options must be added after the module - # has added the specific groups. 
- self.flags(group='image_file_url:gluster', id=fs_id) - self.flags(group='image_file_url:gluster', mountpoint=mountpoint) - - dest_file = os.devnull - service.download(self.context, image_id, dst_path=dest_file) - mock_copy_image.assert_called_once_with('/' + os.devnull, os.devnull) - - def test_download_module_no_filesystem_match(self): - mountpoint = '/' - fs_id = 'someid' - desc = {'id': fs_id, 'mountpoint': mountpoint} - some_data = "sfxvdwjer" - - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - outer_test = self - - def get(self, image_id): - return type('GlanceLocations', (object,), - {'status': 'active', - 'locations': [ - {'url': 'file:///' + os.devnull, - 'metadata': desc}]}) - - def data(self, image_id): - return some_data - - def _fake_copyfile(source, dest): - self.fail('This should not be called because a match should not ' - 'have been found.') - self.stubs.Set(lv_utils, 'copy_image', _fake_copyfile) - - image_id = 1 # doesn't matter - client = MyGlanceStubClient() - self.flags(allowed_direct_url_schemes=['file'], group='glance') - self.flags(group='image_file_url', filesystems=['gluster']) - service = self._create_image_service(client) - # NOTE(Jbresnah) The following options must be added after the module - # has added the specific groups. 
- self.flags(group='image_file_url:gluster', id='someotherid') - self.flags(group='image_file_url:gluster', mountpoint=mountpoint) - - service.download(self.context, image_id, - dst_path=os.devnull, - data=None) - - def test_download_module_mountpoints(self): - glance_mount = '/glance/mount/point' - _, data_filename = self._get_tempfile() - nova_mount = os.path.dirname(data_filename) - source_path = os.path.basename(data_filename) - file_url = 'file://%s' % os.path.join(glance_mount, source_path) - file_system_id = 'test_FS_ID' - file_system_desc = {'id': file_system_id, 'mountpoint': glance_mount} - - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - outer_test = self - - def get(self, image_id): - return type('GlanceLocations', (object,), - {'status': 'active', - 'locations': [{'url': file_url, - 'metadata': file_system_desc}]}) - - def data(self, image_id): - self.outer_test.fail('This should not be called because the ' - 'transfer module should have intercepted ' - 'it.') - - self.copy_called = False - - def _fake_copyfile(source, dest): - self.assertEqual(source, data_filename) - self.copy_called = True - self.stubs.Set(lv_utils, 'copy_image', _fake_copyfile) + self.assertFalse(show_mock.called) + client.call.assert_called_once_with(ctx, 1, 'data', + mock.sentinel.image_id) + open_mock.assert_called_once_with(mock.sentinel.dst_path, 'wb') + self.assertIsNone(res) + writer.write.assert_has_calls( + [ + mock.call(1), + mock.call(2), + mock.call(3) + ] + ) + writer.close.assert_called_once_with() + + @mock.patch('__builtin__.open') + @mock.patch('nova.image.glance.GlanceImageService.show') + def test_download_data_dest_path(self, show_mock, open_mock): + # NOTE(jaypipes): This really shouldn't be allowed, but because of the + # horrible design of the download() method in GlanceImageService, no + # error is raised, and the dst_path is ignored... + # #TODO(jaypipes): Fix the aforementioned horrible design of + # the download() method. 
+ client = mock.MagicMock() + client.call.return_value = [1, 2, 3] + ctx = mock.sentinel.ctx + data = mock.MagicMock() + service = glance.GlanceImageService(client) + res = service.download(ctx, mock.sentinel.image_id, data=data) + self.assertFalse(show_mock.called) + self.assertFalse(open_mock.called) + client.call.assert_called_once_with(ctx, 1, 'data', + mock.sentinel.image_id) + self.assertIsNone(res) + data.write.assert_has_calls( + [ + mock.call(1), + mock.call(2), + mock.call(3) + ] + ) + self.assertFalse(data.close.called) + + @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module') + @mock.patch('nova.image.glance.GlanceImageService.show') + def test_download_direct_file_uri(self, show_mock, get_tran_mock): self.flags(allowed_direct_url_schemes=['file'], group='glance') - self.flags(group='image_file_url', filesystems=['gluster']) - image_id = 1 # doesn't matter - client = MyGlanceStubClient() - service = self._create_image_service(client) - self.flags(group='image_file_url:gluster', id=file_system_id) - self.flags(group='image_file_url:gluster', mountpoint=nova_mount) - - service.download(self.context, image_id, dst_path=os.devnull) - self.assertTrue(self.copy_called) - - @mock.patch('nova.virt.libvirt.utils.copy_image') - def test_download_module_file_bad_module(self, mock_copy_image): - _, data_filename = self._get_tempfile() - file_url = 'applesauce://%s' % data_filename - - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - data_called = False - - def get(self, image_id): - return type('GlanceLocations', (object,), - {'status': 'active', - 'locations': [{'url': file_url, - 'metadata': {}}]}) - - def data(self, image_id): - self.data_called = True - return "someData" - - self.flags(allowed_direct_url_schemes=['applesauce'], group='glance') + show_mock.return_value = { + 'locations': [ + { + 'url': 'file:///files/image', + 'metadata': mock.sentinel.loc_meta + } + ] + } + tran_mod = mock.MagicMock() + get_tran_mock.return_value = 
tran_mod + client = mock.MagicMock() + ctx = mock.sentinel.ctx + service = glance.GlanceImageService(client) + res = service.download(ctx, mock.sentinel.image_id, + dst_path=mock.sentinel.dst_path) + + self.assertIsNone(res) + self.assertFalse(client.call.called) + show_mock.assert_called_once_with(ctx, + mock.sentinel.image_id, + include_locations=True) + get_tran_mock.assert_called_once_with('file') + tran_mod.download.assert_called_once_with(ctx, mock.ANY, + mock.sentinel.dst_path, + mock.sentinel.loc_meta) + + @mock.patch('__builtin__.open') + @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module') + @mock.patch('nova.image.glance.GlanceImageService.show') + def test_download_direct_exception_fallback(self, show_mock, + get_tran_mock, + open_mock): + # Test that we fall back to downloading to the dst_path + # if the download method of the transfer module raised + # an exception. self.flags(allowed_direct_url_schemes=['file'], group='glance') - image_id = 1 # doesn't matter - client = MyGlanceStubClient() - service = self._create_image_service(client) - - # by not calling copyfileobj in the file download module we verify - # that the requirements were not met for its use - service.download(self.context, image_id, dst_path=os.devnull) - self.assertTrue(client.data_called) - self.assertFalse(mock_copy_image.called) + show_mock.return_value = { + 'locations': [ + { + 'url': 'file:///files/image', + 'metadata': mock.sentinel.loc_meta + } + ] + } + tran_mod = mock.MagicMock() + tran_mod.download.side_effect = Exception + get_tran_mock.return_value = tran_mod + client = mock.MagicMock() + client.call.return_value = [1, 2, 3] + ctx = mock.sentinel.ctx + writer = mock.MagicMock() + open_mock.return_value = writer + service = glance.GlanceImageService(client) + res = service.download(ctx, mock.sentinel.image_id, + dst_path=mock.sentinel.dst_path) + + self.assertIsNone(res) + show_mock.assert_called_once_with(ctx, + mock.sentinel.image_id, + 
include_locations=True) + get_tran_mock.assert_called_once_with('file') + tran_mod.download.assert_called_once_with(ctx, mock.ANY, + mock.sentinel.dst_path, + mock.sentinel.loc_meta) + client.call.assert_called_once_with(ctx, 1, 'data', + mock.sentinel.image_id) + # NOTE(jaypipes): log messages call open() in part of the + # download path, so here, we just check that the last open() + # call was done for the dst_path file descriptor. + open_mock.assert_called_with(mock.sentinel.dst_path, 'wb') + self.assertIsNone(res) + writer.write.assert_has_calls( + [ + mock.call(1), + mock.call(2), + mock.call(3) + ] + ) + + @mock.patch('__builtin__.open') + @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module') + @mock.patch('nova.image.glance.GlanceImageService.show') + def test_download_direct_no_mod_fallback(self, show_mock, + get_tran_mock, + open_mock): + # Test that we fall back to downloading to the dst_path + # if no appropriate transfer module is found... + # an exception. + self.flags(allowed_direct_url_schemes=['funky'], group='glance') + show_mock.return_value = { + 'locations': [ + { + 'url': 'file:///files/image', + 'metadata': mock.sentinel.loc_meta + } + ] + } + get_tran_mock.return_value = None + client = mock.MagicMock() + client.call.return_value = [1, 2, 3] + ctx = mock.sentinel.ctx + writer = mock.MagicMock() + open_mock.return_value = writer + service = glance.GlanceImageService(client) + res = service.download(ctx, mock.sentinel.image_id, + dst_path=mock.sentinel.dst_path) + + self.assertIsNone(res) + show_mock.assert_called_once_with(ctx, + mock.sentinel.image_id, + include_locations=True) + get_tran_mock.assert_called_once_with('file') + client.call.assert_called_once_with(ctx, 1, 'data', + mock.sentinel.image_id) + # NOTE(jaypipes): log messages call open() in part of the + # download path, so here, we just check that the last open() + # call was done for the dst_path file descriptor. 
+ open_mock.assert_called_with(mock.sentinel.dst_path, 'wb') + self.assertIsNone(res) + writer.write.assert_has_calls( + [ + mock.call(1), + mock.call(2), + mock.call(3) + ] + ) + writer.close.assert_called_once_with() def _create_failing_glance_client(info): diff --git a/nova/tests/image/test_transfer_modules.py b/nova/tests/image/test_transfer_modules.py new file mode 100644 index 0000000000..51920c36aa --- /dev/null +++ b/nova/tests/image/test_transfer_modules.py @@ -0,0 +1,101 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import urlparse + +import mock + +from nova import exception +from nova.image.download import file as tm_file +from nova import test + + +class TestFileTransferModule(test.NoDBTestCase): + + @mock.patch('nova.virt.libvirt.utils.copy_image') + def test_filesystem_success(self, copy_mock): + self.flags(allowed_direct_url_schemes=['file'], group='glance') + self.flags(group='image_file_url', filesystems=['gluster']) + + mountpoint = '/gluster' + url = 'file:///gluster/my/image/path' + url_parts = urlparse.urlparse(url) + fs_id = 'someid' + loc_meta = { + 'id': fs_id, + 'mountpoint': mountpoint + } + dst_file = mock.MagicMock() + + tm = tm_file.FileTransfer() + + # NOTE(Jbresnah) The following options must be added after the module + # has added the specific groups. 
+ self.flags(group='image_file_url:gluster', id=fs_id) + self.flags(group='image_file_url:gluster', mountpoint=mountpoint) + + tm.download(mock.sentinel.ctx, url_parts, dst_file, loc_meta) + copy_mock.assert_called_once_with('/gluster/my/image/path', dst_file) + + @mock.patch('nova.virt.libvirt.utils.copy_image') + def test_filesystem_mismatched_mountpoint(self, copy_mock): + self.flags(allowed_direct_url_schemes=['file'], group='glance') + self.flags(group='image_file_url', filesystems=['gluster']) + + mountpoint = '/gluster' + # Should include the mountpoint before my/image/path + url = 'file:///my/image/path' + url_parts = urlparse.urlparse(url) + fs_id = 'someid' + loc_meta = { + 'id': fs_id, + 'mountpoint': mountpoint + } + dst_file = mock.MagicMock() + + tm = tm_file.FileTransfer() + + self.flags(group='image_file_url:gluster', id=fs_id) + self.flags(group='image_file_url:gluster', mountpoint=mountpoint) + + self.assertRaises(exception.ImageDownloadModuleMetaDataError, + tm.download, mock.sentinel.ctx, url_parts, + dst_file, loc_meta) + self.assertFalse(copy_mock.called) + + @mock.patch('nova.virt.libvirt.utils.copy_image') + def test_filesystem_mismatched_filesystem(self, copy_mock): + self.flags(allowed_direct_url_schemes=['file'], group='glance') + self.flags(group='image_file_url', filesystems=['gluster']) + + mountpoint = '/gluster' + # Should include the mountpoint before my/image/path + url = 'file:///my/image/path' + url_parts = urlparse.urlparse(url) + fs_id = 'someid' + loc_meta = { + 'id': 'funky', + 'mountpoint': mountpoint + } + dst_file = mock.MagicMock() + + tm = tm_file.FileTransfer() + + self.flags(group='image_file_url:gluster', id=fs_id) + self.flags(group='image_file_url:gluster', mountpoint=mountpoint) + + self.assertRaises(exception.ImageDownloadModuleError, + tm.download, mock.sentinel.ctx, url_parts, + dst_file, loc_meta) + self.assertFalse(copy_mock.called) From 486623c0ca6d71ace5e6b0cd234be05f10f13dc4 Mon Sep 17 00:00:00 2001 From: 
Jay Pipes Date: Sat, 9 Aug 2014 15:04:57 -0700 Subject: [PATCH 392/486] Removes GlanceClient stubs Refactors the tests in nova.tests.image.test_glance that were checking the behaviour of both the GlanceClientWrapper retry logic and the glanceclient.Client creation to use mock instead of the FakeGlanceClient in nova.tests.glance.stubs. The fake stub client was actually masking issues in the existing test cases, including not properly checking the identity headers that are actually supplied to the real glanceclient.Client constructor. Change-Id: I1c114df8e4ab2fccd966ed4af22181881590c443 Partial-bug: #1293938 --- nova/tests/glance/stubs.py | 91 -------- nova/tests/image/test_glance.py | 403 ++++++++++++++++---------------- 2 files changed, 204 insertions(+), 290 deletions(-) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index d088c299ed..42d5e4ab6b 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -12,97 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import glanceclient.exc - - -NOW_GLANCE_FORMAT = "2010-10-11T10:30:22" - - -class StubGlanceClient(object): - - def __init__(self, images=None, version=None, endpoint=None, **params): - self.auth_token = params.get('token') - self.identity_headers = params.get('identity_headers') - if self.identity_headers: - if self.identity_headers.get('X-Auth-Token'): - self.auth_token = (self.identity_headers.get('X-Auth_Token') or - self.auth_token) - del self.identity_headers['X-Auth-Token'] - self._images = [] - _images = images or [] - map(lambda image: self.create(**image), _images) - - # NOTE(bcwaldon): HACK to get client.images.* to work - self.images = lambda: None - for fn in ('list', 'get', 'data', 'create', 'update', 'delete'): - setattr(self.images, fn, getattr(self, fn)) - - # TODO(bcwaldon): implement filters - def list(self, filters=None, marker=None, limit=30, page_size=20): - if marker is None: - index = 0 - else: - for index, image in enumerate(self._images): - if image.id == str(marker): - index += 1 - break - else: - raise glanceclient.exc.BadRequest('Marker not found') - return self._images[index:index + limit] - - def get(self, image_id): - for image in self._images: - if image.id == str(image_id): - return image - raise glanceclient.exc.NotFound(image_id) - - def data(self, image_id): - self.get(image_id) - return [] - - def create(self, **metadata): - metadata['created_at'] = NOW_GLANCE_FORMAT - metadata['updated_at'] = NOW_GLANCE_FORMAT - - self._images.append(FakeImage(metadata)) - - try: - image_id = str(metadata['id']) - except KeyError: - # auto-generate an id if one wasn't provided - image_id = str(len(self._images)) - - self._images[-1].id = image_id - - return self._images[-1] - - def update(self, image_id, **metadata): - for i, image in enumerate(self._images): - if image.id == str(image_id): - # If you try to update a non-authorized image, it raises - # HTTPForbidden - if image.owner == 'authorized_fake': - raise glanceclient.exc.HTTPForbidden 
- - for k, v in metadata.items(): - setattr(self._images[i], k, v) - return self._images[i] - raise glanceclient.exc.NotFound(image_id) - - def delete(self, image_id): - for i, image in enumerate(self._images): - if image.id == image_id: - # When you delete an image from glance, it sets the status to - # DELETED. If you try to delete a DELETED image, it raises - # HTTPForbidden. - image_data = self._images[i] - if image_data.deleted: - raise glanceclient.exc.HTTPForbidden() - image_data.deleted = True - image_data.deleted_at = NOW_GLANCE_FORMAT - return - raise glanceclient.exc.NotFound(image_id) - class FakeImage(object): def __init__(self, metadata): diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 431c82a320..fbc795a84c 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -15,9 +15,7 @@ import datetime -import random import sys -import time import glanceclient.exc import mock @@ -188,6 +186,210 @@ def test_get_remote_service_from_href(self, gcwi_mocked): use_ssl=False) +class TestCreateGlanceClient(test.NoDBTestCase): + @mock.patch('nova.utils.is_valid_ipv6') + @mock.patch('glanceclient.Client') + def test_headers_passed_glanceclient(self, init_mock, ipv6_mock): + self.flags(auth_strategy='keystone') + ipv6_mock.return_value = False + auth_token = 'token' + ctx = context.RequestContext('fake', 'fake', auth_token=auth_token) + host = 'host4' + port = 9295 + use_ssl = False + + expected_endpoint = 'http://host4:9295' + expected_params = { + 'identity_headers': { + 'X-Auth-Token': 'token', + 'X-User-Id': 'fake', + 'X-Roles': '', + 'X-Tenant-Id': 'fake', + 'X-Service-Catalog': '[]', + 'X-Identity-Status': 'Confirmed' + }, + 'token': 'token' + } + glance._create_glance_client(ctx, host, port, use_ssl) + init_mock.assert_called_once_with('1', expected_endpoint, + **expected_params) + + # Test the version is properly passed to glanceclient. 
+ ipv6_mock.reset_mock() + init_mock.reset_mock() + + expected_endpoint = 'http://host4:9295' + expected_params = { + 'identity_headers': { + 'X-Auth-Token': 'token', + 'X-User-Id': 'fake', + 'X-Roles': '', + 'X-Tenant-Id': 'fake', + 'X-Service-Catalog': '[]', + 'X-Identity-Status': 'Confirmed' + }, + 'token': 'token' + } + glance._create_glance_client(ctx, host, port, use_ssl, version=2) + init_mock.assert_called_once_with('2', expected_endpoint, + **expected_params) + + # Test that non-keystone auth strategy doesn't bother to pass + # glanceclient all the Keystone-related headers. + ipv6_mock.reset_mock() + init_mock.reset_mock() + + self.flags(auth_strategy='non-keystone') + + expected_endpoint = 'http://host4:9295' + expected_params = { + } + glance._create_glance_client(ctx, host, port, use_ssl) + init_mock.assert_called_once_with('1', expected_endpoint, + **expected_params) + + # Test that the IPv6 bracketization adapts the endpoint properly. + ipv6_mock.reset_mock() + init_mock.reset_mock() + + ipv6_mock.return_value = True + + expected_endpoint = 'http://[host4]:9295' + expected_params = { + } + glance._create_glance_client(ctx, host, port, use_ssl) + init_mock.assert_called_once_with('1', expected_endpoint, + **expected_params) + + +class TestGlanceClientWrapper(test.NoDBTestCase): + @mock.patch('time.sleep') + @mock.patch('nova.image.glance._create_glance_client') + def test_static_client_without_retries(self, create_client_mock, + sleep_mock): + client_mock = mock.MagicMock() + images_mock = mock.MagicMock() + images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable + type(client_mock).images = mock.PropertyMock(return_value=images_mock) + create_client_mock.return_value = client_mock + self.flags(num_retries=0, group='glance') + + ctx = context.RequestContext('fake', 'fake') + host = 'host4' + port = 9295 + use_ssl = False + + client = glance.GlanceClientWrapper(context=ctx, host=host, port=port, + use_ssl=use_ssl) + 
create_client_mock.assert_called_once_with(ctx, host, port, use_ssl, 1) + self.assertRaises(exception.GlanceConnectionFailed, + client.call, ctx, 1, 'get', 'meow') + self.assertFalse(sleep_mock.called) + + @mock.patch('time.sleep') + @mock.patch('nova.image.glance._create_glance_client') + def test_static_client_with_retries(self, create_client_mock, + sleep_mock): + self.flags(num_retries=1, group='glance') + client_mock = mock.MagicMock() + images_mock = mock.MagicMock() + images_mock.get.side_effect = [ + glanceclient.exc.ServiceUnavailable, + None + ] + type(client_mock).images = mock.PropertyMock(return_value=images_mock) + create_client_mock.return_value = client_mock + + ctx = context.RequestContext('fake', 'fake') + host = 'host4' + port = 9295 + use_ssl = False + + client = glance.GlanceClientWrapper(context=ctx, + host=host, port=port, use_ssl=use_ssl) + client.call(ctx, 1, 'get', 'meow') + sleep_mock.assert_called_once_with(1) + + @mock.patch('random.shuffle') + @mock.patch('time.sleep') + @mock.patch('nova.image.glance._create_glance_client') + def test_default_client_without_retries(self, create_client_mock, + sleep_mock, shuffle_mock): + api_servers = [ + 'host1:9292', + 'https://host2:9293', + 'http://host3:9294' + ] + client_mock = mock.MagicMock() + images_mock = mock.MagicMock() + images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable + type(client_mock).images = mock.PropertyMock(return_value=images_mock) + create_client_mock.return_value = client_mock + + shuffle_mock.return_value = api_servers + self.flags(num_retries=0, group='glance') + self.flags(api_servers=api_servers, group='glance') + + # Here we are testing the behaviour that calling client.call() twice + # when there are no retries will cycle through the api_servers and not + # sleep (which would be an indication of a retry) + ctx = context.RequestContext('fake', 'fake') + + client = glance.GlanceClientWrapper() + self.assertRaises(exception.GlanceConnectionFailed, + 
client.call, ctx, 1, 'get', 'meow') + self.assertFalse(sleep_mock.called) + + self.assertRaises(exception.GlanceConnectionFailed, + client.call, ctx, 1, 'get', 'meow') + self.assertFalse(sleep_mock.called) + + create_client_mock.assert_has_calls( + [ + mock.call(ctx, 'host1', 9292, False, 1), + mock.call(ctx, 'host2', 9293, True, 1), + ] + ) + + @mock.patch('random.shuffle') + @mock.patch('time.sleep') + @mock.patch('nova.image.glance._create_glance_client') + def test_default_client_with_retries(self, create_client_mock, + sleep_mock, shuffle_mock): + api_servers = [ + 'host1:9292', + 'https://host2:9293', + 'http://host3:9294' + ] + client_mock = mock.MagicMock() + images_mock = mock.MagicMock() + images_mock.get.side_effect = [ + glanceclient.exc.ServiceUnavailable, + None + ] + type(client_mock).images = mock.PropertyMock(return_value=images_mock) + create_client_mock.return_value = client_mock + + self.flags(num_retries=1, group='glance') + self.flags(api_servers=api_servers, group='glance') + + ctx = context.RequestContext('fake', 'fake') + + # And here we're testing that if num_retries is not 0, then we attempt + # to retry the same connection action against the next client. 
+ + client = glance.GlanceClientWrapper() + client.call(ctx, 1, 'get', 'meow') + + create_client_mock.assert_has_calls( + [ + mock.call(ctx, 'host1', 9292, False, 1), + mock.call(ctx, 'host2', 9293, True, 1), + ] + ) + sleep_mock.assert_called_once_with(1) + + class TestDownloadNoDirectUri(test.NoDBTestCase): """Tests the download method of the GlanceImageService when the @@ -421,18 +623,6 @@ def test_download_direct_no_mod_fallback(self, show_mock, writer.close.assert_called_once_with() -def _create_failing_glance_client(info): - class MyGlanceStubClient(glance_stubs.StubGlanceClient): - """A client that fails the first time, then succeeds.""" - def get(self, image_id): - info['num_calls'] += 1 - if info['num_calls'] == 1: - raise glanceclient.exc.ServiceUnavailable('') - return {} - - return MyGlanceStubClient() - - class TestIsImageAvailable(test.NoDBTestCase): """Tests the internal _is_image_available function.""" @@ -927,191 +1117,6 @@ def test_delete_client_failure(self): mock.sentinel.image_id) -class TestGlanceClientWrapper(test.NoDBTestCase): - - def setUp(self): - super(TestGlanceClientWrapper, self).setUp() - # host1 has no scheme, which is http by default - self.flags(api_servers=['host1:9292', 'https://host2:9293', - 'http://host3:9294'], group='glance') - - # Make the test run fast - def _fake_sleep(secs): - pass - self.stubs.Set(time, 'sleep', _fake_sleep) - - def test_headers_passed_glanceclient(self): - auth_token = 'auth_token' - ctxt = context.RequestContext('fake', 'fake', auth_token=auth_token) - fake_host = 'host4' - fake_port = 9295 - fake_use_ssl = False - - def _get_fake_glanceclient(version, endpoint, **params): - fake_client = glance_stubs.StubGlanceClient(version, - endpoint, **params) - self.assertIsNotNone(fake_client.auth_token) - self.assertIsNotNone(fake_client.identity_headers) - self.assertEqual(fake_client.identity_header['X-Auth_Token'], - auth_token) - self.assertEqual(fake_client.identity_header['X-User-Id'], 'fake') - 
self.assertIsNone(fake_client.identity_header['X-Roles']) - self.assertIsNone(fake_client.identity_header['X-Tenant-Id']) - self.assertIsNone(fake_client.identity_header['X-Service-Catalog']) - self.assertEqual(fake_client. - identity_header['X-Identity-Status'], - 'Confirmed') - - self.stubs.Set(glanceclient.Client, '__init__', - _get_fake_glanceclient) - - glance._create_glance_client(ctxt, fake_host, fake_port, fake_use_ssl) - - def test_static_client_without_retries(self): - self.flags(num_retries=0, group='glance') - - ctxt = context.RequestContext('fake', 'fake') - fake_host = 'host4' - fake_port = 9295 - fake_use_ssl = False - - info = {'num_calls': 0} - - def _fake_create_glance_client(context, host, port, use_ssl, version): - self.assertEqual(host, fake_host) - self.assertEqual(port, fake_port) - self.assertEqual(use_ssl, fake_use_ssl) - return _create_failing_glance_client(info) - - self.stubs.Set(glance, '_create_glance_client', - _fake_create_glance_client) - - client = glance.GlanceClientWrapper(context=ctxt, - host=fake_host, port=fake_port, use_ssl=fake_use_ssl) - self.assertRaises(exception.GlanceConnectionFailed, - client.call, ctxt, 1, 'get', 'meow') - self.assertEqual(info['num_calls'], 1) - - def test_default_client_without_retries(self): - self.flags(num_retries=0, group='glance') - - ctxt = context.RequestContext('fake', 'fake') - - info = {'num_calls': 0, - 'host': 'host1', - 'port': 9292, - 'use_ssl': False} - - # Leave the list in a known-order - def _fake_shuffle(servers): - pass - - def _fake_create_glance_client(context, host, port, use_ssl, version): - self.assertEqual(host, info['host']) - self.assertEqual(port, info['port']) - self.assertEqual(use_ssl, info['use_ssl']) - return _create_failing_glance_client(info) - - self.stubs.Set(random, 'shuffle', _fake_shuffle) - self.stubs.Set(glance, '_create_glance_client', - _fake_create_glance_client) - - client = glance.GlanceClientWrapper() - client2 = glance.GlanceClientWrapper() - 
self.assertRaises(exception.GlanceConnectionFailed, - client.call, ctxt, 1, 'get', 'meow') - self.assertEqual(info['num_calls'], 1) - - info = {'num_calls': 0, - 'host': 'host2', - 'port': 9293, - 'use_ssl': True} - - def _fake_shuffle2(servers): - # fake shuffle in a known manner - servers.append(servers.pop(0)) - - self.stubs.Set(random, 'shuffle', _fake_shuffle2) - - self.assertRaises(exception.GlanceConnectionFailed, - client2.call, ctxt, 1, 'get', 'meow') - self.assertEqual(info['num_calls'], 1) - - def test_static_client_with_retries(self): - self.flags(num_retries=1, group='glance') - - ctxt = context.RequestContext('fake', 'fake') - fake_host = 'host4' - fake_port = 9295 - fake_use_ssl = False - - info = {'num_calls': 0} - - def _fake_create_glance_client(context, host, port, use_ssl, version): - self.assertEqual(host, fake_host) - self.assertEqual(port, fake_port) - self.assertEqual(use_ssl, fake_use_ssl) - return _create_failing_glance_client(info) - - self.stubs.Set(glance, '_create_glance_client', - _fake_create_glance_client) - - client = glance.GlanceClientWrapper(context=ctxt, - host=fake_host, port=fake_port, use_ssl=fake_use_ssl) - client.call(ctxt, 1, 'get', 'meow') - self.assertEqual(info['num_calls'], 2) - - def test_default_client_with_retries(self): - self.flags(num_retries=1, group='glance') - - ctxt = context.RequestContext('fake', 'fake') - - info = {'num_calls': 0, - 'host0': 'host1', - 'port0': 9292, - 'use_ssl0': False, - 'host1': 'host2', - 'port1': 9293, - 'use_ssl1': True} - - # Leave the list in a known-order - def _fake_shuffle(servers): - pass - - def _fake_create_glance_client(context, host, port, use_ssl, version): - attempt = info['num_calls'] - self.assertEqual(host, info['host%s' % attempt]) - self.assertEqual(port, info['port%s' % attempt]) - self.assertEqual(use_ssl, info['use_ssl%s' % attempt]) - return _create_failing_glance_client(info) - - self.stubs.Set(random, 'shuffle', _fake_shuffle) - self.stubs.Set(glance, 
'_create_glance_client', - _fake_create_glance_client) - - client = glance.GlanceClientWrapper() - client2 = glance.GlanceClientWrapper() - client.call(ctxt, 1, 'get', 'meow') - self.assertEqual(info['num_calls'], 2) - - def _fake_shuffle2(servers): - # fake shuffle in a known manner - servers.append(servers.pop(0)) - - self.stubs.Set(random, 'shuffle', _fake_shuffle2) - - info = {'num_calls': 0, - 'host0': 'host2', - 'port0': 9293, - 'use_ssl0': True, - 'host1': 'host3', - 'port1': 9294, - 'use_ssl1': False} - - client2.call(ctxt, 1, 'get', 'meow') - self.assertEqual(info['num_calls'], 2) - - class TestGlanceUrl(test.NoDBTestCase): def test_generate_glance_http_url(self): From 8a50755b9df445a07140f385f1ff32db20bf683b Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Sat, 9 Aug 2014 15:46:01 -0700 Subject: [PATCH 393/486] Remove final use of glance_stubs Removes the final piece of glance_stubs from the image unit tests. Change-Id: I0db3b6c83edaf91466e85d423ce75b3e75fd3517 Closes-bug: #1293938 --- nova/tests/glance/__init__.py | 18 ---------------- nova/tests/glance/stubs.py | 37 --------------------------------- nova/tests/image/test_glance.py | 17 +++++++++++++-- 3 files changed, 15 insertions(+), 57 deletions(-) delete mode 100644 nova/tests/glance/__init__.py delete mode 100644 nova/tests/glance/stubs.py diff --git a/nova/tests/glance/__init__.py b/nova/tests/glance/__init__.py deleted file mode 100644 index eac840c7ed..0000000000 --- a/nova/tests/glance/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2011 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -:mod:`glance` -- Stubs for Glance -================================= -""" diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py deleted file mode 100644 index 42d5e4ab6b..0000000000 --- a/nova/tests/glance/stubs.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2011 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -class FakeImage(object): - def __init__(self, metadata): - IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', - 'container_format', 'checksum', 'id', - 'name', 'created_at', 'updated_at', - 'deleted', 'deleted_at', 'status', - 'min_disk', 'min_ram', 'is_public'] - raw = dict.fromkeys(IMAGE_ATTRIBUTES) - raw.update(metadata) - self.__dict__['raw'] = raw - - def __getattr__(self, key): - try: - return self.__dict__['raw'][key] - except KeyError: - raise AttributeError(key) - - def __setattr__(self, key, value): - try: - self.__dict__['raw'][key] = value - except KeyError: - raise AttributeError(key) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index fbc795a84c..d72061e218 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -26,7 +26,6 @@ from nova import exception from nova.image import glance from nova import test -from nova.tests.glance import stubs as glance_stubs from nova import utils CONF = cfg.CONF @@ -57,7 +56,9 @@ def test_convert_timestamps_to_datetimes(self): def _test_extracting_missing_attributes(self, include_locations): # Verify behavior from glance objects that are missing attributes - class MyFakeGlanceImage(glance_stubs.FakeImage): + # TODO(jaypipes): Find a better way of testing this crappy + # glanceclient magic object stuff. 
+ class MyFakeGlanceImage(object): def __init__(self, metadata): IMAGE_ATTRIBUTES = ['size', 'owner', 'id', 'created_at', 'updated_at', 'status', 'min_disk', @@ -66,6 +67,18 @@ def __init__(self, metadata): raw.update(metadata) self.__dict__['raw'] = raw + def __getattr__(self, key): + try: + return self.__dict__['raw'][key] + except KeyError: + raise AttributeError(key) + + def __setattr__(self, key, value): + try: + self.__dict__['raw'][key] = value + except KeyError: + raise AttributeError(key) + metadata = { 'id': 1, 'created_at': NOW_DATETIME, From 1dbf5119b2cf51e70ad52474f0e6776edae9d6cf Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 15 Aug 2014 09:27:52 -0700 Subject: [PATCH 394/486] Track object version relationships This adds another developer "helper trap" for inter-object version dependencies. If an object that another object depends on is bumped in version, this test will note the change and remind the developer (or reviewer) that the parent needs a version bump and backport rule added. 
Change-Id: Ia81d034f850173401cd84a79b15ae0f7c80b9653 --- nova/tests/objects/test_objects.py | 49 ++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index a4eadcfce8..05d20f9fce 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -980,6 +980,22 @@ def test_object_serialization_iterables(self): } +object_relationships = { + 'BlockDeviceMapping': {'Instance': '1.13'}, + 'FixedIP': {'Instance': '1.13', 'Network': '1.2', + 'VirtualInterface': '1.0'}, + 'FloatingIP': {'FixedIP': '1.1'}, + 'Instance': {'InstanceFault': '1.2', + 'InstanceInfoCache': '1.5', + 'PciDeviceList': '1.0', + 'SecurityGroupList': '1.0'}, + 'MyObj': {'MyOwnedObject': '1.0'}, + 'SecurityGroupRule': {'SecurityGroup': '1.1'}, + 'Service': {'ComputeNode': '1.4'}, + 'TestSubclassedObject': {'MyOwnedObject': '1.0'} +} + + class TestObjectVersions(test.TestCase): def setUp(self): super(TestObjectVersions, self).setUp() @@ -1031,3 +1047,36 @@ def test_versions(self): 'Some objects have changed; please make sure the ' 'versions have been bumped, and then update their ' 'hashes here.') + + def _build_tree(self, tree, obj_class): + obj_name = obj_class.obj_name() + if obj_name in tree: + return + + for name, field in obj_class.fields.items(): + if isinstance(field._type, fields.Object): + sub_obj_name = field._type._obj_name + sub_obj_class = base.NovaObject._obj_classes[sub_obj_name][0] + self._build_tree(tree, sub_obj_class) + tree.setdefault(obj_name, {}) + tree[obj_name][sub_obj_name] = sub_obj_class.VERSION + + def test_relationships(self): + tree = {} + for obj_name in base.NovaObject._obj_classes.keys(): + self._build_tree(tree, base.NovaObject._obj_classes[obj_name][0]) + + stored = set([(x, str(y)) for x, y in object_relationships.items()]) + computed = set([(x, str(y)) for x, y in tree.items()]) + changed = stored - computed + expected = {} + actual = {} + for 
name, deps in changed: + expected[name] = object_relationships.get(name) + actual[name] = tree.get(name) + self.assertEqual(expected, actual, + 'Some objects have changed dependencies. ' + 'Please make sure to bump the versions of ' + 'parent objects and provide a rule in their ' + 'obj_make_compatible() routines to backlevel ' + 'the child object.') From 062b1f8c0f6ba09ab6764ea512c3615fc93aaf08 Mon Sep 17 00:00:00 2001 From: Thang Pham Date: Sat, 16 Aug 2014 21:40:29 -0400 Subject: [PATCH 395/486] Move _is_mapping to more central location The logic to identify volumes is currently a nested function in _default_block_device_names, named _is_mapping. It should be moved to a more general location so others could utilize it and allow it to be properly unit tested. The following patch moves _is_mapping to nova/virt/block_device.py and renames it to is_block_device_mapping. Change-Id: I560abc4b57ca5bd195282af7cd1ab9bbf7600b67 Closes-Bug: #1351810 --- nova/compute/manager.py | 8 ++------ nova/tests/virt/test_block_device.py | 16 ++++++++++++++++ nova/virt/block_device.py | 6 ++++++ 3 files changed, 24 insertions(+), 6 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 718f285f1f..674c05c58d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1693,16 +1693,12 @@ def _default_block_device_names(self, context, instance, if update_root_bdm: root_bdm.save() - def _is_mapping(bdm): - return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank') - and bdm.destination_type == 'volume' - and driver_block_device.is_implemented(bdm)) - ephemerals = filter(block_device.new_format_is_ephemeral, block_devices) swap = filter(block_device.new_format_is_swap, block_devices) - block_device_mapping = filter(_is_mapping, block_devices) + block_device_mapping = filter( + driver_block_device.is_block_device_mapping, block_devices) self._default_device_names_for_instance(instance, root_device_name, diff --git 
a/nova/tests/virt/test_block_device.py b/nova/tests/virt/test_block_device.py index fa82c72cdf..a16ade1e4f 100644 --- a/nova/tests/virt/test_block_device.py +++ b/nova/tests/virt/test_block_device.py @@ -666,3 +666,19 @@ def test_is_implemented(self): local_image = self.image_bdm.copy() local_image['destination_type'] = 'local' self.assertFalse(driver_block_device.is_implemented(local_image)) + + def test_is_block_device_mapping(self): + test_swap = self.driver_classes['swap'](self.swap_bdm) + test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm) + test_image = self.driver_classes['image'](self.image_bdm) + test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm) + test_volume = self.driver_classes['volume'](self.volume_bdm) + test_blank = self.driver_classes['blank'](self.blank_bdm) + + for bdm in (test_image, test_snapshot, test_volume, test_blank): + self.assertTrue(driver_block_device.is_block_device_mapping( + bdm._bdm_obj)) + + for bdm in (test_swap, test_ephemeral): + self.assertFalse(driver_block_device.is_block_device_mapping( + bdm._bdm_obj)) diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py index 1f8adbe0e6..e58d6be1ea 100644 --- a/nova/virt/block_device.py +++ b/nova/virt/block_device.py @@ -466,3 +466,9 @@ def is_implemented(bdm): except _NotTransformable: pass return False + + +def is_block_device_mapping(bdm): + return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank') + and bdm.destination_type == 'volume' + and is_implemented(bdm)) From db7433a2794e734e98d947957456d21ecf0ef4a7 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sun, 17 Aug 2014 01:11:50 -0700 Subject: [PATCH 396/486] Image caching tests: use list comprehension Use a list comprehension for creating a list of instance objects for the tests. 
Change-Id: If25d024eeeb1503d7a7dc2dbc895b2bf3152c7db --- nova/tests/virt/libvirt/test_imagecache.py | 8 ++------ nova/tests/virt/vmwareapi/test_imagecache.py | 7 ++----- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/nova/tests/virt/libvirt/test_imagecache.py b/nova/tests/virt/libvirt/test_imagecache.py index bef87dad69..de238f5c11 100644 --- a/nova/tests/virt/libvirt/test_imagecache.py +++ b/nova/tests/virt/libvirt/test_imagecache.py @@ -623,12 +623,8 @@ def isfile(path): 'uuid': '456', 'vm_state': '', 'task_state': ''}] - - all_instances = [] - for instance in instances: - all_instances.append(fake_instance.fake_instance_obj( - None, **instance)) - + all_instances = [fake_instance.fake_instance_obj(None, **instance) + for instance in instances] image_cache_manager = imagecache.ImageCacheManager() # Fake the utils call which finds the backing image diff --git a/nova/tests/virt/vmwareapi/test_imagecache.py b/nova/tests/virt/vmwareapi/test_imagecache.py index 671c3ac9e1..fa3ceeb583 100644 --- a/nova/tests/virt/vmwareapi/test_imagecache.py +++ b/nova/tests/virt/vmwareapi/test_imagecache.py @@ -234,11 +234,8 @@ def fake_age_cached_images(context, datastore, 'uuid': '456', 'vm_state': '', 'task_state': ''}] - all_instances = [] - for instance in instances: - all_instances.append(fake_instance.fake_instance_obj( - None, **instance)) - + all_instances = [fake_instance.fake_instance_obj(None, **instance) + for instance in instances] self.images = set(['1', '2']) datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref') dc_info = vmops.DcInfo(ref='dc_ref', name='name', From af8f401eb58154dc19e2732c212ea97a4aa6726c Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Thu, 14 Aug 2014 17:23:23 +0800 Subject: [PATCH 397/486] Change 'admin_password' into v2 style for servers extension This patch changes 'admin_password' to 'adminPass' that same with v2 api. And also change the related unittest. 
The differences between v2 and v3 are described on the wiki page https://wiki.openstack.org/wiki/NovaAPIv2tov3. Partially implements blueprint v2-on-v3-api Change-Id: Ie5675f98bb15c55a191a207171e2ffe3be8e6deb --- .../all_extensions/server-post-resp.json | 2 +- .../consoles/server-post-resp.json | 2 +- .../server-action-rebuild-resp.json | 2 +- .../os-access-ips/server-post-resp.json | 2 +- .../os-admin-actions/server-post-resp.json | 2 +- .../os-admin-password/server-post-resp.json | 2 +- .../server-post-resp.json | 2 +- .../server-post-resp.json | 2 +- .../os-config-drive/server-post-resp.json | 2 +- .../server-post-resp.json | 2 +- .../os-console-output/server-post-resp.json | 2 +- .../os-create-backup/server-post-resp.json | 2 +- .../os-deferred-delete/server-post-resp.json | 2 +- .../os-evacuate/server-post-resp.json | 2 +- .../server-post-resp.json | 2 +- .../server-post-resp.json | 2 +- .../os-extended-status/server-post-resp.json | 2 +- .../os-extended-volumes/server-post-resp.json | 2 +- .../server-post-resp.json | 2 +- .../os-lock-server/server-post-resp.json | 2 +- .../os-migrate-server/server-post-resp.json | 2 +- .../os-multinic/server-post-resp.json | 2 +- .../os-pause-server/server-post-resp.json | 2 +- .../api_samples/os-pci/server-post-resp.json | 2 +- .../os-remote-consoles/server-post-resp.json | 2 +- .../os-rescue/server-post-resp.json | 2 +- .../scheduler-hints-post-resp.json | 2 +- .../os-security-groups/server-post-resp.json | 2 +- .../server-post-resp.json | 2 +- .../server-post-resp.json | 2 +- .../os-server-usage/server-post-resp.json | 2 +- .../os-shelve/server-post-resp.json | 2 +- .../os-suspend-server/server-post-resp.json | 2 +- .../os-user-data/userdata-post-resp.json | 2 +- .../server-ips/server-post-resp.json | 2 +- .../server-metadata/server-post-resp.json | 2 +- ...ver-action-rebuild-preserve-ephemeral.json | 2 +- .../servers/server-action-rebuild-resp.json | 2 +- .../servers/server-action-rebuild.json | 2 +- 
.../api_samples/servers/server-post-resp.json | 2 +- .../openstack/compute/plugins/v3/servers.py | 8 ++++---- .../plugins/v3/test_multiple_create.py | 2 +- .../compute/plugins/v3/test_server_actions.py | 10 +++++----- .../compute/plugins/v3/test_servers.py | 20 +++++++++---------- .../all_extensions/server-post-resp.json.tpl | 2 +- .../consoles/server-post-resp.json.tpl | 2 +- .../server-action-rebuild-resp.json.tpl | 2 +- .../os-access-ips/server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../os-config-drive/server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../os-evacuate/server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../os-lock-server/server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../os-multinic/server-post-resp.json.tpl | 2 +- .../os-pause-server/server-post-resp.json.tpl | 2 +- .../os-pci/server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../os-rescue/server-post-resp.json.tpl | 2 +- .../scheduler-hints-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../os-server-usage/server-post-resp.json.tpl | 2 +- .../os-shelve/server-post-resp.json.tpl | 2 +- .../server-post-resp.json.tpl | 2 +- .../os-user-data/userdata-post-resp.json.tpl | 2 +- .../server-ips/server-post-resp.json.tpl | 2 +- .../server-metadata/server-post-resp.json.tpl | 2 +- ...n-rebuild-preserve-ephemeral-resp.json.tpl | 2 +- ...action-rebuild-preserve-ephemeral.json.tpl | 2 +- .../server-action-rebuild-resp.json.tpl | 2 +- 
.../servers/server-action-rebuild.json.tpl | 2 +- .../servers/server-post-resp.json.tpl | 2 +- 85 files changed, 101 insertions(+), 101 deletions(-) diff --git a/doc/v3/api_samples/all_extensions/server-post-resp.json b/doc/v3/api_samples/all_extensions/server-post-resp.json index 495c2a9c7d..1557202de0 100644 --- a/doc/v3/api_samples/all_extensions/server-post-resp.json +++ b/doc/v3/api_samples/all_extensions/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "zPnp2GseTqG4", + "adminPass": "zPnp2GseTqG4", "id": "8195065c-fea4-4d57-b93f-5c5c63fe90e8", "links": [ { diff --git a/doc/v3/api_samples/consoles/server-post-resp.json b/doc/v3/api_samples/consoles/server-post-resp.json index a83ab07f7d..0dfaf9148e 100644 --- a/doc/v3/api_samples/consoles/server-post-resp.json +++ b/doc/v3/api_samples/consoles/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "C3r5xKS73Y7S", + "adminPass": "C3r5xKS73Y7S", "id": "3f19c120-f64a-4faf-848e-33900b752f83", "links": [ { diff --git a/doc/v3/api_samples/os-access-ips/server-action-rebuild-resp.json b/doc/v3/api_samples/os-access-ips/server-action-rebuild-resp.json index 3d92616716..22e09b6f7b 100644 --- a/doc/v3/api_samples/os-access-ips/server-action-rebuild-resp.json +++ b/doc/v3/api_samples/os-access-ips/server-action-rebuild-resp.json @@ -10,7 +10,7 @@ } ] }, - "admin_password": "99WHAxN8gpvg", + "adminPass": "99WHAxN8gpvg", "created": "2013-11-06T07:51:09Z", "flavor": { "id": "1", diff --git a/doc/v3/api_samples/os-access-ips/server-post-resp.json b/doc/v3/api_samples/os-access-ips/server-post-resp.json index d3edc5eb41..a756cde26e 100644 --- a/doc/v3/api_samples/os-access-ips/server-post-resp.json +++ b/doc/v3/api_samples/os-access-ips/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "n7JGBda664QG", + "adminPass": "n7JGBda664QG", "id": "934760e1-2b0b-4f9e-a916-eac1e69839dc", "links": [ { diff --git a/doc/v3/api_samples/os-admin-actions/server-post-resp.json 
b/doc/v3/api_samples/os-admin-actions/server-post-resp.json index 270cb84634..353517739f 100644 --- a/doc/v3/api_samples/os-admin-actions/server-post-resp.json +++ b/doc/v3/api_samples/os-admin-actions/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "DM3QzjhGTzLB", + "adminPass": "DM3QzjhGTzLB", "id": "bebeec79-497e-4711-a311-d0d2e3dfc73b", "links": [ { diff --git a/doc/v3/api_samples/os-admin-password/server-post-resp.json b/doc/v3/api_samples/os-admin-password/server-post-resp.json index b67cb859bd..e81b2aab7c 100644 --- a/doc/v3/api_samples/os-admin-password/server-post-resp.json +++ b/doc/v3/api_samples/os-admin-password/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "oCYTjD2KnRtB", + "adminPass": "oCYTjD2KnRtB", "id": "aa3130b9-dffc-417b-aa03-93d5394a2fa6", "links": [ { diff --git a/doc/v3/api_samples/os-attach-interfaces/server-post-resp.json b/doc/v3/api_samples/os-attach-interfaces/server-post-resp.json index cc809d3075..4ef70a0a90 100644 --- a/doc/v3/api_samples/os-attach-interfaces/server-post-resp.json +++ b/doc/v3/api_samples/os-attach-interfaces/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "fjPxt8d8YcSR", + "adminPass": "fjPxt8d8YcSR", "id": "c937be78-c423-495b-a99a-e590ab6f30ba", "links": [ { diff --git a/doc/v3/api_samples/os-availability-zone/server-post-resp.json b/doc/v3/api_samples/os-availability-zone/server-post-resp.json index df24e0d2b9..7cd8d888b5 100644 --- a/doc/v3/api_samples/os-availability-zone/server-post-resp.json +++ b/doc/v3/api_samples/os-availability-zone/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "a2apKsfp7Rom", + "adminPass": "a2apKsfp7Rom", "id": "e88c3898-e971-42e5-8325-b7ff921efb15", "links": [ { diff --git a/doc/v3/api_samples/os-config-drive/server-post-resp.json b/doc/v3/api_samples/os-config-drive/server-post-resp.json index cdc2c592f3..88388573a7 100644 --- a/doc/v3/api_samples/os-config-drive/server-post-resp.json 
+++ b/doc/v3/api_samples/os-config-drive/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "h2cx3Lm47BJc", + "adminPass": "h2cx3Lm47BJc", "id": "f0318e69-11eb-4aed-9840-59b6c72beee8", "links": [ { diff --git a/doc/v3/api_samples/os-console-auth-tokens/server-post-resp.json b/doc/v3/api_samples/os-console-auth-tokens/server-post-resp.json index 3d22d59aa6..e48a97813e 100644 --- a/doc/v3/api_samples/os-console-auth-tokens/server-post-resp.json +++ b/doc/v3/api_samples/os-console-auth-tokens/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "Kwg5tff6KiUU", + "adminPass": "Kwg5tff6KiUU", "id": "8619225c-67c8-424f-9b46-cec5bad137a2", "links": [ { diff --git a/doc/v3/api_samples/os-console-output/server-post-resp.json b/doc/v3/api_samples/os-console-output/server-post-resp.json index 08ca48799e..66e933a74c 100644 --- a/doc/v3/api_samples/os-console-output/server-post-resp.json +++ b/doc/v3/api_samples/os-console-output/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "8mQaAgHHFsDp", + "adminPass": "8mQaAgHHFsDp", "id": "71e8cf04-0486-46ae-9d18-e51f4978fa13", "links": [ { diff --git a/doc/v3/api_samples/os-create-backup/server-post-resp.json b/doc/v3/api_samples/os-create-backup/server-post-resp.json index 270cb84634..353517739f 100644 --- a/doc/v3/api_samples/os-create-backup/server-post-resp.json +++ b/doc/v3/api_samples/os-create-backup/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "DM3QzjhGTzLB", + "adminPass": "DM3QzjhGTzLB", "id": "bebeec79-497e-4711-a311-d0d2e3dfc73b", "links": [ { diff --git a/doc/v3/api_samples/os-deferred-delete/server-post-resp.json b/doc/v3/api_samples/os-deferred-delete/server-post-resp.json index af2d33b2f4..871dfcd0b0 100644 --- a/doc/v3/api_samples/os-deferred-delete/server-post-resp.json +++ b/doc/v3/api_samples/os-deferred-delete/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "hqgU8QzT54wF", + "adminPass": "hqgU8QzT54wF", 
"id": "4a3bde9b-fa37-408d-b167-68e1724c923e", "links": [ { diff --git a/doc/v3/api_samples/os-evacuate/server-post-resp.json b/doc/v3/api_samples/os-evacuate/server-post-resp.json index 19e0537fd6..a15d69b508 100644 --- a/doc/v3/api_samples/os-evacuate/server-post-resp.json +++ b/doc/v3/api_samples/os-evacuate/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "xCQm2Cs2vSFx", + "adminPass": "xCQm2Cs2vSFx", "id": "5f1fbc62-29ed-4e4a-9f15-8affc5e0a796", "links": [ { diff --git a/doc/v3/api_samples/os-extended-availability-zone/server-post-resp.json b/doc/v3/api_samples/os-extended-availability-zone/server-post-resp.json index 6a51c12d7b..8498a90291 100644 --- a/doc/v3/api_samples/os-extended-availability-zone/server-post-resp.json +++ b/doc/v3/api_samples/os-extended-availability-zone/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "87taVVreqh6R", + "adminPass": "87taVVreqh6R", "id": "f22e4521-d03a-4e9f-9fd3-016b9e227219", "links": [ { diff --git a/doc/v3/api_samples/os-extended-server-attributes/server-post-resp.json b/doc/v3/api_samples/os-extended-server-attributes/server-post-resp.json index 80b3a2b5b6..9b72d53c9e 100644 --- a/doc/v3/api_samples/os-extended-server-attributes/server-post-resp.json +++ b/doc/v3/api_samples/os-extended-server-attributes/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "UCvmH8nHXm66", + "adminPass": "UCvmH8nHXm66", "id": "a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "links": [ { diff --git a/doc/v3/api_samples/os-extended-status/server-post-resp.json b/doc/v3/api_samples/os-extended-status/server-post-resp.json index 9d953d9b94..08cf336e8a 100644 --- a/doc/v3/api_samples/os-extended-status/server-post-resp.json +++ b/doc/v3/api_samples/os-extended-status/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "EugbD2jAD2V2", + "adminPass": "EugbD2jAD2V2", "id": "a868cb5e-c794-47bf-9cd8-e302b72bb94b", "links": [ { diff --git 
a/doc/v3/api_samples/os-extended-volumes/server-post-resp.json b/doc/v3/api_samples/os-extended-volumes/server-post-resp.json index 25a567d74c..f49035cbc1 100644 --- a/doc/v3/api_samples/os-extended-volumes/server-post-resp.json +++ b/doc/v3/api_samples/os-extended-volumes/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "uNqGTziMK3px", + "adminPass": "uNqGTziMK3px", "id": "7d62983e-23df-4320-bc89-bbc77f2a2e40", "links": [ { diff --git a/doc/v3/api_samples/os-hide-server-addresses/server-post-resp.json b/doc/v3/api_samples/os-hide-server-addresses/server-post-resp.json index b858f1c655..7ba0133976 100644 --- a/doc/v3/api_samples/os-hide-server-addresses/server-post-resp.json +++ b/doc/v3/api_samples/os-hide-server-addresses/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "5bjyggD6SaSB", + "adminPass": "5bjyggD6SaSB", "id": "3d8bedd4-003d-417a-8cd7-a94cb181185d", "links": [ { diff --git a/doc/v3/api_samples/os-lock-server/server-post-resp.json b/doc/v3/api_samples/os-lock-server/server-post-resp.json index 270cb84634..353517739f 100644 --- a/doc/v3/api_samples/os-lock-server/server-post-resp.json +++ b/doc/v3/api_samples/os-lock-server/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "DM3QzjhGTzLB", + "adminPass": "DM3QzjhGTzLB", "id": "bebeec79-497e-4711-a311-d0d2e3dfc73b", "links": [ { diff --git a/doc/v3/api_samples/os-migrate-server/server-post-resp.json b/doc/v3/api_samples/os-migrate-server/server-post-resp.json index 270cb84634..353517739f 100644 --- a/doc/v3/api_samples/os-migrate-server/server-post-resp.json +++ b/doc/v3/api_samples/os-migrate-server/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "DM3QzjhGTzLB", + "adminPass": "DM3QzjhGTzLB", "id": "bebeec79-497e-4711-a311-d0d2e3dfc73b", "links": [ { diff --git a/doc/v3/api_samples/os-multinic/server-post-resp.json b/doc/v3/api_samples/os-multinic/server-post-resp.json index 7a88d2e911..a7e72d6a7f 100644 --- 
a/doc/v3/api_samples/os-multinic/server-post-resp.json +++ b/doc/v3/api_samples/os-multinic/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "5Y9rR4XaM8Qg", + "adminPass": "5Y9rR4XaM8Qg", "id": "bbe8d469-e8cb-49b1-96d8-f93b68c82355", "links": [ { diff --git a/doc/v3/api_samples/os-pause-server/server-post-resp.json b/doc/v3/api_samples/os-pause-server/server-post-resp.json index 270cb84634..353517739f 100644 --- a/doc/v3/api_samples/os-pause-server/server-post-resp.json +++ b/doc/v3/api_samples/os-pause-server/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "DM3QzjhGTzLB", + "adminPass": "DM3QzjhGTzLB", "id": "bebeec79-497e-4711-a311-d0d2e3dfc73b", "links": [ { diff --git a/doc/v3/api_samples/os-pci/server-post-resp.json b/doc/v3/api_samples/os-pci/server-post-resp.json index 6b9ad18047..deb7cbc1f7 100644 --- a/doc/v3/api_samples/os-pci/server-post-resp.json +++ b/doc/v3/api_samples/os-pci/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "8C5KEgw2cQxu", + "adminPass": "8C5KEgw2cQxu", "id": "fb947804-6a43-499d-9526-3eac8adf7271", "links": [ { diff --git a/doc/v3/api_samples/os-remote-consoles/server-post-resp.json b/doc/v3/api_samples/os-remote-consoles/server-post-resp.json index 3d22d59aa6..e48a97813e 100644 --- a/doc/v3/api_samples/os-remote-consoles/server-post-resp.json +++ b/doc/v3/api_samples/os-remote-consoles/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "Kwg5tff6KiUU", + "adminPass": "Kwg5tff6KiUU", "id": "8619225c-67c8-424f-9b46-cec5bad137a2", "links": [ { diff --git a/doc/v3/api_samples/os-rescue/server-post-resp.json b/doc/v3/api_samples/os-rescue/server-post-resp.json index 2a19467660..19534dcc00 100644 --- a/doc/v3/api_samples/os-rescue/server-post-resp.json +++ b/doc/v3/api_samples/os-rescue/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "8RK85ufqhJVq", + "adminPass": "8RK85ufqhJVq", "id": "edfc3905-1f3c-4819-8fc3-a7d8131cfa22", 
"links": [ { diff --git a/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json b/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json index 6a0e1c1e8d..a06736dd2b 100644 --- a/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json +++ b/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "erQXgJ8NBDD4", + "adminPass": "erQXgJ8NBDD4", "id": "4c8b1df3-46f7-4555-98d8-cdb869aaf9ad", "links": [ { diff --git a/doc/v3/api_samples/os-security-groups/server-post-resp.json b/doc/v3/api_samples/os-security-groups/server-post-resp.json index 4f1d6b752c..df29afea9d 100644 --- a/doc/v3/api_samples/os-security-groups/server-post-resp.json +++ b/doc/v3/api_samples/os-security-groups/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "ki8cbWeZdxH6", + "adminPass": "ki8cbWeZdxH6", "id": "2dabdd93-ced7-4607-a542-2516de84e0e5", "links": [ { diff --git a/doc/v3/api_samples/os-server-diagnostics/server-post-resp.json b/doc/v3/api_samples/os-server-diagnostics/server-post-resp.json index 03db3eab87..cb16c18038 100644 --- a/doc/v3/api_samples/os-server-diagnostics/server-post-resp.json +++ b/doc/v3/api_samples/os-server-diagnostics/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "H83mnjinc5jy", + "adminPass": "H83mnjinc5jy", "id": "b2bbf280-a78d-4724-90ba-b00dd5659097", "links": [ { diff --git a/doc/v3/api_samples/os-server-external-events/server-post-resp.json b/doc/v3/api_samples/os-server-external-events/server-post-resp.json index 270cb84634..353517739f 100644 --- a/doc/v3/api_samples/os-server-external-events/server-post-resp.json +++ b/doc/v3/api_samples/os-server-external-events/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "DM3QzjhGTzLB", + "adminPass": "DM3QzjhGTzLB", "id": "bebeec79-497e-4711-a311-d0d2e3dfc73b", "links": [ { diff --git a/doc/v3/api_samples/os-server-usage/server-post-resp.json 
b/doc/v3/api_samples/os-server-usage/server-post-resp.json index 3394fde4b1..b725ed5d81 100644 --- a/doc/v3/api_samples/os-server-usage/server-post-resp.json +++ b/doc/v3/api_samples/os-server-usage/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "kmspFLBzL75q", + "adminPass": "kmspFLBzL75q", "id": "f8eeb5ba-19b7-49be-a1a9-10250dda5b14", "links": [ { diff --git a/doc/v3/api_samples/os-shelve/server-post-resp.json b/doc/v3/api_samples/os-shelve/server-post-resp.json index 2d2eafa2eb..c6c478e625 100644 --- a/doc/v3/api_samples/os-shelve/server-post-resp.json +++ b/doc/v3/api_samples/os-shelve/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "LJGRdNsvzh6z", + "adminPass": "LJGRdNsvzh6z", "id": "1d08717a-835e-4dca-9bfb-166fa18a6715", "links": [ { diff --git a/doc/v3/api_samples/os-suspend-server/server-post-resp.json b/doc/v3/api_samples/os-suspend-server/server-post-resp.json index 270cb84634..353517739f 100644 --- a/doc/v3/api_samples/os-suspend-server/server-post-resp.json +++ b/doc/v3/api_samples/os-suspend-server/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "DM3QzjhGTzLB", + "adminPass": "DM3QzjhGTzLB", "id": "bebeec79-497e-4711-a311-d0d2e3dfc73b", "links": [ { diff --git a/doc/v3/api_samples/os-user-data/userdata-post-resp.json b/doc/v3/api_samples/os-user-data/userdata-post-resp.json index f9e7b1172d..aeeae8d86c 100644 --- a/doc/v3/api_samples/os-user-data/userdata-post-resp.json +++ b/doc/v3/api_samples/os-user-data/userdata-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "2xHoDU7Gd7vw", + "adminPass": "2xHoDU7Gd7vw", "id": "976a62bb-0d4a-4e17-9044-1864e888a557", "links": [ { diff --git a/doc/v3/api_samples/server-ips/server-post-resp.json b/doc/v3/api_samples/server-ips/server-post-resp.json index 482fc6b077..70064c0996 100644 --- a/doc/v3/api_samples/server-ips/server-post-resp.json +++ b/doc/v3/api_samples/server-ips/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { 
- "admin_password": "Ag463BYwnXEf", + "adminPass": "Ag463BYwnXEf", "id": "0813a7dc-8e97-42df-9634-957109499bf0", "links": [ { diff --git a/doc/v3/api_samples/server-metadata/server-post-resp.json b/doc/v3/api_samples/server-metadata/server-post-resp.json index a20d117f0c..aa8939bf18 100644 --- a/doc/v3/api_samples/server-metadata/server-post-resp.json +++ b/doc/v3/api_samples/server-metadata/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "ys9M8HQXvwEJ", + "adminPass": "ys9M8HQXvwEJ", "id": "a6ebe5b4-b68b-420b-9c1e-620c4d3e0389", "links": [ { diff --git a/doc/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json b/doc/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json index 4da2c1a74b..7109160b2b 100644 --- a/doc/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json +++ b/doc/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json @@ -2,7 +2,7 @@ "rebuild" : { "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", - "admin_password" : "seekr3t", + "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" }, diff --git a/doc/v3/api_samples/servers/server-action-rebuild-resp.json b/doc/v3/api_samples/servers/server-action-rebuild-resp.json index 2f17a0d529..abb6dfff88 100644 --- a/doc/v3/api_samples/servers/server-action-rebuild-resp.json +++ b/doc/v3/api_samples/servers/server-action-rebuild-resp.json @@ -10,7 +10,7 @@ } ] }, - "admin_password": "seekr3t", + "adminPass": "seekr3t", "created": "2013-11-14T06:29:00Z", "flavor": { "id": "1", diff --git a/doc/v3/api_samples/servers/server-action-rebuild.json b/doc/v3/api_samples/servers/server-action-rebuild.json index 964f6fa1d6..ba7de21e65 100644 --- a/doc/v3/api_samples/servers/server-action-rebuild.json +++ b/doc/v3/api_samples/servers/server-action-rebuild.json @@ -2,7 +2,7 @@ "rebuild" : { "image_ref" : 
"http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", - "admin_password" : "seekr3t", + "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" }, diff --git a/doc/v3/api_samples/servers/server-post-resp.json b/doc/v3/api_samples/servers/server-post-resp.json index 6e2e900f64..3e69dffe68 100644 --- a/doc/v3/api_samples/servers/server-post-resp.json +++ b/doc/v3/api_samples/servers/server-post-resp.json @@ -1,6 +1,6 @@ { "server": { - "admin_password": "6NpUwoz2QDRN", + "adminPass": "6NpUwoz2QDRN", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "links": [ { diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index 1009556941..cd679f6608 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -564,7 +564,7 @@ def create(self, req, body): server = self._view_builder.create(req, instances[0]) if CONF.enable_instance_password: - server['server']['admin_password'] = password + server['server']['adminPass'] = password robj = wsgi.ResponseObject(server) @@ -907,7 +907,7 @@ def _action_rebuild(self, req, id, body): # Add on the admin_password attribute since the view doesn't do it # unless instance passwords are disabled if CONF.enable_instance_password: - view['server']['admin_password'] = password + view['server']['adminPass'] = password robj = wsgi.ResponseObject(view) return self._add_location(robj) @@ -981,12 +981,12 @@ def _action_create_image(self, req, id, body): def _get_server_admin_password(self, server): """Determine the admin password for a server on creation.""" try: - password = server['admin_password'] + password = server['adminPass'] self._validate_admin_password(password) except KeyError: password = utils.generate_password() except ValueError: - raise exc.HTTPBadRequest(explanation=_("Invalid admin_password")) + raise exc.HTTPBadRequest(explanation=_("Invalid adminPass")) return 
password diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_multiple_create.py b/nova/tests/api/openstack/compute/plugins/v3/test_multiple_create.py index be1ea941e7..c5ffc59226 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_multiple_create.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_multiple_create.py @@ -371,7 +371,7 @@ def test_create_multiple_instances_pass_disabled(self): def _check_admin_password_len(self, server_dict): """utility function - check server_dict for admin_password length.""" self.assertEqual(CONF.password_length, - len(server_dict["admin_password"])) + len(server_dict["adminPass"])) def _check_admin_password_missing(self, server_dict): """utility function - check server_dict for admin_password absence.""" diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py index 7b1e98638c..c142a6f08d 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py @@ -155,7 +155,7 @@ def test_actions_with_locked_instance(self): body_map = {'resize': {'flavor_ref': '2'}, 'reboot': {'type': 'HARD'}, 'rebuild': {'image_ref': self.image_uuid, - 'admin_password': 'TNc53Dr8s7vw'}} + 'adminPass': 'TNc53Dr8s7vw'}} args_map = {'resize': (('2'), {}), 'confirm_resize': ((), {}), @@ -269,7 +269,7 @@ def test_rebuild_accepted_minimum(self): body = robj.obj self.assertEqual(body['server']['image']['id'], '2') - self.assertEqual(len(body['server']['admin_password']), + self.assertEqual(len(body['server']['adminPass']), CONF.password_length) self.assertEqual(robj['location'], self_href) @@ -427,7 +427,7 @@ def test_rebuild_admin_password(self): body = { "rebuild": { "image_ref": self._image_href, - "admin_password": "asdf", + "adminPass": "asdf", }, } @@ -435,7 +435,7 @@ def test_rebuild_admin_password(self): body = self.controller._action_rebuild(req, 
FAKE_UUID, body=body).obj self.assertEqual(body['server']['image']['id'], '2') - self.assertEqual(body['server']['admin_password'], 'asdf') + self.assertEqual(body['server']['adminPass'], 'asdf') def test_rebuild_admin_password_pass_disabled(self): # run with enable_instance_password disabled to verify admin_password @@ -457,7 +457,7 @@ def test_rebuild_admin_password_pass_disabled(self): body = self.controller._action_rebuild(req, FAKE_UUID, body=body).obj self.assertEqual(body['server']['image']['id'], '2') - self.assertNotIn('admin_password', body['server']) + self.assertNotIn('adminPass', body['server']) def test_rebuild_server_not_found(self): def server_not_found(self, instance_id, diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index ff3436babd..3c20684853 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -1635,8 +1635,8 @@ def test_update_server_name_all_blank_spaces(self): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, FAKE_UUID, body=body) - def test_update_server_adminPass_ignored(self): - inst_dict = dict(name='server_test', adminPass='bacon') + def test_update_server_admin_password_ignored(self): + inst_dict = dict(name='server_test', admin_password='bacon') body = dict(server=inst_dict) def server_update(context, id, params): @@ -1911,11 +1911,11 @@ def queue_get_for(context, *args): def _check_admin_password_len(self, server_dict): """utility function - check server_dict for admin_password length.""" self.assertEqual(CONF.password_length, - len(server_dict["admin_password"])) + len(server_dict["adminPass"])) def _check_admin_password_missing(self, server_dict): """utility function - check server_dict for admin_password absence.""" - self.assertNotIn("admin_password", server_dict) + self.assertNotIn("adminPass", server_dict) def 
_test_create_instance(self, flavor=2): image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' @@ -2390,27 +2390,27 @@ def test_create_instance_local_href(self): def test_create_instance_admin_password(self): self.body['server']['flavor_ref'] = 3 - self.body['server']['admin_password'] = 'testpass' + self.body['server']['adminPass'] = 'testpass' self.req.body = jsonutils.dumps(self.body) res = self.controller.create(self.req, body=self.body).obj server = res['server'] - self.assertEqual(server['admin_password'], - self.body['server']['admin_password']) + self.assertEqual(server['adminPass'], + self.body['server']['adminPass']) def test_create_instance_admin_password_pass_disabled(self): self.flags(enable_instance_password=False) self.body['server']['flavor_ref'] = 3 - self.body['server']['admin_password'] = 'testpass' + self.body['server']['adminPass'] = 'testpass' self.req.body = jsonutils.dumps(self.body) res = self.controller.create(self.req, body=self.body).obj self.assertIn('server', res) - self.assertIn('admin_password', self.body['server']) + self.assertIn('adminPass', self.body['server']) def test_create_instance_admin_password_empty(self): self.body['server']['flavor_ref'] = 3 - self.body['server']['admin_password'] = '' + self.body['server']['adminPass'] = '' self.req.body = jsonutils.dumps(self.body) # The fact that the action doesn't raise is enough validation diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl index 740c3909b9..7f18b0677e 100644 --- a/nova/tests/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/consoles/server-post-resp.json.tpl 
b/nova/tests/integrated/v3/api_samples/consoles/server-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/consoles/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/consoles/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl index 61dc8279f8..7f9a09d0df 100644 --- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl @@ -10,7 +10,7 @@ } ] }, - "admin_password": "%(password)s", + "adminPass": "%(password)s", "created": "%(isotime)s", "flavor": { "id": "1", diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl index 835bf9a813..495b3188fa 100644 --- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl @@ -2,7 +2,7 @@ "server": { "os-access-ips:access_ip_v4": "%(access_ip_v4)s", "os-access-ips:access_ip_v6": "%(access_ip_v6)s", - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + 
"adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl 
@@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl +++ 
b/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl 
index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl 
b/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl 
b/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl index 7f9843c505..08ca8539d9 100644 --- a/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git 
a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + 
"adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl index eb3f76ebe6..adfaaa381e 100644 --- a/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": 
"%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl index 68a5938ef2..5802293c04 100644 --- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl @@ -10,7 +10,7 @@ } ] }, - "admin_password": "%(password)s", + "adminPass": "%(password)s", "created": "%(isotime)s", "flavor": { "id": "1", diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl index 7b042642b0..919e5b3951 100644 --- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl +++ b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl @@ -2,7 +2,7 @@ "rebuild" : { "image_ref" : "%(glance_host)s/images/%(uuid)s", "name" : "%(name)s", - "admin_password" : "%(pass)s", + "adminPass" : "%(pass)s", "metadata" : { "meta_var" : "meta_val" }, diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl index 68a5938ef2..5802293c04 100644 --- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl @@ -10,7 +10,7 @@ } ] }, - "admin_password": "%(password)s", + "adminPass": "%(password)s", "created": "%(isotime)s", "flavor": { "id": "1", diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl 
b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl index f1f21a3401..219aebc853 100644 --- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl +++ b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl @@ -2,7 +2,7 @@ "rebuild" : { "image_ref" : "%(glance_host)s/images/%(uuid)s", "name" : "%(name)s", - "admin_password" : "%(pass)s", + "adminPass" : "%(pass)s", "metadata" : { "meta_var" : "meta_val" }, diff --git a/nova/tests/integrated/v3/api_samples/servers/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-post-resp.json.tpl index 7af0df5ec0..71654b4b8a 100644 --- a/nova/tests/integrated/v3/api_samples/servers/server-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/servers/server-post-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "admin_password": "%(password)s", + "adminPass": "%(password)s", "id": "%(id)s", "links": [ { From 5ed82cf7ed1f7bb5c69f72047f51ac1d42292f47 Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Fri, 15 Aug 2014 11:29:26 +0800 Subject: [PATCH 398/486] Change 'image_ref'/'flavor_ref' into v2 style for servers This patch changes 'image_ref'/'flavor_ref' to 'imageRef'/'flavorRef' that same with v2 api. And also change the related unittest. The differences between v2 and v3 are described on the wiki page https://wiki.openstack.org/wiki/NovaAPIv2tov3. 
Partially implements blueprint v2-on-v3-api Change-Id: Ic17334e0a9de89488d5e0c8b75950027d51a3cd4 --- .../all_extensions/server-post-req.json | 4 +- .../api_samples/consoles/server-post-req.json | 4 +- .../os-access-ips/server-action-rebuild.json | 2 +- .../os-access-ips/server-post-req.json | 4 +- .../os-admin-actions/server-post-req.json | 4 +- .../os-admin-password/server-post-req.json | 4 +- .../os-attach-interfaces/server-post-req.json | 4 +- .../os-availability-zone/server-post-req.json | 4 +- .../os-config-drive/server-post-req.json | 4 +- .../server-post-req.json | 4 +- .../os-console-output/server-post-req.json | 4 +- .../os-create-backup/server-post-req.json | 4 +- .../os-deferred-delete/server-post-req.json | 4 +- .../os-evacuate/server-post-req.json | 4 +- .../server-post-req.json | 4 +- .../server-post-req.json | 4 +- .../os-extended-status/server-post-req.json | 4 +- .../os-extended-volumes/server-post-req.json | 4 +- .../server-post-req.json | 4 +- .../os-lock-server/server-post-req.json | 4 +- .../os-migrate-server/server-post-req.json | 4 +- .../os-multinic/server-post-req.json | 4 +- .../os-pause-server/server-post-req.json | 4 +- .../api_samples/os-pci/server-post-req.json | 4 +- .../os-remote-consoles/server-post-req.json | 4 +- .../os-rescue/server-post-req.json | 4 +- .../scheduler-hints-post-req.json | 4 +- .../os-security-groups/server-post-req.json | 4 +- .../server-post-req.json | 4 +- .../server-post-req.json | 4 +- .../os-server-usage/server-post-req.json | 4 +- .../os-shelve/server-post-req.json | 4 +- .../os-suspend-server/server-post-req.json | 4 +- .../os-user-data/userdata-post-req.json | 4 +- .../server-ips/server-post-req.json | 4 +- .../server-metadata/server-post-req.json | 4 +- ...ver-action-rebuild-preserve-ephemeral.json | 2 +- .../servers/server-action-rebuild.json | 2 +- .../servers/server-action-resize.json | 2 +- .../api_samples/servers/server-post-req.json | 4 +- .../openstack/compute/plugins/v3/servers.py | 26 +++--- 
.../plugins/v3/test_availability_zone.py | 12 +-- .../plugins/v3/test_block_device_mapping.py | 8 +- .../compute/plugins/v3/test_config_drive.py | 8 +- .../plugins/v3/test_multiple_create.py | 44 +++++----- .../plugins/v3/test_scheduler_hints.py | 16 ++-- .../plugins/v3/test_security_groups.py | 6 +- .../compute/plugins/v3/test_server_actions.py | 50 +++++------ .../compute/plugins/v3/test_servers.py | 84 +++++++++---------- .../compute/plugins/v3/test_user_data.py | 12 +-- nova/tests/integrated/test_servers.py | 4 +- .../all_extensions/server-post-req.json.tpl | 4 +- .../consoles/server-post-req.json.tpl | 4 +- .../server-action-rebuild.json.tpl | 2 +- .../os-access-ips/server-post-req.json.tpl | 4 +- .../os-admin-actions/server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../os-config-drive/server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../os-create-backup/server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../os-evacuate/server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../os-lock-server/server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../os-multinic/server-post-req.json.tpl | 4 +- .../os-pause-server/server-post-req.json.tpl | 4 +- .../os-pci/server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../os-rescue/server-post-req.json.tpl | 4 +- .../scheduler-hints-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../os-server-usage/server-post-req.json.tpl | 4 +- .../os-shelve/server-post-req.json.tpl | 4 +- .../server-post-req.json.tpl | 4 +- .../os-user-data/userdata-post-req.json.tpl | 4 +- 
.../server-ips/server-post-req.json.tpl | 4 +- .../server-metadata/server-post-req.json.tpl | 4 +- ...action-rebuild-preserve-ephemeral.json.tpl | 2 +- .../servers/server-action-rebuild.json.tpl | 2 +- .../servers/server-action-resize.json.tpl | 2 +- .../servers/server-post-req.json.tpl | 4 +- 91 files changed, 287 insertions(+), 287 deletions(-) diff --git a/doc/v3/api_samples/all_extensions/server-post-req.json b/doc/v3/api_samples/all_extensions/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/all_extensions/server-post-req.json +++ b/doc/v3/api_samples/all_extensions/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/consoles/server-post-req.json b/doc/v3/api_samples/consoles/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/consoles/server-post-req.json +++ b/doc/v3/api_samples/consoles/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-access-ips/server-action-rebuild.json b/doc/v3/api_samples/os-access-ips/server-action-rebuild.json index 678bd647ba..9285071478 100644 --- a/doc/v3/api_samples/os-access-ips/server-action-rebuild.json +++ 
b/doc/v3/api_samples/os-access-ips/server-action-rebuild.json @@ -2,7 +2,7 @@ "rebuild" : { "os-access-ips:access_ip_v4": "4.3.2.1", "os-access-ips:access_ip_v6": "80fe::", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "name" : "new-server-test", "metadata" : { "meta_var" : "meta_val" diff --git a/doc/v3/api_samples/os-access-ips/server-post-req.json b/doc/v3/api_samples/os-access-ips/server-post-req.json index b0f0e90e53..5bd3781f3e 100644 --- a/doc/v3/api_samples/os-access-ips/server-post-req.json +++ b/doc/v3/api_samples/os-access-ips/server-post-req.json @@ -3,8 +3,8 @@ "os-access-ips:access_ip_v4": "1.2.3.4", "os-access-ips:access_ip_v6": "fe80::", "name" : "new-server-test", - "image_ref" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/openstack/flavors/1", + "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/openstack/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/doc/v3/api_samples/os-admin-actions/server-post-req.json b/doc/v3/api_samples/os-admin-actions/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-admin-actions/server-post-req.json +++ b/doc/v3/api_samples/os-admin-actions/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git 
a/doc/v3/api_samples/os-admin-password/server-post-req.json b/doc/v3/api_samples/os-admin-password/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-admin-password/server-post-req.json +++ b/doc/v3/api_samples/os-admin-password/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-attach-interfaces/server-post-req.json b/doc/v3/api_samples/os-attach-interfaces/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-attach-interfaces/server-post-req.json +++ b/doc/v3/api_samples/os-attach-interfaces/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-availability-zone/server-post-req.json b/doc/v3/api_samples/os-availability-zone/server-post-req.json index 0377284764..83df44d977 100644 --- a/doc/v3/api_samples/os-availability-zone/server-post-req.json +++ b/doc/v3/api_samples/os-availability-zone/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/openstack/flavors/1", + "imageRef" : 
"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/openstack/flavors/1", "os-availability-zone:availability_zone" : "test" "metadata" : { "My Server Name" : "Apache1" diff --git a/doc/v3/api_samples/os-config-drive/server-post-req.json b/doc/v3/api_samples/os-config-drive/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-config-drive/server-post-req.json +++ b/doc/v3/api_samples/os-config-drive/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-console-auth-tokens/server-post-req.json b/doc/v3/api_samples/os-console-auth-tokens/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-console-auth-tokens/server-post-req.json +++ b/doc/v3/api_samples/os-console-auth-tokens/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-console-output/server-post-req.json b/doc/v3/api_samples/os-console-output/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-console-output/server-post-req.json +++ b/doc/v3/api_samples/os-console-output/server-post-req.json @@ -1,8 +1,8 @@ 
{ "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-create-backup/server-post-req.json b/doc/v3/api_samples/os-create-backup/server-post-req.json index 30851df41a..1c45fbb32f 100644 --- a/doc/v3/api_samples/os-create-backup/server-post-req.json +++ b/doc/v3/api_samples/os-create-backup/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/doc/v3/api_samples/os-deferred-delete/server-post-req.json b/doc/v3/api_samples/os-deferred-delete/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-deferred-delete/server-post-req.json +++ b/doc/v3/api_samples/os-deferred-delete/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-evacuate/server-post-req.json b/doc/v3/api_samples/os-evacuate/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- 
a/doc/v3/api_samples/os-evacuate/server-post-req.json +++ b/doc/v3/api_samples/os-evacuate/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-extended-availability-zone/server-post-req.json b/doc/v3/api_samples/os-extended-availability-zone/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-extended-availability-zone/server-post-req.json +++ b/doc/v3/api_samples/os-extended-availability-zone/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-extended-server-attributes/server-post-req.json b/doc/v3/api_samples/os-extended-server-attributes/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-extended-server-attributes/server-post-req.json +++ b/doc/v3/api_samples/os-extended-server-attributes/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : 
"http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-extended-status/server-post-req.json b/doc/v3/api_samples/os-extended-status/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-extended-status/server-post-req.json +++ b/doc/v3/api_samples/os-extended-status/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-extended-volumes/server-post-req.json b/doc/v3/api_samples/os-extended-volumes/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-extended-volumes/server-post-req.json +++ b/doc/v3/api_samples/os-extended-volumes/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-hide-server-addresses/server-post-req.json b/doc/v3/api_samples/os-hide-server-addresses/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-hide-server-addresses/server-post-req.json +++ b/doc/v3/api_samples/os-hide-server-addresses/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : 
"http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-lock-server/server-post-req.json b/doc/v3/api_samples/os-lock-server/server-post-req.json index 30851df41a..1c45fbb32f 100644 --- a/doc/v3/api_samples/os-lock-server/server-post-req.json +++ b/doc/v3/api_samples/os-lock-server/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/doc/v3/api_samples/os-migrate-server/server-post-req.json b/doc/v3/api_samples/os-migrate-server/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-migrate-server/server-post-req.json +++ b/doc/v3/api_samples/os-migrate-server/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-multinic/server-post-req.json b/doc/v3/api_samples/os-multinic/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-multinic/server-post-req.json +++ 
b/doc/v3/api_samples/os-multinic/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-pause-server/server-post-req.json b/doc/v3/api_samples/os-pause-server/server-post-req.json index 30851df41a..1c45fbb32f 100644 --- a/doc/v3/api_samples/os-pause-server/server-post-req.json +++ b/doc/v3/api_samples/os-pause-server/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/doc/v3/api_samples/os-pci/server-post-req.json b/doc/v3/api_samples/os-pci/server-post-req.json index 30851df41a..1c45fbb32f 100644 --- a/doc/v3/api_samples/os-pci/server-post-req.json +++ b/doc/v3/api_samples/os-pci/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/doc/v3/api_samples/os-remote-consoles/server-post-req.json b/doc/v3/api_samples/os-remote-consoles/server-post-req.json index 
2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-remote-consoles/server-post-req.json +++ b/doc/v3/api_samples/os-remote-consoles/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-rescue/server-post-req.json b/doc/v3/api_samples/os-rescue/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-rescue/server-post-req.json +++ b/doc/v3/api_samples/os-rescue/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json b/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json index 0c5c998e42..0ee3de0871 100644 --- a/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json +++ b/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/openstack/flavors/1", + "imageRef" : "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : 
"http://openstack.example.com/openstack/flavors/1", "os-scheduler-hints:scheduler_hints": { "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e" } diff --git a/doc/v3/api_samples/os-security-groups/server-post-req.json b/doc/v3/api_samples/os-security-groups/server-post-req.json index 428217cfc3..365dde78e0 100644 --- a/doc/v3/api_samples/os-security-groups/server-post-req.json +++ b/doc/v3/api_samples/os-security-groups/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/openstack/flavors/1", + "imageRef" : "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/openstack/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/doc/v3/api_samples/os-server-diagnostics/server-post-req.json b/doc/v3/api_samples/os-server-diagnostics/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-server-diagnostics/server-post-req.json +++ b/doc/v3/api_samples/os-server-diagnostics/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-server-external-events/server-post-req.json b/doc/v3/api_samples/os-server-external-events/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-server-external-events/server-post-req.json +++ b/doc/v3/api_samples/os-server-external-events/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" 
: "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-server-usage/server-post-req.json b/doc/v3/api_samples/os-server-usage/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/os-server-usage/server-post-req.json +++ b/doc/v3/api_samples/os-server-usage/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-shelve/server-post-req.json b/doc/v3/api_samples/os-shelve/server-post-req.json index 8cc0fd01df..f63022b56b 100644 --- a/doc/v3/api_samples/os-shelve/server-post-req.json +++ b/doc/v3/api_samples/os-shelve/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/openstack/flavors/1", + "imageRef" : "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/openstack/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/doc/v3/api_samples/os-suspend-server/server-post-req.json b/doc/v3/api_samples/os-suspend-server/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- 
a/doc/v3/api_samples/os-suspend-server/server-post-req.json +++ b/doc/v3/api_samples/os-suspend-server/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/os-user-data/userdata-post-req.json b/doc/v3/api_samples/os-user-data/userdata-post-req.json index 21ca21b6db..e11c610961 100644 --- a/doc/v3/api_samples/os-user-data/userdata-post-req.json +++ b/doc/v3/api_samples/os-user-data/userdata-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/openstack/flavors/1", + "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/openstack/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/doc/v3/api_samples/server-ips/server-post-req.json b/doc/v3/api_samples/server-ips/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/server-ips/server-post-req.json +++ b/doc/v3/api_samples/server-ips/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git 
a/doc/v3/api_samples/server-metadata/server-post-req.json b/doc/v3/api_samples/server-metadata/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/server-metadata/server-post-req.json +++ b/doc/v3/api_samples/server-metadata/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/doc/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json b/doc/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json index 7109160b2b..e348f8af4f 100644 --- a/doc/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json +++ b/doc/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json @@ -1,6 +1,6 @@ { "rebuild" : { - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "metadata" : { diff --git a/doc/v3/api_samples/servers/server-action-rebuild.json b/doc/v3/api_samples/servers/server-action-rebuild.json index ba7de21e65..7900828eab 100644 --- a/doc/v3/api_samples/servers/server-action-rebuild.json +++ b/doc/v3/api_samples/servers/server-action-rebuild.json @@ -1,6 +1,6 @@ { "rebuild" : { - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "metadata" : { diff --git a/doc/v3/api_samples/servers/server-action-resize.json 
b/doc/v3/api_samples/servers/server-action-resize.json index 7dcf7751db..bdaa37a176 100644 --- a/doc/v3/api_samples/servers/server-action-resize.json +++ b/doc/v3/api_samples/servers/server-action-resize.json @@ -1,5 +1,5 @@ { "resize" : { - "flavor_ref" : "2" + "flavorRef" : "2" } } \ No newline at end of file diff --git a/doc/v3/api_samples/servers/server-post-req.json b/doc/v3/api_samples/servers/server-post-req.json index 2eedab6147..d4c7973c10 100644 --- a/doc/v3/api_samples/servers/server-post-req.json +++ b/doc/v3/api_samples/servers/server-post-req.json @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavor_ref" : "http://openstack.example.com/flavors/1", + "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index cd679f6608..f0763acafb 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -491,7 +491,7 @@ def create(self, req, body): try: flavor_id = self._flavor_id_from_req_data(body) except ValueError as error: - msg = _("Invalid flavor_ref provided.") + msg = _("Invalid flavorRef provided.") raise exc.HTTPBadRequest(explanation=msg) try: @@ -519,7 +519,7 @@ def create(self, req, body): msg = _("Can not find requested image") raise exc.HTTPBadRequest(explanation=msg) except exception.FlavorNotFound as error: - msg = _("Invalid flavor_ref provided.") + msg = _("Invalid flavorRef provided.") raise exc.HTTPBadRequest(explanation=msg) except exception.KeypairNotFound as error: msg = _("Invalid key_name provided.") @@ -779,7 +779,7 @@ def _image_uuid_from_href(self, image_href): image_uuid = image_href.split('/').pop() if not 
uuidutils.is_uuid_like(image_uuid): - msg = _("Invalid image_ref provided.") + msg = _("Invalid imageRef provided.") raise exc.HTTPBadRequest(explanation=msg) return image_uuid @@ -788,24 +788,24 @@ def _image_from_req_data(self, server_dict, create_kwargs): """Get image data from the request or raise appropriate exceptions. - The field image_ref is mandatory when no block devices have been + The field imageRef is mandatory when no block devices have been defined and must be a proper uuid when present. """ - image_href = server_dict.get('image_ref') + image_href = server_dict.get('imageRef') if not image_href and create_kwargs.get('block_device_mapping'): return '' elif image_href: return self._image_uuid_from_href(unicode(image_href)) else: - msg = _("Missing image_ref attribute") + msg = _("Missing imageRef attribute") raise exc.HTTPBadRequest(explanation=msg) def _flavor_id_from_req_data(self, data): try: - flavor_ref = data['server']['flavor_ref'] + flavor_ref = data['server']['flavorRef'] except (TypeError, KeyError): - msg = _("Missing flavor_ref attribute") + msg = _("Missing flavorRef attribute") raise exc.HTTPBadRequest(explanation=msg) return common.get_id_from_href(flavor_ref) @@ -817,12 +817,12 @@ def _action_resize(self, req, id, body): """Resizes a given instance to the flavor size requested.""" resize_dict = body['resize'] try: - flavor_ref = str(resize_dict["flavor_ref"]) + flavor_ref = str(resize_dict["flavorRef"]) if not flavor_ref: - msg = _("Resize request has invalid 'flavor_ref' attribute.") + msg = _("Resize request has invalid 'flavorRef' attribute.") raise exc.HTTPBadRequest(explanation=msg) except (KeyError, TypeError): - msg = _("Resize requests require 'flavor_ref' attribute.") + msg = _("Resize requests require 'flavorRef' attribute.") raise exc.HTTPBadRequest(explanation=msg) resize_kwargs = {} @@ -837,9 +837,9 @@ def _action_rebuild(self, req, id, body): rebuild_dict = body['rebuild'] try: - image_href = rebuild_dict["image_ref"] + 
image_href = rebuild_dict["imageRef"] except (KeyError, TypeError): - msg = _("Could not parse image_ref from request.") + msg = _("Could not parse imageRef from request.") raise exc.HTTPBadRequest(explanation=msg) image_href = self._image_uuid_from_href(image_href) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_availability_zone.py b/nova/tests/api/openstack/compute/plugins/v3/test_availability_zone.py index ef41a7ef7b..4516e0660b 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_availability_zone.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_availability_zone.py @@ -344,9 +344,9 @@ def queue_get_for(context, *args): def _test_create_extra(self, params, no_image=False, override_controller=None): image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2) + server = dict(name='server_test', imageRef=image_uuid, flavorRef=2) if no_image: - server.pop('image_ref', None) + server.pop('imageRef', None) server.update(params) body = dict(server=server) req = fakes.HTTPRequestV3.blank('/v3/servers') @@ -384,8 +384,8 @@ def create(*args, **kwargs): body = { 'server': { 'name': 'config_drive_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, 'metadata': { 'hello': 'world', 'open': 'stack', @@ -416,8 +416,8 @@ def test_create_instance_without_availability_zone(self): body = { 'server': { 'name': 'config_drive_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, 'metadata': { 'hello': 'world', 'open': 'stack', diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_block_device_mapping.py b/nova/tests/api/openstack/compute/plugins/v3/test_block_device_mapping.py index 3ff8a6a046..639b808f0d 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_block_device_mapping.py +++ 
b/nova/tests/api/openstack/compute/plugins/v3/test_block_device_mapping.py @@ -62,8 +62,8 @@ def _test_create(self, params, no_image=False, override_controller=None): 'server': { 'min_count': 2, 'name': 'server_test', - 'image_ref': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', - 'flavor_ref': 'http://localhost/123/flavors/3', + 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', + 'flavorRef': 'http://localhost/123/flavors/3', 'metadata': { 'hello': 'world', 'open': 'stack', @@ -72,7 +72,7 @@ def _test_create(self, params, no_image=False, override_controller=None): } if no_image: - del body['server']['image_ref'] + del body['server']['imageRef'] body['server'].update(params) @@ -109,7 +109,7 @@ def test_create_instance_with_volumes_enabled_no_image(self): old_create = compute_api.API.create def create(*args, **kwargs): - self.assertNotIn('image_ref', kwargs) + self.assertNotIn('imageRef', kwargs) return old_create(*args, **kwargs) self.stubs.Set(compute_api.API, 'create', create) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py b/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py index 3ab3cf7018..a0adb97e65 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py @@ -177,9 +177,9 @@ def queue_get_for(context, *args): def _test_create_extra(self, params, no_image=False, override_controller=None): image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2) + server = dict(name='server_test', imageRef=image_uuid, flavorRef=2) if no_image: - server.pop('image_ref', None) + server.pop('imageRef', None) server.update(params) body = dict(server=server) req = fakes.HTTPRequestV3.blank('/servers') @@ -215,8 +215,8 @@ def create(*args, **kwargs): body = { 'server': { 'name': 'config_drive_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 
'flavorRef': flavor_ref, 'metadata': { 'hello': 'world', 'open': 'stack', diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_multiple_create.py b/nova/tests/api/openstack/compute/plugins/v3/test_multiple_create.py index c5ffc59226..f1eb49b6dc 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_multiple_create.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_multiple_create.py @@ -140,9 +140,9 @@ def queue_get_for(context, *args): def _test_create_extra(self, params, no_image=False, override_controller=None): image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2) + server = dict(name='server_test', imageRef=image_uuid, flavorRef=2) if no_image: - server.pop('image_ref', None) + server.pop('imageRef', None) server.update(params) body = dict(server=server) req = fakes.HTTPRequestV3.blank('/servers') @@ -217,8 +217,8 @@ def test_create_instance_invalid_negative_min(self): 'server': { multiple_create.MIN_ATTRIBUTE_NAME: -1, 'name': 'server_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, } } req = fakes.HTTPRequestV3.blank('/servers') @@ -238,8 +238,8 @@ def test_create_instance_invalid_negative_max(self): 'server': { multiple_create.MAX_ATTRIBUTE_NAME: -1, 'name': 'server_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, } } req = fakes.HTTPRequestV3.blank('/servers') @@ -260,8 +260,8 @@ def test_create_instance_invalid_min_greater_than_max(self): multiple_create.MIN_ATTRIBUTE_NAME: 4, multiple_create.MAX_ATTRIBUTE_NAME: 2, 'name': 'server_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, } } req = fakes.HTTPRequestV3.blank('/servers') @@ -281,8 +281,8 @@ def test_create_instance_invalid_alpha_min(self): 'server': { multiple_create.MIN_ATTRIBUTE_NAME: 'abcd', 'name': 
'server_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, } } req = fakes.HTTPRequestV3.blank('/servers') @@ -302,8 +302,8 @@ def test_create_instance_invalid_alpha_max(self): 'server': { multiple_create.MAX_ATTRIBUTE_NAME: 'abcd', 'name': 'server_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, } } req = fakes.HTTPRequestV3.blank('/servers') @@ -325,8 +325,8 @@ def test_create_multiple_instances(self): 'server': { multiple_create.MIN_ATTRIBUTE_NAME: 2, 'name': 'server_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, 'metadata': {'hello': 'world', 'open': 'stack'}, } @@ -352,8 +352,8 @@ def test_create_multiple_instances_pass_disabled(self): 'server': { multiple_create.MIN_ATTRIBUTE_NAME: 2, 'name': 'server_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, 'metadata': {'hello': 'world', 'open': 'stack'}, } @@ -387,8 +387,8 @@ def _create_multiple_instances_resv_id_return(self, resv_id_return): 'server': { multiple_create.MIN_ATTRIBUTE_NAME: 2, 'name': 'server_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, 'metadata': {'hello': 'world', 'open': 'stack'}, multiple_create.RRID_ATTRIBUTE_NAME: resv_id_return @@ -463,8 +463,8 @@ def test_create_multiple_instance_with_non_integer_max_count(self): 'server': { multiple_create.MAX_ATTRIBUTE_NAME: 2.5, 'name': 'server_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, 'metadata': {'hello': 'world', 'open': 'stack'}, } @@ -484,8 +484,8 @@ def test_create_multiple_instance_with_non_integer_min_count(self): 'server': { multiple_create.MIN_ATTRIBUTE_NAME: 2.5, 'name': 'server_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 
'imageRef': image_href, + 'flavorRef': flavor_ref, 'metadata': {'hello': 'world', 'open': 'stack'}, } diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_scheduler_hints.py b/nova/tests/api/openstack/compute/plugins/v3/test_scheduler_hints.py index fcd8af0faa..a133a20b70 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_scheduler_hints.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_scheduler_hints.py @@ -66,8 +66,8 @@ def fake_create(*args, **kwargs): req.content_type = 'application/json' body = {'server': { 'name': 'server_test', - 'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175', - 'flavor_ref': '1', + 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175', + 'flavorRef': '1', }} req.body = jsonutils.dumps(body) @@ -89,8 +89,8 @@ def fake_create(*args, **kwargs): body = { 'server': { 'name': 'server_test', - 'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175', - 'flavor_ref': '1', + 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175', + 'flavorRef': '1', 'os-scheduler-hints:scheduler_hints': hints, }, } @@ -106,8 +106,8 @@ def test_create_server_bad_hints(self): body = { 'server': { 'name': 'server_test', - 'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175', - 'flavor_ref': '1', + 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175', + 'flavorRef': '1', 'os-scheduler-hints:scheduler_hints': 'non-dict', }, } @@ -213,9 +213,9 @@ def queue_get_for(context, *args): def _test_create_extra(self, params, no_image=False, override_controller=None): image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2) + server = dict(name='server_test', imageRef=image_uuid, flavorRef=2) if no_image: - server.pop('image_ref', None) + server.pop('imageRef', None) server.update(params) body = dict(server=server) req = fakes.HTTPRequestV3.blank('/servers') diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_security_groups.py 
b/nova/tests/api/openstack/compute/plugins/v3/test_security_groups.py index 38cb07dd87..214b154fb0 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_security_groups.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_security_groups.py @@ -141,7 +141,7 @@ def _get_groups(self, server): def test_create(self): url = '/v3/servers' image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2) + server = dict(name='server_test', imageRef=image_uuid, flavorRef=2) res = self._make_request(url, {'server': server}) self.assertEqual(res.status_int, 202) server = self._get_server(res.body) @@ -277,9 +277,9 @@ def queue_get_for(context, *args): def _test_create_extra(self, params, no_image=False, override_controller=None): image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2) + server = dict(name='server_test', imageRef=image_uuid, flavorRef=2) if no_image: - server.pop('image_ref', None) + server.pop('imageRef', None) server.update(params) body = dict(server=server) req = fakes.HTTPRequestV3.blank('/servers') diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py index c142a6f08d..c61c9e78d0 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py @@ -152,9 +152,9 @@ def test_actions_with_locked_instance(self): actions = ['resize', 'confirm_resize', 'revert_resize', 'reboot', 'rebuild'] - body_map = {'resize': {'flavor_ref': '2'}, + body_map = {'resize': {'flavorRef': '2'}, 'reboot': {'type': 'HARD'}, - 'rebuild': {'image_ref': self.image_uuid, + 'rebuild': {'imageRef': self.image_uuid, 'adminPass': 'TNc53Dr8s7vw'}} args_map = {'resize': (('2'), {}), @@ -260,7 +260,7 @@ def test_rebuild_accepted_minimum(self): body = { "rebuild": { - 
"image_ref": self._image_href, + "imageRef": self._image_href, }, } @@ -287,7 +287,7 @@ def rebuild(self2, context, instance, image_href, *args, **kwargs): # proper local hrefs must start with 'http://localhost/v3/' body = { 'rebuild': { - 'image_ref': self.image_uuid, + 'imageRef': self.image_uuid, }, } @@ -308,7 +308,7 @@ def rebuild(self2, context, instance, image_href, *args, **kwargs): # proper local hrefs must start with 'http://localhost/v3/' body = { 'rebuild': { - 'image_ref': self.image_href, + 'imageRef': self.image_href, }, } @@ -328,7 +328,7 @@ def test_rebuild_accepted_minimum_pass_disabled(self): body = { "rebuild": { - "image_ref": self._image_href, + "imageRef": self._image_href, }, } @@ -344,7 +344,7 @@ def test_rebuild_accepted_minimum_pass_disabled(self): def test_rebuild_raises_conflict_on_invalid_state(self): body = { "rebuild": { - "image_ref": self._image_href, + "imageRef": self._image_href, }, } @@ -369,7 +369,7 @@ def test_rebuild_accepted_with_metadata(self): body = { "rebuild": { - "image_ref": self._image_href, + "imageRef": self._image_href, "metadata": metadata, }, } @@ -382,7 +382,7 @@ def test_rebuild_accepted_with_metadata(self): def test_rebuild_accepted_with_bad_metadata(self): body = { "rebuild": { - "image_ref": self._image_href, + "imageRef": self._image_href, "metadata": "stack", }, } @@ -395,7 +395,7 @@ def test_rebuild_accepted_with_bad_metadata(self): def test_rebuild_with_too_large_metadata(self): body = { "rebuild": { - "image_ref": self._image_href, + "imageRef": self._image_href, "metadata": { 256 * "k": "value" } @@ -426,7 +426,7 @@ def test_rebuild_admin_password(self): body = { "rebuild": { - "image_ref": self._image_href, + "imageRef": self._image_href, "adminPass": "asdf", }, } @@ -448,7 +448,7 @@ def test_rebuild_admin_password_pass_disabled(self): body = { "rebuild": { - "image_ref": self._image_href, + "imageRef": self._image_href, "admin_password": "asdf", }, } @@ -467,7 +467,7 @@ def server_not_found(self, 
instance_id, body = { "rebuild": { - "image_ref": self._image_href, + "imageRef": self._image_href, }, } @@ -479,7 +479,7 @@ def server_not_found(self, instance_id, def test_rebuild_with_bad_image(self): body = { "rebuild": { - "image_ref": "foo", + "imageRef": "foo", }, } req = fakes.HTTPRequestV3.blank(self.url) @@ -507,7 +507,7 @@ def return_image_meta(*args, **kwargs): self.stubs.Set(fake._FakeImageService, 'show', return_image_meta) body = { "rebuild": { - "image_ref": "155d900f-4e14-4e4c-a73d-069cbf4541e6", + "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6", }, } req = fakes.HTTPRequestV3.blank(self.url) @@ -552,7 +552,7 @@ def return_image_meta(*args, **kwargs): self.stubs.Set(objects.Instance, 'save', fake_save) body = { "rebuild": { - "image_ref": "155d900f-4e14-4e4c-a73d-069cbf4541e6", + "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6", }, } req = fakes.HTTPRequestV3.blank(self.url) @@ -568,7 +568,7 @@ def _test_rebuild_preserve_ephemeral(self, value=None): body = { "rebuild": { - "image_ref": self._image_href, + "imageRef": self._image_href, }, } if value is not None: @@ -599,7 +599,7 @@ def test_rebuild_preserve_ephemeral_default(self): def test_resize_server(self): - body = dict(resize=dict(flavor_ref="http://localhost/3")) + body = dict(resize=dict(flavorRef="http://localhost/3")) self.resize_called = False @@ -622,7 +622,7 @@ def test_resize_server_no_flavor(self): req, FAKE_UUID, body) def test_resize_server_no_flavor_ref(self): - body = dict(resize=dict(flavor_ref=None)) + body = dict(resize=dict(flavorRef=None)) req = fakes.HTTPRequestV3.blank(self.url) self.assertRaises(webob.exc.HTTPBadRequest, @@ -630,7 +630,7 @@ def test_resize_server_no_flavor_ref(self): req, FAKE_UUID, body) def test_resize_with_server_not_found(self): - body = dict(resize=dict(flavor_ref="http://localhost/3")) + body = dict(resize=dict(flavorRef="http://localhost/3")) self.stubs.Set(compute_api.API, 'get', return_server_not_found) @@ -640,7 +640,7 @@ def 
test_resize_with_server_not_found(self): req, FAKE_UUID, body) def test_resize_with_image_exceptions(self): - body = dict(resize=dict(flavor_ref="http://localhost/3")) + body = dict(resize=dict(flavorRef="http://localhost/3")) self.resize_called = 0 image_id = 'fake_image_id' @@ -668,7 +668,7 @@ def _fake_resize(obj, context, instance, flavor_id): self.assertEqual(self.resize_called, call_no + 1) def test_resize_with_too_many_instances(self): - body = dict(resize=dict(flavor_ref="http://localhost/3")) + body = dict(resize=dict(flavorRef="http://localhost/3")) def fake_resize(*args, **kwargs): raise exception.TooManyInstances(message="TooManyInstance") @@ -683,7 +683,7 @@ def fake_resize(*args, **kwargs): @mock.patch('nova.compute.api.API.resize', side_effect=exception.CannotResizeDisk(reason='')) def test_resize_raises_cannot_resize_disk(self, mock_resize): - body = dict(resize=dict(flavor_ref="http://localhost/3")) + body = dict(resize=dict(flavorRef="http://localhost/3")) req = fakes.HTTPRequestV3.blank(self.url) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._action_resize, @@ -693,14 +693,14 @@ def test_resize_raises_cannot_resize_disk(self, mock_resize): side_effect=exception.FlavorNotFound(reason='', flavor_id='fake_id')) def test_resize_raises_flavor_not_found(self, mock_resize): - body = dict(resize=dict(flavor_ref="http://localhost/3")) + body = dict(resize=dict(flavorRef="http://localhost/3")) req = fakes.HTTPRequestV3.blank(self.url) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._action_resize, req, FAKE_UUID, body) def test_resize_raises_conflict_on_invalid_state(self): - body = dict(resize=dict(flavor_ref="http://localhost/3")) + body = dict(resize=dict(flavorRef="http://localhost/3")) def fake_resize(*args, **kwargs): raise exception.InstanceInvalidState(attr='fake_attr', diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index 
3c20684853..3aff6f5c68 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -1381,7 +1381,7 @@ def setUp(self): self.body = { 'rebuild': { 'name': 'new_name', - 'image_ref': self.image_href, + 'imageRef': self.image_href, 'metadata': { 'open': 'stack', }, @@ -1891,8 +1891,8 @@ def queue_get_for(context, *args): self.body = { 'server': { 'name': 'server_test', - 'image_ref': self.image_uuid, - 'flavor_ref': self.flavor_ref, + 'imageRef': self.image_uuid, + 'flavorRef': self.flavor_ref, 'metadata': { 'hello': 'world', 'open': 'stack', @@ -1919,8 +1919,8 @@ def _check_admin_password_missing(self, server_dict): def _test_create_instance(self, flavor=2): image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - self.body['server']['image_ref'] = image_uuid - self.body['server']['flavor_ref'] = flavor + self.body['server']['imageRef'] = image_uuid + self.body['server']['flavorRef'] = flavor self.req.body = jsonutils.dumps(self.body) server = self.controller.create(self.req, body=self.body).obj['server'] self._check_admin_password_len(server) @@ -1947,7 +1947,7 @@ def test_create_instance_private_flavor(self): def test_create_server_bad_image_href(self): image_href = 1 self.body['server']['min_count'] = 1 - self.body['server']['image_ref'] = image_href, + self.body['server']['imageRef'] = image_href, self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, @@ -1983,7 +1983,7 @@ def test_create_server_with_deleted_image(self): self.addCleanup(image_service.update, context, self.image_uuid, {'status': 'active'}) - self.body['server']['flavor_ref'] = 2 + self.body['server']['flavorRef'] = 2 self.req.body = jsonutils.dumps(self.body) with testtools.ExpectedException( webob.exc.HTTPBadRequest, @@ -2004,7 +2004,7 @@ def test_create_server_image_too_large(self): self.addCleanup(image_service.update, context, self.image_uuid, {'size': 
orig_size}) - self.body['server']['flavor_ref'] = 2 + self.body['server']['flavorRef'] = 2 self.req.body = jsonutils.dumps(self.body) with testtools.ExpectedException( @@ -2014,7 +2014,7 @@ def test_create_server_image_too_large(self): def test_create_instance_image_ref_is_bookmark(self): image_href = 'http://localhost/images/%s' % self.image_uuid - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.req.body = jsonutils.dumps(self.body) res = self.controller.create(self.req, body=self.body).obj @@ -2025,8 +2025,8 @@ def test_create_instance_image_ref_is_invalid(self): image_uuid = 'this_is_not_a_valid_uuid' image_href = 'http://localhost/images/%s' % image_uuid flavor_ref = 'http://localhost/flavors/3' - self.body['server']['image_ref'] = image_href - self.body['server']['flavor_ref'] = flavor_ref + self.body['server']['imageRef'] = image_href + self.body['server']['flavorRef'] = flavor_ref self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.req, body=self.body) @@ -2036,9 +2036,9 @@ def test_create_instance_no_key_pair(self): self._test_create_instance() def _test_create_extra(self, params, no_image=False): - self.body['server']['flavor_ref'] = 2 + self.body['server']['flavorRef'] = 2 if no_image: - self.body['server'].pop('image_ref', None) + self.body['server'].pop('imageRef', None) self.body['server'].update(params) self.req.body = jsonutils.dumps(self.body) self.req.headers["content-type"] = "application/json" @@ -2167,7 +2167,7 @@ def test_create_instance_with_pass_disabled(self): # proper local hrefs must start with 'http://localhost/v3/' self.flags(enable_instance_password=False) image_href = 'http://localhost/v2/fake/images/%s' % self.image_uuid - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.req.body = jsonutils.dumps(self.body) res = self.controller.create(self.req, body=self.body).obj @@ 
-2179,7 +2179,7 @@ def test_create_instance_name_too_long(self): # proper local hrefs must start with 'http://localhost/v3/' image_href = 'http://localhost/v2/images/%s' % self.image_uuid self.body['server']['name'] = 'X' * 256 - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.req, body=self.body) @@ -2211,7 +2211,7 @@ def test_create_instance_name_all_blank_spaces(self): def test_create_instance(self): # proper local hrefs must start with 'http://localhost/v3/' image_href = 'http://localhost/v2/images/%s' % self.image_uuid - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.req.body = jsonutils.dumps(self.body) res = self.controller.create(self.req, body=self.body).obj @@ -2233,8 +2233,8 @@ def fake_keypair_server_create(self, server_dict, body = { 'server': { 'name': 'server_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, 'metadata': { 'hello': 'world', 'open': 'stack', @@ -2253,7 +2253,7 @@ def test_create_instance_pass_disabled(self): self.flags(enable_instance_password=False) # proper local hrefs must start with 'http://localhost/v3/' image_href = 'http://localhost/v2/images/%s' % self.image_uuid - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.req.body = jsonutils.dumps(self.body) res = self.controller.create(self.req, body=self.body).obj @@ -2264,7 +2264,7 @@ def test_create_instance_pass_disabled(self): def test_create_instance_too_much_metadata(self): self.flags(quota_metadata_items=1) image_href = 'http://localhost/v2/images/%s' % self.image_uuid - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.body['server']['metadata']['vote'] = 'fiddletown' self.req.body = 
jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPForbidden, @@ -2273,7 +2273,7 @@ def test_create_instance_too_much_metadata(self): def test_create_instance_metadata_key_too_long(self): self.flags(quota_metadata_items=1) image_href = 'http://localhost/v2/images/%s' % self.image_uuid - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.body['server']['metadata'] = {('a' * 260): '12345'} self.req.body = jsonutils.dumps(self.body) @@ -2283,7 +2283,7 @@ def test_create_instance_metadata_key_too_long(self): def test_create_instance_metadata_value_too_long(self): self.flags(quota_metadata_items=1) image_href = 'http://localhost/v2/images/%s' % self.image_uuid - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.body['server']['metadata'] = {'key1': ('a' * 260)} self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, @@ -2292,7 +2292,7 @@ def test_create_instance_metadata_value_too_long(self): def test_create_instance_metadata_key_blank(self): self.flags(quota_metadata_items=1) image_href = 'http://localhost/v2/images/%s' % self.image_uuid - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.body['server']['metadata'] = {'': 'abcd'} self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPBadRequest, @@ -2301,7 +2301,7 @@ def test_create_instance_metadata_key_blank(self): def test_create_instance_metadata_not_dict(self): self.flags(quota_metadata_items=1) image_href = 'http://localhost/v2/images/%s' % self.image_uuid - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.body['server']['metadata'] = 'string' self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPBadRequest, @@ -2310,7 +2310,7 @@ def test_create_instance_metadata_not_dict(self): def test_create_instance_metadata_key_not_string(self): 
self.flags(quota_metadata_items=1) image_href = 'http://localhost/v2/images/%s' % self.image_uuid - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.body['server']['metadata'] = {1: 'test'} self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPBadRequest, @@ -2319,7 +2319,7 @@ def test_create_instance_metadata_key_not_string(self): def test_create_instance_metadata_value_not_string(self): self.flags(quota_metadata_items=1) image_href = 'http://localhost/v2/images/%s' % self.image_uuid - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.body['server']['metadata'] = {'test': ['a', 'list']} self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPBadRequest, @@ -2332,7 +2332,7 @@ def test_create_user_data_malformed_bad_request(self): def test_create_instance_invalid_key_name(self): image_href = 'http://localhost/v2/images/2' - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.body['server']['key_name'] = 'nonexistentkey' self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPBadRequest, @@ -2349,8 +2349,8 @@ def test_create_instance_valid_key_name(self): def test_create_instance_invalid_flavor_href(self): image_href = 'http://localhost/v2/images/2' flavor_ref = 'http://localhost/v2/flavors/asdf' - self.body['server']['image_ref'] = image_href - self.body['server']['flavor_ref'] = flavor_ref + self.body['server']['imageRef'] = image_href + self.body['server']['flavorRef'] = flavor_ref self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.req, body=self.body) @@ -2358,8 +2358,8 @@ def test_create_instance_invalid_flavor_href(self): def test_create_instance_invalid_flavor_id_int(self): image_href = 'http://localhost/v2/images/2' flavor_ref = -1 - self.body['server']['image_ref'] = image_href - 
self.body['server']['flavor_ref'] = flavor_ref + self.body['server']['imageRef'] = image_href + self.body['server']['flavorRef'] = flavor_ref self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.req, body=self.body) @@ -2367,15 +2367,15 @@ def test_create_instance_invalid_flavor_id_int(self): def test_create_instance_bad_flavor_href(self): image_href = 'http://localhost/v2/images/2' flavor_ref = 'http://localhost/v2/flavors/17' - self.body['server']['image_ref'] = image_href - self.body['server']['flavor_ref'] = flavor_ref + self.body['server']['imageRef'] = image_href + self.body['server']['flavorRef'] = flavor_ref self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.req, body=self.body) def test_create_instance_bad_href(self): image_href = 'asdf' - self.body['server']['image_ref'] = image_href + self.body['server']['imageRef'] = image_href self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPBadRequest, @@ -2389,7 +2389,7 @@ def test_create_instance_local_href(self): self.assertEqual(FAKE_UUID, server['id']) def test_create_instance_admin_password(self): - self.body['server']['flavor_ref'] = 3 + self.body['server']['flavorRef'] = 3 self.body['server']['adminPass'] = 'testpass' self.req.body = jsonutils.dumps(self.body) res = self.controller.create(self.req, body=self.body).obj @@ -2400,7 +2400,7 @@ def test_create_instance_admin_password(self): def test_create_instance_admin_password_pass_disabled(self): self.flags(enable_instance_password=False) - self.body['server']['flavor_ref'] = 3 + self.body['server']['flavorRef'] = 3 self.body['server']['adminPass'] = 'testpass' self.req.body = jsonutils.dumps(self.body) res = self.controller.create(self.req, body=self.body).obj @@ -2409,7 +2409,7 @@ def test_create_instance_admin_password_pass_disabled(self): self.assertIn('adminPass', self.body['server']) def 
test_create_instance_admin_password_empty(self): - self.body['server']['flavor_ref'] = 3 + self.body['server']['flavorRef'] = 3 self.body['server']['adminPass'] = '' self.req.body = jsonutils.dumps(self.body) @@ -2426,7 +2426,7 @@ def test_create_location(self): def _do_test_create_instance_above_quota(self, resource, allowed, quota, expected_msg): fakes.stub_out_instance_quota(self.stubs, allowed, quota, resource) - self.body['server']['flavor_ref'] = 3 + self.body['server']['flavorRef'] = 3 self.req.body = jsonutils.dumps(self.body) try: self.controller.create(self.req, body=self.body).obj['server'] @@ -2546,8 +2546,8 @@ def setUp(self): self.body = { 'server': { 'name': 'server_test', - 'image_ref': self.image_uuid, - 'flavor_ref': self.flavor_ref, + 'imageRef': self.image_uuid, + 'flavorRef': self.flavor_ref, 'metadata': { 'hello': 'world', 'open': 'stack', @@ -2559,9 +2559,9 @@ def setUp(self): self.req.headers["content-type"] = "application/json" def _test_create_extra(self, params, no_image=False): - self.body['server']['flavor_ref'] = 2 + self.body['server']['flavorRef'] = 2 if no_image: - self.body['server'].pop('image_ref', None) + self.body['server'].pop('imageRef', None) self.body['server'].update(params) self.req.body = jsonutils.dumps(self.body) self.req.headers["content-type"] = "application/json" diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_user_data.py b/nova/tests/api/openstack/compute/plugins/v3/test_user_data.py index 3b072f4458..4b47d07075 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_user_data.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_user_data.py @@ -143,9 +143,9 @@ def queue_get_for(context, *args): def _test_create_extra(self, params, no_image=False, override_controller=None): image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2) + server = dict(name='server_test', imageRef=image_uuid, flavorRef=2) if no_image: - 
server.pop('image_ref', None) + server.pop('imageRef', None) server.update(params) body = dict(server=server) req = fakes.HTTPRequestV3.blank('/servers') @@ -188,8 +188,8 @@ def test_create_instance_with_user_data(self): body = { 'server': { 'name': 'user_data_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, 'metadata': { 'hello': 'world', 'open': 'stack', @@ -214,8 +214,8 @@ def test_create_instance_with_bad_user_data(self): body = { 'server': { 'name': 'user_data_test', - 'image_ref': image_href, - 'flavor_ref': flavor_ref, + 'imageRef': image_href, + 'flavorRef': flavor_ref, 'metadata': { 'hello': 'world', 'open': 'stack', diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 110bad3844..6142e045e3 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -512,8 +512,8 @@ def test_create_server_with_injected_files(self): class ServersTestV3(client.TestOpenStackClientV3Mixin, ServersTest): _force_delete_parameter = 'force_delete' _api_version = 'v3' - _image_ref_parameter = 'image_ref' - _flavor_ref_parameter = 'flavor_ref' + _image_ref_parameter = 'imageRef' + _flavor_ref_parameter = 'flavorRef' _return_resv_id_parameter = 'os-multiple-create:return_reservation_id' _min_count_parameter = 'os-multiple-create:min_count' _access_ipv4_parameter = None diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + 
"flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/consoles/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/consoles/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/consoles/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/consoles/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl index 544edbf3fb..f1f7ed03c2 100644 --- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl @@ -2,7 +2,7 @@ "rebuild" : { "os-access-ips:access_ip_v4": "%(access_ip_v4)s", "os-access-ips:access_ip_v6": "%(access_ip_v6)s", - "image_ref" : "%(glance_host)s/images/%(image_id)s", + "imageRef" : "%(glance_host)s/images/%(image_id)s", "name" : "new-server-test", "metadata" : { "meta_var" : "meta_val" diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl index d1f9852611..d99d2562aa 100644 --- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl @@ -3,8 +3,8 @@ "os-access-ips:access_ip_v4": "%(access_ip_v4)s", "os-access-ips:access_ip_v6": "%(access_ip_v6)s", "name" : "new-server-test", - "image_ref" : "%(host)s/openstack/images/%(image_id)s", - 
"flavor_ref" : "%(host)s/openstack/flavors/1", + "imageRef" : "%(host)s/openstack/images/%(image_id)s", + "flavorRef" : "%(host)s/openstack/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - 
"image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl index 39b6986b9f..f0fa5a5b42 100644 --- a/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(host)s/openstack/images/%(image_id)s", - "flavor_ref" : "%(host)s/openstack/flavors/1", + "imageRef" : "%(host)s/openstack/images/%(image_id)s", + "flavorRef" : "%(host)s/openstack/flavors/1", "os-availability-zone:availability_zone": "nova", "metadata" : { "My Server Name" : "Apache1" diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl +++ 
b/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl index e6c046ceb4..27557a3e9f 100644 --- a/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- 
a/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl 
b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git 
a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl index e6c046ceb4..27557a3e9f 100644 --- a/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { 
"My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl index e6c046ceb4..27557a3e9f 100644 --- a/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/nova/tests/integrated/v3/api_samples/os-pci/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/server-post-req.json.tpl index e6c046ceb4..27557a3e9f 100644 --- a/nova/tests/integrated/v3/api_samples/os-pci/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-pci/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git 
a/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl index c98a0a5853..cfba4ee9e2 100644 --- a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/openstack/images/%(image_id)s", - "flavor_ref" : "%(host)s/openstack/flavors/1", + "imageRef" : "%(glance_host)s/openstack/images/%(image_id)s", + "flavorRef" : 
"%(host)s/openstack/flavors/1", "os-scheduler-hints:scheduler_hints": { "same_host": "%(uuid)s" } diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl index 5a2262df91..de72904cc5 100644 --- a/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/openstack/images/%(image_id)s", - "flavor_ref" : "%(host)s/openstack/flavors/1", + "imageRef" : "%(glance_host)s/openstack/images/%(image_id)s", + "flavorRef" : "%(host)s/openstack/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : 
"%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl index b1013defdf..6f9336d3c0 100644 --- a/nova/tests/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/openstack/images/%(image_id)s", - "flavor_ref" : "%(host)s/openstack/flavors/1", + "imageRef" : "%(glance_host)s/openstack/images/%(image_id)s", + "flavorRef" : "%(host)s/openstack/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : 
"new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl index 9e6d436840..37f0a75d0a 100644 --- a/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(host)s/openstack/images/%(image_id)s", - "flavor_ref" : "%(host)s/openstack/flavors/1", + "imageRef" : "%(host)s/openstack/images/%(image_id)s", + "flavorRef" : "%(host)s/openstack/flavors/1", "metadata" : { "My Server Name" : "Apache1" }, diff --git a/nova/tests/integrated/v3/api_samples/server-ips/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/server-ips/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/server-ips/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/server-ips/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" 
: "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl index 919e5b3951..8f38088c19 100644 --- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl +++ b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl @@ -1,6 +1,6 @@ { "rebuild" : { - "image_ref" : "%(glance_host)s/images/%(uuid)s", + "imageRef" : "%(glance_host)s/images/%(uuid)s", "name" : "%(name)s", "adminPass" : "%(pass)s", "metadata" : { diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl index 219aebc853..6385f10593 100644 --- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl +++ b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl @@ -1,6 +1,6 @@ { "rebuild" : { - "image_ref" : "%(glance_host)s/images/%(uuid)s", + "imageRef" : "%(glance_host)s/images/%(uuid)s", "name" : "%(name)s", "adminPass" : "%(pass)s", "metadata" : { diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-resize.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-action-resize.json.tpl index 368e6bc076..468a88da24 100644 --- a/nova/tests/integrated/v3/api_samples/servers/server-action-resize.json.tpl +++ b/nova/tests/integrated/v3/api_samples/servers/server-action-resize.json.tpl @@ -1,5 +1,5 @@ { "resize" : { - "flavor_ref" : "%(id)s" + "flavorRef" : "%(id)s" } } diff --git a/nova/tests/integrated/v3/api_samples/servers/server-post-req.json.tpl 
b/nova/tests/integrated/v3/api_samples/servers/server-post-req.json.tpl index d9a7537dfb..ab0a3bb797 100644 --- a/nova/tests/integrated/v3/api_samples/servers/server-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/servers/server-post-req.json.tpl @@ -1,8 +1,8 @@ { "server" : { "name" : "new-server-test", - "image_ref" : "%(glance_host)s/images/%(image_id)s", - "flavor_ref" : "%(host)s/flavors/1", + "imageRef" : "%(glance_host)s/images/%(image_id)s", + "flavorRef" : "%(host)s/flavors/1", "metadata" : { "My Server Name" : "Apache1" } From f6587818a085e6775690ea2b6be7316d11d89d2b Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 13 Aug 2014 12:17:00 -0700 Subject: [PATCH 399/486] Treat instance like an object in _default_block_device_names The _default_block_device_names method already gets an instance object but was treating it like a primitive dict so this change enforces the object usage with dot notation when accessing fields. Also updates the existing unit tests to use an instance object. 
Part of blueprint compute-manager-objects-juno Change-Id: I2706fa081ba427956dc4f643d1e5664eaa2573ed --- nova/compute/manager.py | 10 +++++----- nova/tests/compute/test_compute.py | 14 +++++++------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 718f285f1f..1ccfa2968c 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1672,10 +1672,10 @@ def _default_block_device_names(self, context, instance, if root_bdm.device_name: root_device_name = root_bdm.device_name - instance['root_device_name'] = root_device_name + instance.root_device_name = root_device_name update_instance = True - elif instance['root_device_name']: - root_device_name = instance['root_device_name'] + elif instance.root_device_name: + root_device_name = instance.root_device_name root_bdm.device_name = root_device_name update_root_bdm = True else: @@ -1683,12 +1683,12 @@ def _default_block_device_names(self, context, instance, image_meta, root_bdm) - instance['root_device_name'] = root_device_name + instance.root_device_name = root_device_name root_bdm.device_name = root_device_name update_instance = update_root_bdm = True if update_instance: - self._instance_update(context, instance['uuid'], + self._instance_update(context, instance.uuid, root_device_name=root_device_name) if update_root_bdm: root_bdm.save() diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index e921f91570..025c51326a 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -6805,7 +6805,7 @@ def fake_setup_networks_on_host(self, *args, **kwargs): self.assertEqual(vm_states.ACTIVE, instance['vm_state']) def _get_instance_and_bdm_for_dev_defaults_tests(self): - instance = self._create_fake_instance( + instance = self._create_fake_instance_obj( params={'root_device_name': '/dev/vda'}) block_device_mapping = block_device_obj.block_device_make_list( self.context, 
[fake_block_device.FakeDbBlockDeviceDict( @@ -6820,11 +6820,11 @@ def _get_instance_and_bdm_for_dev_defaults_tests(self): def test_default_block_device_names_empty_instance_root_dev(self): instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests() - instance['root_device_name'] = None + instance.root_device_name = None self.mox.StubOutWithMock(self.compute, '_instance_update') self.mox.StubOutWithMock(self.compute, '_default_device_names_for_instance') - self.compute._instance_update(self.context, instance['uuid'], + self.compute._instance_update(self.context, instance.uuid, root_device_name='/dev/vda') self.compute._default_device_names_for_instance(instance, '/dev/vda', [], [], @@ -6852,7 +6852,7 @@ def test_default_block_device_names_empty_root_device(self): def test_default_block_device_names_no_root_device(self): instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests() - instance['root_device_name'] = None + instance.root_device_name = None bdms[0]['device_name'] = None self.mox.StubOutWithMock(self.compute, '_instance_update') self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save') @@ -6863,7 +6863,7 @@ def test_default_block_device_names_no_root_device(self): self.compute._default_root_device_name(instance, mox.IgnoreArg(), bdms[0]).AndReturn('/dev/vda') - self.compute._instance_update(self.context, instance['uuid'], + self.compute._instance_update(self.context, instance.uuid, root_device_name='/dev/vda') bdms[0].save().AndReturn(None) self.compute._default_device_names_for_instance(instance, @@ -6875,7 +6875,7 @@ def test_default_block_device_names_no_root_device(self): {}, bdms) def test_default_block_device_names_with_blank_volumes(self): - instance = self._create_fake_instance() + instance = self._create_fake_instance_obj() image_meta = {} root_volume = objects.BlockDeviceMapping( **fake_block_device.FakeDbBlockDeviceDict({ @@ -6926,7 +6926,7 @@ def test_default_block_device_names_with_blank_volumes(self): 
default_root_device.assert_called_once_with(instance, image_meta, bdms[0]) instance_update.assert_called_once_with( - self.context, instance['uuid'], root_device_name='/dev/vda') + self.context, instance.uuid, root_device_name='/dev/vda') self.assertTrue(object_save.called) default_device_names.assert_called_once_with(instance, '/dev/vda', [bdms[-2]], [bdms[-1]], From eaa90be5ad759919f67c8e586851b8f9fadef580 Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Thu, 17 Jul 2014 14:11:50 +0930 Subject: [PATCH 400/486] Check compulsory flavor create parameters exist Check that the name, ram, vcpu and disk parameters exist in the request for flavor creation in order to be able to return a more informative error message to a user when they are not supplied. Change-Id: I7fd235f8b57e5ecba37e50d6f0ce0a3866c1cd55 Closes-Bug: 1253525 --- .../openstack/compute/contrib/flavormanage.py | 16 +++++++++ .../compute/contrib/test_flavor_manage.py | 33 +++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/nova/api/openstack/compute/contrib/flavormanage.py b/nova/api/openstack/compute/contrib/flavormanage.py index af5df772e0..707253be6b 100644 --- a/nova/api/openstack/compute/contrib/flavormanage.py +++ b/nova/api/openstack/compute/contrib/flavormanage.py @@ -56,10 +56,26 @@ def _create(self, req, body): raise webob.exc.HTTPBadRequest(explanation=msg) vals = body['flavor'] name = vals.get('name') + if name is None: + msg = _("A valid name parameter is required") + raise webob.exc.HTTPBadRequest(explanation=msg) + flavorid = vals.get('id') memory = vals.get('ram') + if memory is None: + msg = _("A valid ram parameter is required") + raise webob.exc.HTTPBadRequest(explanation=msg) + vcpus = vals.get('vcpus') + if vcpus is None: + msg = _("A valid vcpus parameter is required") + raise webob.exc.HTTPBadRequest(explanation=msg) + root_gb = vals.get('disk') + if root_gb is None: + msg = _("A valid disk parameter is required") + raise webob.exc.HTTPBadRequest(explanation=msg) + 
ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral', 0) swap = vals.get('swap', 0) rxtx_factor = vals.get('rxtx_factor', 1.0) diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py index 43a84e12f3..925e2ab761 100644 --- a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py +++ b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py @@ -120,6 +120,39 @@ def test_delete(self): self.assertRaises(webob.exc.HTTPNotFound, self.controller._delete, req, "failtest") + def _test_create_missing_parameter(self, parameter): + body = { + "flavor": { + "name": "azAZ09. -_", + "ram": 512, + "vcpus": 2, + "disk": 1, + "OS-FLV-EXT-DATA:ephemeral": 1, + "id": unicode('1234'), + "swap": 512, + "rxtx_factor": 1, + "os-flavor-access:is_public": True, + } + } + + del body['flavor'][parameter] + + req = fakes.HTTPRequest.blank('/v2/123/flavors') + self.assertRaises(webob.exc.HTTPBadRequest, self.controller._create, + req, body) + + def test_create_missing_name(self): + self._test_create_missing_parameter('name') + + def test_create_missing_ram(self): + self._test_create_missing_parameter('ram') + + def test_create_missing_vcpus(self): + self._test_create_missing_parameter('vcpus') + + def test_create_missing_disk(self): + self._test_create_missing_parameter('disk') + def _create_flavor_success_case(self, body): url = '/v2/fake/flavors' req = webob.Request.blank(url) From 569a8739bcf7671dc26e2c7e47da9b8873bb276a Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Thu, 14 Aug 2014 10:00:25 +0800 Subject: [PATCH 401/486] Share unittest between v2 and v2.1 for hide_server_addresses extension This patch make the v2.1 hide_server_addresses extension running on v2 unittest, also delete the v3 unittest. 
Partially implements blueprint v2-on-v3-api Change-Id: I9b8e8d10ba59b344d5bdf285fffffd98ed02ebc0 --- .../contrib/test_hide_server_addresses.py | 37 +++-- .../plugins/v3/test_hide_server_addresses.py | 136 ------------------ 2 files changed, 25 insertions(+), 148 deletions(-) delete mode 100644 nova/tests/api/openstack/compute/plugins/v3/test_hide_server_addresses.py diff --git a/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py b/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py index 905137eafb..b840968ca2 100644 --- a/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py +++ b/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py @@ -41,23 +41,25 @@ def _return_server(*_args, **_kwargs): return _return_server -class HideServerAddressesTest(test.TestCase): +class HideServerAddressesTestV21(test.TestCase): content_type = 'application/json' + base_url = '/v3/servers' + + def _setup_wsgi(self): + self.wsgi_app = fakes.wsgi_app_v3( + init_only=('servers', 'os-hide-server-addresses')) def setUp(self): - super(HideServerAddressesTest, self).setUp() + super(HideServerAddressesTestV21, self).setUp() fakes.stub_out_nw_api(self.stubs) - self.flags( - osapi_compute_extension=[ - 'nova.api.openstack.compute.contrib.select_extensions'], - osapi_compute_ext_list=['Hide_server_addresses']) return_server = fakes.fake_instance_get() self.stubs.Set(db, 'instance_get_by_uuid', return_server) + self._setup_wsgi() def _make_request(self, url): req = webob.Request.blank(url) req.headers['Accept'] = self.content_type - res = req.get_response(fakes.wsgi_app(init_only=('servers',))) + res = req.get_response(self.wsgi_app) return res @staticmethod @@ -85,7 +87,7 @@ def test_show_hides_in_building(self): self.stubs.Set(compute.api.API, 'get', fake_compute_get(instance_id, uuid=uuid, vm_state=vm_states.BUILDING)) - res = self._make_request('/v2/fake/servers/%s' % uuid) + res = self._make_request(self.base_url + '/%s' 
% uuid) self.assertEqual(res.status_int, 200) server = self._get_server(res.body) @@ -98,7 +100,7 @@ def test_show(self): self.stubs.Set(compute.api.API, 'get', fake_compute_get(instance_id, uuid=uuid, vm_state=vm_states.ACTIVE)) - res = self._make_request('/v2/fake/servers/%s' % uuid) + res = self._make_request(self.base_url + '/%s' % uuid) self.assertEqual(res.status_int, 200) server = self._get_server(res.body) @@ -118,7 +120,7 @@ def get_all(*args, **kwargs): args[1], objects.InstanceList(), instances, fields) self.stubs.Set(compute.api.API, 'get_all', get_all) - res = self._make_request('/v2/fake/servers/detail') + res = self._make_request(self.base_url + '/detail') self.assertEqual(res.status_int, 200) servers = self._get_servers(res.body) @@ -136,12 +138,23 @@ def fake_compute_get(*args, **kwargs): raise exception.InstanceNotFound(instance_id='fake') self.stubs.Set(compute.api.API, 'get', fake_compute_get) - res = self._make_request('/v2/fake/servers/' + fakes.get_fake_uuid()) + res = self._make_request(self.base_url + '/' + fakes.get_fake_uuid()) self.assertEqual(res.status_int, 404) -class HideAddressesXmlTest(HideServerAddressesTest): +class HideServerAddressesTestV2(HideServerAddressesTestV21): + base_url = '/v2/fake/servers' + + def _setup_wsgi(self): + self.flags( + osapi_compute_extension=[ + 'nova.api.openstack.compute.contrib.select_extensions'], + osapi_compute_ext_list=['Hide_server_addresses']) + self.wsgi_app = fakes.wsgi_app(init_only=('servers',)) + + +class HideAddressesXmlTest(HideServerAddressesTestV2): content_type = 'application/xml' @staticmethod diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_hide_server_addresses.py b/nova/tests/api/openstack/compute/plugins/v3/test_hide_server_addresses.py deleted file mode 100644 index ad57aa095c..0000000000 --- a/nova/tests/api/openstack/compute/plugins/v3/test_hide_server_addresses.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import itertools - -import webob - -from nova import compute -from nova.compute import vm_states -from nova import db -from nova import exception -from nova import objects -from nova.objects import instance as instance_obj -from nova.openstack.common import jsonutils -from nova import test -from nova.tests.api.openstack import fakes -from nova.tests import fake_instance - - -SENTINEL = object() - - -def fake_compute_get(*args, **kwargs): - def _return_server(*_args, **_kwargs): - inst = fakes.stub_instance(*args, **kwargs) - return fake_instance.fake_instance_obj(_args[1], **inst) - return _return_server - - -class HideServerAddressesTest(test.TestCase): - content_type = 'application/json' - - def setUp(self): - super(HideServerAddressesTest, self).setUp() - fakes.stub_out_nw_api(self.stubs) - return_server = fakes.fake_instance_get() - self.stubs.Set(db, 'instance_get_by_uuid', return_server) - - def _make_request(self, url): - req = webob.Request.blank(url) - req.headers['Accept'] = self.content_type - res = req.get_response(fakes.wsgi_app_v3( - init_only=('servers', 'os-hide-server-addresses'))) - return res - - @staticmethod - def _get_server(body): - return jsonutils.loads(body).get('server') - - @staticmethod - def _get_servers(body): - return jsonutils.loads(body).get('servers') - - @staticmethod - def _get_addresses(server): - return server.get('addresses', SENTINEL) - - def _check_addresses(self, addresses, 
exists): - self.assertTrue(addresses is not SENTINEL) - if exists: - self.assertTrue(addresses) - else: - self.assertFalse(addresses) - - def test_show_hides_in_building(self): - instance_id = 1 - uuid = fakes.get_fake_uuid(instance_id) - self.stubs.Set(compute.api.API, 'get', - fake_compute_get(instance_id, uuid=uuid, - vm_state=vm_states.BUILDING)) - res = self._make_request('/v3/servers/%s' % uuid) - self.assertEqual(res.status_int, 200) - - server = self._get_server(res.body) - addresses = self._get_addresses(server) - self._check_addresses(addresses, exists=False) - - def test_show(self): - instance_id = 1 - uuid = fakes.get_fake_uuid(instance_id) - self.stubs.Set(compute.api.API, 'get', - fake_compute_get(instance_id, uuid=uuid, - vm_state=vm_states.ACTIVE)) - res = self._make_request('/v3/servers/%s' % uuid) - self.assertEqual(res.status_int, 200) - - server = self._get_server(res.body) - addresses = self._get_addresses(server) - self._check_addresses(addresses, exists=True) - - def test_detail_hides_building_server_addresses(self): - instance_0 = fakes.stub_instance(0, uuid=fakes.get_fake_uuid(0), - vm_state=vm_states.ACTIVE) - instance_1 = fakes.stub_instance(1, uuid=fakes.get_fake_uuid(1), - vm_state=vm_states.BUILDING) - instances = [instance_0, instance_1] - - def get_all(*args, **kwargs): - fields = instance_obj.INSTANCE_DEFAULT_FIELDS - return instance_obj._make_instance_list( - args[1], objects.InstanceList(), instances, fields) - - self.stubs.Set(compute.api.API, 'get_all', get_all) - res = self._make_request('/v3/servers/detail') - - self.assertEqual(res.status_int, 200) - servers = self._get_servers(res.body) - - self.assertEqual(len(servers), len(instances)) - - for instance, server in itertools.izip(instances, servers): - addresses = self._get_addresses(server) - exists = (instance['vm_state'] == vm_states.ACTIVE) - self._check_addresses(addresses, exists=exists) - - def test_no_instance_passthrough_404(self): - - def fake_compute_get(*args, 
**kwargs): - raise exception.InstanceNotFound(instance_id='fake') - - self.stubs.Set(compute.api.API, 'get', fake_compute_get) - res = self._make_request('/v3/servers/' + fakes.get_fake_uuid()) - - self.assertEqual(res.status_int, 404) From e9f7fed9d5c595b152d044f52487534b918d5ed7 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 18 Aug 2014 06:04:32 +0000 Subject: [PATCH 402/486] Imported Translations from Transifex Change-Id: I2749dc18aac367faf827caf8f1f2699fbf8ce954 --- nova/locale/de/LC_MESSAGES/nova-log-info.po | 88 ++- .../locale/en_AU/LC_MESSAGES/nova-log-info.po | 88 ++- .../en_GB/LC_MESSAGES/nova-log-error.po | 160 ++-- .../locale/en_GB/LC_MESSAGES/nova-log-info.po | 90 ++- nova/locale/en_US/LC_MESSAGES/nova.po | 710 +++++++++-------- nova/locale/es/LC_MESSAGES/nova-log-error.po | 163 ++-- nova/locale/es/LC_MESSAGES/nova-log-info.po | 90 ++- .../locale/es/LC_MESSAGES/nova-log-warning.po | 130 ++-- nova/locale/es/LC_MESSAGES/nova.po | 721 +++++++++--------- nova/locale/fr/LC_MESSAGES/nova-log-error.po | 160 ++-- nova/locale/fr/LC_MESSAGES/nova-log-info.po | 90 ++- nova/locale/it/LC_MESSAGES/nova-log-info.po | 88 ++- nova/locale/ja/LC_MESSAGES/nova-log-error.po | 160 ++-- nova/locale/ja/LC_MESSAGES/nova-log-info.po | 88 ++- .../ko_KR/LC_MESSAGES/nova-log-error.po | 160 ++-- .../locale/ko_KR/LC_MESSAGES/nova-log-info.po | 88 ++- nova/locale/nova-log-error.pot | 162 ++-- nova/locale/nova-log-info.pot | 90 ++- nova/locale/nova-log-warning.pot | 132 ++-- nova/locale/nova.pot | 711 +++++++++-------- .../pt_BR/LC_MESSAGES/nova-log-error.po | 160 ++-- .../locale/pt_BR/LC_MESSAGES/nova-log-info.po | 88 ++- .../zh_CN/LC_MESSAGES/nova-log-error.po | 160 ++-- .../locale/zh_CN/LC_MESSAGES/nova-log-info.po | 88 ++- .../locale/zh_TW/LC_MESSAGES/nova-log-info.po | 88 ++- 25 files changed, 2620 insertions(+), 2133 deletions(-) diff --git a/nova/locale/de/LC_MESSAGES/nova-log-info.po b/nova/locale/de/LC_MESSAGES/nova-log-info.po index f3dd924aa1..03883e9935 
100644 --- a/nova/locale/de/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/de/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: German (http://www.transifex.com/projects/p/nova/language/" @@ -44,7 +44,12 @@ msgstr "" msgid "Deleting network with id %s" msgstr "" -#: nova/compute/manager.py:5452 +#: nova/compute/manager.py:2663 +#, python-format +msgid "bringing vm to original state: '%s'" +msgstr "" + +#: nova/compute/manager.py:5471 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." @@ -148,130 +153,135 @@ msgstr "Lösche doppelte Zeile mit der ID %(id)s aus der Tabelle %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" +#: nova/virt/block_device.py:221 +#, python-format +msgid "preserve multipath_id %s" +msgstr "" + #: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:137 +#: nova/virt/disk/vfs/guestfs.py:139 msgid "Unable to force TCG mode, libguestfs too old?" msgstr "" -#: nova/virt/libvirt/driver.py:837 +#: nova/virt/libvirt/driver.py:835 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:950 +#: nova/virt/libvirt/driver.py:948 msgid "Instance destroyed successfully." msgstr "" -#: nova/virt/libvirt/driver.py:960 +#: nova/virt/libvirt/driver.py:958 msgid "Instance may be started again." msgstr "" -#: nova/virt/libvirt/driver.py:970 +#: nova/virt/libvirt/driver.py:968 msgid "Going to destroy instance again." 
msgstr "" -#: nova/virt/libvirt/driver.py:1570 +#: nova/virt/libvirt/driver.py:1576 msgid "Beginning live snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1573 +#: nova/virt/libvirt/driver.py:1579 msgid "Beginning cold snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1602 +#: nova/virt/libvirt/driver.py:1608 msgid "Snapshot extracted, beginning image upload" msgstr "" -#: nova/virt/libvirt/driver.py:1614 +#: nova/virt/libvirt/driver.py:1620 msgid "Snapshot image upload complete" msgstr "" -#: nova/virt/libvirt/driver.py:2047 +#: nova/virt/libvirt/driver.py:2132 msgid "Instance soft rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2090 +#: nova/virt/libvirt/driver.py:2175 msgid "Instance shutdown successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2098 +#: nova/virt/libvirt/driver.py:2183 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" -#: nova/virt/libvirt/driver.py:2167 +#: nova/virt/libvirt/driver.py:2252 msgid "Instance rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2335 +#: nova/virt/libvirt/driver.py:2420 msgid "Instance spawned successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:2351 +#: nova/virt/libvirt/driver.py:2436 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 +#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" -#: nova/virt/libvirt/driver.py:2646 +#: nova/virt/libvirt/driver.py:2731 msgid "Creating image" msgstr "" -#: nova/virt/libvirt/driver.py:2772 +#: nova/virt/libvirt/driver.py:2857 msgid "Using config drive" msgstr "" -#: nova/virt/libvirt/driver.py:2781 +#: nova/virt/libvirt/driver.py:2866 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/libvirt/driver.py:3334 +#: nova/virt/libvirt/driver.py:3437 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4217 +#: nova/virt/libvirt/driver.py:4320 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4223 +#: nova/virt/libvirt/driver.py:4326 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" msgstr "" -#: nova/virt/libvirt/driver.py:4465 +#: nova/virt/libvirt/driver.py:4568 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5207 +#: nova/virt/libvirt/driver.py:5316 msgid "Instance running successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:5481 +#: nova/virt/libvirt/driver.py:5590 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5494 +#: nova/virt/libvirt/driver.py:5603 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5498 +#: nova/virt/libvirt/driver.py:5607 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -288,55 +298,55 @@ msgstr "" msgid "Attempted to unfilter instance which is not filtered" msgstr "" -#: nova/virt/libvirt/imagecache.py:191 +#: nova/virt/libvirt/imagecache.py:190 #, python-format msgid "Writing stored info to %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:401 +#: nova/virt/libvirt/imagecache.py:400 #, python-format msgid "" "image %(id)s at (%(base_file)s): image verification skipped, no hash stored" msgstr "" -#: nova/virt/libvirt/imagecache.py:410 +#: nova/virt/libvirt/imagecache.py:409 #, python-format msgid "%(id)s (%(base_file)s): generating checksum" msgstr "" -#: nova/virt/libvirt/imagecache.py:438 +#: nova/virt/libvirt/imagecache.py:437 #, python-format msgid "Base file too young to remove: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:441 +#: nova/virt/libvirt/imagecache.py:440 #, python-format msgid "Removing base file: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:459 +#: nova/virt/libvirt/imagecache.py:458 #, python-format msgid "image %(id)s at (%(base_file)s): checking" msgstr "" -#: nova/virt/libvirt/imagecache.py:483 +#: nova/virt/libvirt/imagecache.py:482 #, python-format msgid "" "image %(id)s at (%(base_file)s): in use: on this node %(local)d local, " "%(remote)d on other nodes sharing this instance storage" msgstr "" -#: nova/virt/libvirt/imagecache.py:550 +#: nova/virt/libvirt/imagecache.py:549 #, python-format msgid "Active base files: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:553 +#: nova/virt/libvirt/imagecache.py:552 #, python-format msgid "Corrupt base files: %s" msgstr "" -#: 
nova/virt/libvirt/imagecache.py:557 +#: nova/virt/libvirt/imagecache.py:556 #, python-format msgid "Removable base files: %s" msgstr "" diff --git a/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po b/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po index 06d38f2769..475632a54e 100644 --- a/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: English (Australia) (http://www.transifex.com/projects/p/nova/" @@ -44,7 +44,12 @@ msgstr "" msgid "Deleting network with id %s" msgstr "" -#: nova/compute/manager.py:5452 +#: nova/compute/manager.py:2663 +#, python-format +msgid "bringing vm to original state: '%s'" +msgstr "" + +#: nova/compute/manager.py:5471 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." @@ -146,130 +151,135 @@ msgstr "Deleting duplicated row with id: %(id)s from table: %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" +#: nova/virt/block_device.py:221 +#, python-format +msgid "preserve multipath_id %s" +msgstr "" + #: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:137 +#: nova/virt/disk/vfs/guestfs.py:139 msgid "Unable to force TCG mode, libguestfs too old?" msgstr "" -#: nova/virt/libvirt/driver.py:837 +#: nova/virt/libvirt/driver.py:835 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:950 +#: nova/virt/libvirt/driver.py:948 msgid "Instance destroyed successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:960 +#: nova/virt/libvirt/driver.py:958 msgid "Instance may be started again." msgstr "" -#: nova/virt/libvirt/driver.py:970 +#: nova/virt/libvirt/driver.py:968 msgid "Going to destroy instance again." msgstr "" -#: nova/virt/libvirt/driver.py:1570 +#: nova/virt/libvirt/driver.py:1576 msgid "Beginning live snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1573 +#: nova/virt/libvirt/driver.py:1579 msgid "Beginning cold snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1602 +#: nova/virt/libvirt/driver.py:1608 msgid "Snapshot extracted, beginning image upload" msgstr "" -#: nova/virt/libvirt/driver.py:1614 +#: nova/virt/libvirt/driver.py:1620 msgid "Snapshot image upload complete" msgstr "" -#: nova/virt/libvirt/driver.py:2047 +#: nova/virt/libvirt/driver.py:2132 msgid "Instance soft rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2090 +#: nova/virt/libvirt/driver.py:2175 msgid "Instance shutdown successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2098 +#: nova/virt/libvirt/driver.py:2183 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" -#: nova/virt/libvirt/driver.py:2167 +#: nova/virt/libvirt/driver.py:2252 msgid "Instance rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2335 +#: nova/virt/libvirt/driver.py:2420 msgid "Instance spawned successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:2351 +#: nova/virt/libvirt/driver.py:2436 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 +#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" -#: nova/virt/libvirt/driver.py:2646 +#: nova/virt/libvirt/driver.py:2731 msgid "Creating image" msgstr "" -#: nova/virt/libvirt/driver.py:2772 +#: nova/virt/libvirt/driver.py:2857 msgid "Using config drive" msgstr "" -#: nova/virt/libvirt/driver.py:2781 +#: nova/virt/libvirt/driver.py:2866 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/libvirt/driver.py:3334 +#: nova/virt/libvirt/driver.py:3437 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4217 +#: nova/virt/libvirt/driver.py:4320 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4223 +#: nova/virt/libvirt/driver.py:4326 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" msgstr "" -#: nova/virt/libvirt/driver.py:4465 +#: nova/virt/libvirt/driver.py:4568 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5207 +#: nova/virt/libvirt/driver.py:5316 msgid "Instance running successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:5481 +#: nova/virt/libvirt/driver.py:5590 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5494 +#: nova/virt/libvirt/driver.py:5603 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5498 +#: nova/virt/libvirt/driver.py:5607 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -286,55 +296,55 @@ msgstr "" msgid "Attempted to unfilter instance which is not filtered" msgstr "" -#: nova/virt/libvirt/imagecache.py:191 +#: nova/virt/libvirt/imagecache.py:190 #, python-format msgid "Writing stored info to %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:401 +#: nova/virt/libvirt/imagecache.py:400 #, python-format msgid "" "image %(id)s at (%(base_file)s): image verification skipped, no hash stored" msgstr "" -#: nova/virt/libvirt/imagecache.py:410 +#: nova/virt/libvirt/imagecache.py:409 #, python-format msgid "%(id)s (%(base_file)s): generating checksum" msgstr "" -#: nova/virt/libvirt/imagecache.py:438 +#: nova/virt/libvirt/imagecache.py:437 #, python-format msgid "Base file too young to remove: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:441 +#: nova/virt/libvirt/imagecache.py:440 #, python-format msgid "Removing base file: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:459 +#: nova/virt/libvirt/imagecache.py:458 #, python-format msgid "image %(id)s at (%(base_file)s): checking" msgstr "" -#: nova/virt/libvirt/imagecache.py:483 +#: nova/virt/libvirt/imagecache.py:482 #, python-format msgid "" "image %(id)s at (%(base_file)s): in use: on this node %(local)d local, " "%(remote)d on other nodes sharing this instance storage" msgstr "" -#: nova/virt/libvirt/imagecache.py:550 +#: nova/virt/libvirt/imagecache.py:549 #, python-format msgid "Active base files: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:553 +#: nova/virt/libvirt/imagecache.py:552 #, python-format msgid "Corrupt base files: %s" msgstr "" -#: 
nova/virt/libvirt/imagecache.py:557 +#: nova/virt/libvirt/imagecache.py:556 #, python-format msgid "Removable base files: %s" msgstr "" diff --git a/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po b/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po index 482a94af32..87eaad1f05 100644 --- a/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:04+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" @@ -196,7 +196,7 @@ msgstr "" msgid "Failed to dealloc network for failed instance" msgstr "" -#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +#: nova/compute/manager.py:1458 nova/compute/manager.py:3527 msgid "Error trying to reschedule" msgstr "" @@ -205,99 +205,139 @@ msgstr "" msgid "Instance failed network setup after %(attempts)d attempt(s)" msgstr "" -#: nova/compute/manager.py:1755 +#: nova/compute/manager.py:1761 msgid "Instance failed block device setup" msgstr "" -#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 -#: nova/compute/manager.py:4058 +#: nova/compute/manager.py:1781 nova/compute/manager.py:2123 +#: nova/compute/manager.py:4071 msgid "Instance failed to spawn" msgstr "" -#: nova/compute/manager.py:1957 +#: nova/compute/manager.py:1964 msgid "Unexpected build failure, not rescheduling build." 
msgstr "" -#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +#: nova/compute/manager.py:2033 nova/compute/manager.py:2085 msgid "Failed to allocate network(s)" msgstr "" -#: nova/compute/manager.py:2104 +#: nova/compute/manager.py:2111 msgid "Failure prepping block device" msgstr "" -#: nova/compute/manager.py:2137 +#: nova/compute/manager.py:2144 msgid "Failed to deallocate networks" msgstr "" -#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 -#: nova/compute/manager.py:5803 +#: nova/compute/manager.py:2374 nova/compute/manager.py:3718 +#: nova/compute/manager.py:5822 msgid "Setting instance vm_state to ERROR" msgstr "" -#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#: nova/compute/manager.py:2586 nova/compute/manager.py:4933 #, python-format msgid "Failed to get compute_info for %s" msgstr "" -#: nova/compute/manager.py:3005 +#: nova/compute/manager.py:3013 #, python-format msgid "set_admin_password failed: %s" msgstr "" -#: nova/compute/manager.py:3090 +#: nova/compute/manager.py:3098 msgid "Error trying to Rescue Instance" msgstr "" -#: nova/compute/manager.py:3711 +#: nova/compute/manager.py:3724 #, python-format msgid "Failed to rollback quota for failed finish_resize: %s" msgstr "" -#: nova/compute/manager.py:4310 +#: nova/compute/manager.py:4323 #, python-format msgid "Failed to attach %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4349 +#: nova/compute/manager.py:4362 #, python-format msgid "Failed to detach volume %(volume_id)s from %(mp)s" msgstr "" -#: nova/compute/manager.py:4422 +#: nova/compute/manager.py:4441 #, python-format msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" msgstr "" -#: nova/compute/manager.py:4429 +#: nova/compute/manager.py:4448 #, python-format msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4716 +#: nova/compute/manager.py:4735 #, python-format msgid "Pre live migration 
failed at %s" msgstr "" -#: nova/compute/manager.py:5216 +#: nova/compute/manager.py:5235 msgid "Periodic task failed to offload instance." msgstr "" -#: nova/compute/manager.py:5256 +#: nova/compute/manager.py:5275 #, python-format msgid "Failed to generate usage audit for instance on host %s" msgstr "" -#: nova/compute/manager.py:5446 +#: nova/compute/manager.py:5465 msgid "" "Periodic sync_power_state task had an error while processing an instance." msgstr "" -#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 -#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +#: nova/compute/manager.py:5568 nova/compute/manager.py:5577 +#: nova/compute/manager.py:5608 nova/compute/manager.py:5619 msgid "error during stop() in sync_power_state." msgstr "" +#: nova/network/neutronv2/api.py:234 +#, python-format +msgid "Neutron error creating port on network %s" +msgstr "" + +#: nova/network/neutronv2/api.py:418 +#, python-format +msgid "Failed to update port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:425 +#, python-format +msgid "Failed to delete port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524 +#, python-format +msgid "Failed to delete neutron port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:697 +#, python-format +msgid "Failed to access port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:931 +#, python-format +msgid "Unable to access floating IP %s" +msgstr "" + +#: nova/network/neutronv2/api.py:1065 +#, python-format +msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" +msgstr "" + +#: nova/network/neutronv2/api.py:1124 +#, python-format +msgid "Unable to update host of port %s" +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "Failed to notify cells of instance fault" @@ -389,116 +429,116 @@ msgid "" "Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s" msgstr "" -#: 
nova/virt/libvirt/driver.py:641 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:766 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:929 +#: nova/virt/libvirt/driver.py:927 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1007 +#: nova/virt/libvirt/driver.py:1005 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1035 +#: nova/virt/libvirt/driver.py:1033 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1438 +#: nova/virt/libvirt/driver.py:1444 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1465 +#: nova/virt/libvirt/driver.py:1471 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1717 +#: nova/virt/libvirt/driver.py:1726 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1825 +#: nova/virt/libvirt/driver.py:1834 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1831 +#: nova/virt/libvirt/driver.py:1840 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1880 +#: nova/virt/libvirt/driver.py:1889 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2026 +#: nova/virt/libvirt/driver.py:2111 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 +#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2620 +#: nova/virt/libvirt/driver.py:2705 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2788 +#: nova/virt/libvirt/driver.py:2873 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2881 +#: nova/virt/libvirt/driver.py:2966 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3680 +#: nova/virt/libvirt/driver.py:3783 #, python-format msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3684 +#: nova/virt/libvirt/driver.py:3787 #, python-format msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3689 +#: nova/virt/libvirt/driver.py:3792 #, python-format msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3703 +#: nova/virt/libvirt/driver.py:3806 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:4012 +#: nova/virt/libvirt/driver.py:4115 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4691 +#: nova/virt/libvirt/driver.py:4794 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5487 +#: nova/virt/libvirt/driver.py:5596 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" @@ -515,17 +555,17 @@ msgid "" "%(size)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:130 +#: nova/virt/libvirt/imagecache.py:129 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:391 +#: nova/virt/libvirt/imagecache.py:390 #, python-format msgid "image %(id)s at (%(base_file)s): image verification failed" msgstr "" -#: nova/virt/libvirt/imagecache.py:448 +#: nova/virt/libvirt/imagecache.py:447 #, python-format msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "" @@ -535,19 +575,19 @@ msgstr "" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/rbd.py:62 +#: nova/virt/libvirt/rbd_utils.py:62 #, python-format msgid "error opening rbd image %s" msgstr "" -#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 -#: nova/virt/libvirt/vif.py:533 +#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474 +#: nova/virt/libvirt/vif.py:496 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 -#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 -#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 +#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560 +#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598 +#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639 msgid "Failed while unplugging vif" msgstr "" @@ -566,18 +606,18 @@ msgstr "" msgid "Couldn't unmount the GlusterFS share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:509 +#: nova/virt/vmwareapi/vmops.py:508 #, python-format msgid "" "Failed to copy cached image %(source)s to %(dest)s for resize: 
%(error)s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1553 +#: nova/virt/vmwareapi/vmops.py:1551 #, python-format msgid "Attaching network adapter failed. Exception: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1593 +#: nova/virt/vmwareapi/vmops.py:1591 #, python-format msgid "Detaching network adapter failed. Exception: %s" msgstr "" diff --git a/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po b/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po index 609e89ce4d..b726cf27ed 100644 --- a/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po @@ -8,8 +8,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" -"PO-Revision-Date: 2014-08-07 07:51+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" +"PO-Revision-Date: 2014-08-15 05:00+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" "nova/language/en_GB/)\n" @@ -45,7 +45,12 @@ msgstr "HTTP exception thrown: %s" msgid "Deleting network with id %s" msgstr "Deleting network with id %s" -#: nova/compute/manager.py:5452 +#: nova/compute/manager.py:2663 +#, python-format +msgid "bringing vm to original state: '%s'" +msgstr "bringing vm to original state: '%s'" + +#: nova/compute/manager.py:5471 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." @@ -148,98 +153,103 @@ msgstr "Deleting duplicated row with id: %(id)s from table: %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "%(num_values)d values found, of which the minimum value will be used." 
+#: nova/virt/block_device.py:221 +#, python-format +msgid "preserve multipath_id %s" +msgstr "" + #: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "instance chain %s disappeared during refresh, skipping" -#: nova/virt/disk/vfs/guestfs.py:137 +#: nova/virt/disk/vfs/guestfs.py:139 msgid "Unable to force TCG mode, libguestfs too old?" msgstr "Unable to force TCG mode, libguestfs too old?" -#: nova/virt/libvirt/driver.py:837 +#: nova/virt/libvirt/driver.py:835 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" -#: nova/virt/libvirt/driver.py:950 +#: nova/virt/libvirt/driver.py:948 msgid "Instance destroyed successfully." msgstr "Instance destroyed successfully." -#: nova/virt/libvirt/driver.py:960 +#: nova/virt/libvirt/driver.py:958 msgid "Instance may be started again." msgstr "Instance may be started again." -#: nova/virt/libvirt/driver.py:970 +#: nova/virt/libvirt/driver.py:968 msgid "Going to destroy instance again." msgstr "Going to destroy instance again." -#: nova/virt/libvirt/driver.py:1570 +#: nova/virt/libvirt/driver.py:1576 msgid "Beginning live snapshot process" msgstr "Beginning live snapshot process" -#: nova/virt/libvirt/driver.py:1573 +#: nova/virt/libvirt/driver.py:1579 msgid "Beginning cold snapshot process" msgstr "Beginning cold snapshot process" -#: nova/virt/libvirt/driver.py:1602 +#: nova/virt/libvirt/driver.py:1608 msgid "Snapshot extracted, beginning image upload" msgstr "Snapshot extracted, beginning image upload" -#: nova/virt/libvirt/driver.py:1614 +#: nova/virt/libvirt/driver.py:1620 msgid "Snapshot image upload complete" msgstr "Snapshot image upload complete" -#: nova/virt/libvirt/driver.py:2047 +#: nova/virt/libvirt/driver.py:2132 msgid "Instance soft rebooted successfully." msgstr "Instance soft rebooted successfully." 
-#: nova/virt/libvirt/driver.py:2090 +#: nova/virt/libvirt/driver.py:2175 msgid "Instance shutdown successfully." msgstr "Instance shutdown successfully." -#: nova/virt/libvirt/driver.py:2098 +#: nova/virt/libvirt/driver.py:2183 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "Instance may have been rebooted during soft reboot, so return now." -#: nova/virt/libvirt/driver.py:2167 +#: nova/virt/libvirt/driver.py:2252 msgid "Instance rebooted successfully." msgstr "Instance rebooted successfully." -#: nova/virt/libvirt/driver.py:2335 +#: nova/virt/libvirt/driver.py:2420 msgid "Instance spawned successfully." msgstr "Instance spawned successfully." -#: nova/virt/libvirt/driver.py:2351 +#: nova/virt/libvirt/driver.py:2436 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "data: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 +#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Truncated console log returned, %d bytes ignored" -#: nova/virt/libvirt/driver.py:2646 +#: nova/virt/libvirt/driver.py:2731 msgid "Creating image" msgstr "Creating image" -#: nova/virt/libvirt/driver.py:2772 +#: nova/virt/libvirt/driver.py:2857 msgid "Using config drive" msgstr "Using config drive" -#: nova/virt/libvirt/driver.py:2781 +#: nova/virt/libvirt/driver.py:2866 #, python-format msgid "Creating config drive at %(path)s" msgstr "Creating config drive at %(path)s" -#: nova/virt/libvirt/driver.py:3334 +#: nova/virt/libvirt/driver.py:3437 msgid "Configuring timezone for windows instance to localtime" msgstr "Configuring timezone for windows instance to localtime" -#: nova/virt/libvirt/driver.py:4217 +#: nova/virt/libvirt/driver.py:4320 #, python-format msgid "" "Getting block stats failed, device might have been detached. 
Instance=" @@ -248,7 +258,7 @@ msgstr "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" -#: nova/virt/libvirt/driver.py:4223 +#: nova/virt/libvirt/driver.py:4326 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -257,26 +267,26 @@ msgstr "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" -#: nova/virt/libvirt/driver.py:4465 +#: nova/virt/libvirt/driver.py:4568 #, python-format msgid "Instance launched has CPU info: %s" msgstr "Instance launched has CPU info: %s" -#: nova/virt/libvirt/driver.py:5207 +#: nova/virt/libvirt/driver.py:5316 msgid "Instance running successfully." msgstr "Instance running successfully." -#: nova/virt/libvirt/driver.py:5481 +#: nova/virt/libvirt/driver.py:5590 #, python-format msgid "Deleting instance files %s" msgstr "Deleting instance files %s" -#: nova/virt/libvirt/driver.py:5494 +#: nova/virt/libvirt/driver.py:5603 #, python-format msgid "Deletion of %s failed" msgstr "Deletion of %s failed" -#: nova/virt/libvirt/driver.py:5498 +#: nova/virt/libvirt/driver.py:5607 #, python-format msgid "Deletion of %s complete" msgstr "Deletion of %s complete" @@ -293,39 +303,39 @@ msgstr "Ensuring static filters" msgid "Attempted to unfilter instance which is not filtered" msgstr "Attempted to unfilter instance which is not filtered" -#: nova/virt/libvirt/imagecache.py:191 +#: nova/virt/libvirt/imagecache.py:190 #, python-format msgid "Writing stored info to %s" msgstr "Writing stored info to %s" -#: nova/virt/libvirt/imagecache.py:401 +#: nova/virt/libvirt/imagecache.py:400 #, python-format msgid "" "image %(id)s at (%(base_file)s): image verification skipped, no hash stored" msgstr "" "image %(id)s at (%(base_file)s): image verification skipped, no hash stored" -#: nova/virt/libvirt/imagecache.py:410 +#: nova/virt/libvirt/imagecache.py:409 #, python-format msgid 
"%(id)s (%(base_file)s): generating checksum" msgstr "%(id)s (%(base_file)s): generating checksum" -#: nova/virt/libvirt/imagecache.py:438 +#: nova/virt/libvirt/imagecache.py:437 #, python-format msgid "Base file too young to remove: %s" msgstr "Base file too young to remove: %s" -#: nova/virt/libvirt/imagecache.py:441 +#: nova/virt/libvirt/imagecache.py:440 #, python-format msgid "Removing base file: %s" msgstr "Removing base file: %s" -#: nova/virt/libvirt/imagecache.py:459 +#: nova/virt/libvirt/imagecache.py:458 #, python-format msgid "image %(id)s at (%(base_file)s): checking" msgstr "image %(id)s at (%(base_file)s): checking" -#: nova/virt/libvirt/imagecache.py:483 +#: nova/virt/libvirt/imagecache.py:482 #, python-format msgid "" "image %(id)s at (%(base_file)s): in use: on this node %(local)d local, " @@ -334,17 +344,17 @@ msgstr "" "image %(id)s at (%(base_file)s): in use: on this node %(local)d local, " "%(remote)d on other nodes sharing this instance storage" -#: nova/virt/libvirt/imagecache.py:550 +#: nova/virt/libvirt/imagecache.py:549 #, python-format msgid "Active base files: %s" msgstr "Active base files: %s" -#: nova/virt/libvirt/imagecache.py:553 +#: nova/virt/libvirt/imagecache.py:552 #, python-format msgid "Corrupt base files: %s" msgstr "Corrupt base files: %s" -#: nova/virt/libvirt/imagecache.py:557 +#: nova/virt/libvirt/imagecache.py:556 #, python-format msgid "Removable base files: %s" msgstr "Removable base files: %s" diff --git a/nova/locale/en_US/LC_MESSAGES/nova.po b/nova/locale/en_US/LC_MESSAGES/nova.po index e3f9f93888..43a7079bff 100644 --- a/nova/locale/en_US/LC_MESSAGES/nova.po +++ b/nova/locale/en_US/LC_MESSAGES/nova.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Nova\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/nova\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" "PO-Revision-Date: 2013-01-21 18:28+0000\n" "Last-Translator: Jeremy Stanley \n" "Language-Team: en_US 
\n" @@ -37,19 +37,23 @@ msgstr "" msgid "Invalid source_type field." msgstr "" -#: nova/block_device.py:192 +#: nova/block_device.py:191 +msgid "Invalid device UUID." +msgstr "" + +#: nova/block_device.py:195 msgid "Missing device UUID." msgstr "" -#: nova/block_device.py:371 +#: nova/block_device.py:374 msgid "Device name empty or too long." msgstr "" -#: nova/block_device.py:375 +#: nova/block_device.py:378 msgid "Device name contains spaces." msgstr "" -#: nova/block_device.py:385 +#: nova/block_device.py:388 msgid "Invalid volume_size." msgstr "" @@ -1867,6 +1871,43 @@ msgstr "" msgid "Architecture name '%(arch)s' is not recognised" msgstr "" +#: nova/exception.py:1645 +msgid "CPU and memory allocation must be provided for all NUMA nodes" +msgstr "" + +#: nova/exception.py:1650 +#, python-format +msgid "" +"Image property '%(name)s' is not permitted to override NUMA configuration" +" set against the flavor" +msgstr "" + +#: nova/exception.py:1655 +msgid "" +"Asymmetric NUMA topologies require explicit assignment of CPUs and memory" +" to nodes in image or flavor" +msgstr "" + +#: nova/exception.py:1660 +#, python-format +msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" +msgstr "" + +#: nova/exception.py:1664 +#, python-format +msgid "CPU number %(cpunum)d is assigned to two nodes" +msgstr "" + +#: nova/exception.py:1668 +#, python-format +msgid "CPU number %(cpuset)s is not assigned to any node" +msgstr "" + +#: nova/exception.py:1672 +#, python-format +msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" +msgstr "" + #: nova/filters.py:84 #, python-format msgid "Filter %s returned 0 hosts" @@ -2106,147 +2147,147 @@ msgstr "Unauthorized request for controller=%(controller)s and action=%(action)s msgid "Unknown error occurred." 
msgstr "" -#: nova/api/ec2/cloud.py:392 +#: nova/api/ec2/cloud.py:391 #, python-format msgid "Create snapshot of volume %s" msgstr "Create snapshot of volume %s" -#: nova/api/ec2/cloud.py:417 +#: nova/api/ec2/cloud.py:418 #, python-format msgid "Could not find key pair(s): %s" msgstr "Could not find key pair(s): %s" -#: nova/api/ec2/cloud.py:433 +#: nova/api/ec2/cloud.py:434 #, python-format msgid "Create key pair %s" msgstr "Create key pair %s" -#: nova/api/ec2/cloud.py:445 +#: nova/api/ec2/cloud.py:446 #, python-format msgid "Import key %s" msgstr "Import key %s" -#: nova/api/ec2/cloud.py:458 +#: nova/api/ec2/cloud.py:459 #, python-format msgid "Delete key pair %s" msgstr "Delete key pair %s" -#: nova/api/ec2/cloud.py:600 nova/api/ec2/cloud.py:730 +#: nova/api/ec2/cloud.py:601 nova/api/ec2/cloud.py:731 msgid "need group_name or group_id" msgstr "" -#: nova/api/ec2/cloud.py:605 +#: nova/api/ec2/cloud.py:606 msgid "can't build a valid rule" msgstr "" -#: nova/api/ec2/cloud.py:613 +#: nova/api/ec2/cloud.py:614 #, python-format msgid "Invalid IP protocol %(protocol)s" msgstr "" -#: nova/api/ec2/cloud.py:647 nova/api/ec2/cloud.py:683 +#: nova/api/ec2/cloud.py:648 nova/api/ec2/cloud.py:684 msgid "No rule for the specified parameters." msgstr "No rule for the specified parameters." 
-#: nova/api/ec2/cloud.py:761 +#: nova/api/ec2/cloud.py:762 #, python-format msgid "Get console output for instance %s" msgstr "Get console output for instance %s" -#: nova/api/ec2/cloud.py:833 +#: nova/api/ec2/cloud.py:834 #, python-format msgid "Create volume from snapshot %s" msgstr "Create volume from snapshot %s" -#: nova/api/ec2/cloud.py:837 nova/api/openstack/compute/contrib/volumes.py:243 +#: nova/api/ec2/cloud.py:838 nova/api/openstack/compute/contrib/volumes.py:243 #, python-format msgid "Create volume of %s GB" msgstr "Create volume of %s GB" -#: nova/api/ec2/cloud.py:877 +#: nova/api/ec2/cloud.py:878 #, python-format msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" -#: nova/api/ec2/cloud.py:907 nova/api/openstack/compute/contrib/volumes.py:506 +#: nova/api/ec2/cloud.py:908 nova/api/openstack/compute/contrib/volumes.py:506 #, python-format msgid "Detach volume %s" msgstr "Detach volume %s" -#: nova/api/ec2/cloud.py:1261 +#: nova/api/ec2/cloud.py:1262 msgid "Allocate address" msgstr "Allocate address" -#: nova/api/ec2/cloud.py:1266 +#: nova/api/ec2/cloud.py:1267 #, python-format msgid "Release address %s" msgstr "Release address %s" -#: nova/api/ec2/cloud.py:1271 +#: nova/api/ec2/cloud.py:1272 #, python-format msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "Associate address %(public_ip)s to instance %(instance_id)s" -#: nova/api/ec2/cloud.py:1281 +#: nova/api/ec2/cloud.py:1282 msgid "Unable to associate IP Address, no fixed_ips." msgstr "Unable to associate IP Address, no fixed_ips." 
-#: nova/api/ec2/cloud.py:1302 +#: nova/api/ec2/cloud.py:1303 #, python-format msgid "Disassociate address %s" msgstr "Disassociate address %s" -#: nova/api/ec2/cloud.py:1319 nova/api/openstack/compute/servers.py:920 +#: nova/api/ec2/cloud.py:1320 nova/api/openstack/compute/servers.py:920 #: nova/api/openstack/compute/plugins/v3/multiple_create.py:64 msgid "min_count must be <= max_count" msgstr "min_count must be <= max_count" -#: nova/api/ec2/cloud.py:1351 +#: nova/api/ec2/cloud.py:1352 msgid "Image must be available" msgstr "Image must be available" -#: nova/api/ec2/cloud.py:1451 +#: nova/api/ec2/cloud.py:1452 #, python-format msgid "Reboot instance %r" msgstr "Reboot instance %r" -#: nova/api/ec2/cloud.py:1566 +#: nova/api/ec2/cloud.py:1567 #, python-format msgid "De-registering image %s" msgstr "De-registering image %s" -#: nova/api/ec2/cloud.py:1582 +#: nova/api/ec2/cloud.py:1583 msgid "imageLocation is required" msgstr "imageLocation is required" -#: nova/api/ec2/cloud.py:1602 +#: nova/api/ec2/cloud.py:1603 #, python-format msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "Registered image %(image_location)s with id %(image_id)s" -#: nova/api/ec2/cloud.py:1663 +#: nova/api/ec2/cloud.py:1664 msgid "user or group not specified" msgstr "user or group not specified" -#: nova/api/ec2/cloud.py:1666 +#: nova/api/ec2/cloud.py:1667 msgid "only group \"all\" is supported" msgstr "only group \"all\" is supported" -#: nova/api/ec2/cloud.py:1669 +#: nova/api/ec2/cloud.py:1670 msgid "operation_type must be add or remove" msgstr "operation_type must be add or remove" -#: nova/api/ec2/cloud.py:1671 +#: nova/api/ec2/cloud.py:1672 #, python-format msgid "Updating image %s publicity" msgstr "Updating image %s publicity" -#: nova/api/ec2/cloud.py:1684 +#: nova/api/ec2/cloud.py:1685 #, python-format msgid "Not allowed to modify attributes for image %s" msgstr "Not allowed to modify attributes for image %s" -#: nova/api/ec2/cloud.py:1714 +#: 
nova/api/ec2/cloud.py:1715 #, python-format msgid "" "Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not " @@ -2255,64 +2296,64 @@ msgstr "" "Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not " "have a volume attached at root (%(root)s)" -#: nova/api/ec2/cloud.py:1747 +#: nova/api/ec2/cloud.py:1748 #, python-format msgid "" "Couldn't stop instance %(instance)s within 1 hour. Current vm_state: " "%(vm_state)s, current task_state: %(task_state)s" msgstr "" -#: nova/api/ec2/cloud.py:1771 +#: nova/api/ec2/cloud.py:1772 #, python-format msgid "image of %(instance)s at %(now)s" msgstr "image of %(instance)s at %(now)s" -#: nova/api/ec2/cloud.py:1796 nova/api/ec2/cloud.py:1846 +#: nova/api/ec2/cloud.py:1797 nova/api/ec2/cloud.py:1847 msgid "resource_id and tag are required" msgstr "" -#: nova/api/ec2/cloud.py:1800 nova/api/ec2/cloud.py:1850 +#: nova/api/ec2/cloud.py:1801 nova/api/ec2/cloud.py:1851 #, fuzzy msgid "Expecting a list of resources" msgstr "Getting list of instances" -#: nova/api/ec2/cloud.py:1805 nova/api/ec2/cloud.py:1855 -#: nova/api/ec2/cloud.py:1913 +#: nova/api/ec2/cloud.py:1806 nova/api/ec2/cloud.py:1856 +#: nova/api/ec2/cloud.py:1914 #, fuzzy msgid "Only instances implemented" msgstr "instance not present" -#: nova/api/ec2/cloud.py:1809 nova/api/ec2/cloud.py:1859 +#: nova/api/ec2/cloud.py:1810 nova/api/ec2/cloud.py:1860 #, fuzzy msgid "Expecting a list of tagSets" msgstr "Getting list of instances" -#: nova/api/ec2/cloud.py:1815 nova/api/ec2/cloud.py:1868 +#: nova/api/ec2/cloud.py:1816 nova/api/ec2/cloud.py:1869 msgid "Expecting tagSet to be key/value pairs" msgstr "" -#: nova/api/ec2/cloud.py:1822 +#: nova/api/ec2/cloud.py:1823 msgid "Expecting both key and value to be set" msgstr "" -#: nova/api/ec2/cloud.py:1873 +#: nova/api/ec2/cloud.py:1874 msgid "Expecting key to be set" msgstr "" -#: nova/api/ec2/cloud.py:1947 +#: nova/api/ec2/cloud.py:1948 msgid "Invalid CIDR" msgstr "Invalid CIDR" -#: 
nova/api/ec2/ec2utils.py:254 +#: nova/api/ec2/ec2utils.py:255 #, python-format msgid "Unacceptable attach status:%s for ec2 API." msgstr "" -#: nova/api/ec2/ec2utils.py:277 +#: nova/api/ec2/ec2utils.py:278 msgid "Request must include either Timestamp or Expires, but cannot contain both" msgstr "" -#: nova/api/ec2/ec2utils.py:295 +#: nova/api/ec2/ec2utils.py:296 #, fuzzy msgid "Timestamp is invalid." msgstr "The request is invalid." @@ -2589,8 +2630,8 @@ msgstr "" msgid "Instance does not exist" msgstr "Instance does not exist" -#: nova/api/openstack/compute/ips.py:90 -#: nova/api/openstack/compute/plugins/v3/ips.py:62 +#: nova/api/openstack/compute/ips.py:84 +#: nova/api/openstack/compute/plugins/v3/ips.py:56 msgid "Instance is not a member of specified network" msgstr "Instance is not a member of specified network" @@ -2984,14 +3025,14 @@ msgstr "" msgid "Delete snapshot with id: %s" msgstr "Delete snapshot with id: %s" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:105 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:103 #, fuzzy msgid "Attach interface" msgstr "Failed to add interface: %s" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:120 -#: nova/api/openstack/compute/contrib/attach_interfaces.py:158 -#: nova/api/openstack/compute/contrib/attach_interfaces.py:184 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:116 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:145 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:166 #: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:174 #: nova/network/security_group/neutron_driver.py:510 #: nova/network/security_group/neutron_driver.py:514 @@ -3002,17 +3043,12 @@ msgstr "Failed to add interface: %s" msgid "Network driver does not support this function." msgstr "Virt driver does not implement uptime function." 
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:124 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:120 #, fuzzy msgid "Failed to attach interface" msgstr "Failed to add interface: %s" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:134 -#, fuzzy -msgid "Attachments update is not supported" -msgstr "attribute not supported: %s" - -#: nova/api/openstack/compute/contrib/attach_interfaces.py:146 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:136 #: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:144 #, fuzzy, python-format msgid "Detach interface %s" @@ -3506,16 +3542,6 @@ msgid "" " %(quota_used)s" msgstr "" -#: nova/api/openstack/compute/contrib/rescue.py:78 -#: nova/api/openstack/compute/plugins/v3/rescue.py:80 -msgid "The rescue operation is not implemented by this cloud." -msgstr "" - -#: nova/api/openstack/compute/contrib/rescue.py:98 -#: nova/api/openstack/compute/plugins/v3/rescue.py:104 -msgid "The unrescue operation is not implemented by this cloud." -msgstr "" - #: nova/api/openstack/compute/contrib/scheduler_hints.py:37 #: nova/api/openstack/compute/plugins/v3/scheduler_hints.py:39 msgid "Malformed scheduler_hints attribute" @@ -4855,7 +4881,7 @@ msgstr "" msgid "Volume id: %s finished being created but was not set as 'available'" msgstr "" -#: nova/compute/manager.py:1235 nova/compute/manager.py:2057 +#: nova/compute/manager.py:1235 nova/compute/manager.py:2064 msgid "Success" msgstr "" @@ -4880,7 +4906,7 @@ msgstr "DB error: %s" msgid "Instance build timed out. Set to error state." msgstr "Instance build timed out. Set to error state." -#: nova/compute/manager.py:1524 nova/compute/manager.py:1888 +#: nova/compute/manager.py:1524 nova/compute/manager.py:1894 msgid "Starting instance..." msgstr "Starting instance..." 
@@ -4896,69 +4922,64 @@ msgstr "" msgid "Instance failed network setup (attempt %(attempt)d of %(attempts)d)" msgstr "" -#: nova/compute/manager.py:2020 +#: nova/compute/manager.py:2027 #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" -#: nova/compute/manager.py:2030 nova/compute/manager.py:2080 +#: nova/compute/manager.py:2037 nova/compute/manager.py:2087 msgid "Failed to allocate the network(s), not rescheduling." msgstr "" -#: nova/compute/manager.py:2106 +#: nova/compute/manager.py:2113 msgid "Failure prepping block device." msgstr "" -#: nova/compute/manager.py:2127 +#: nova/compute/manager.py:2134 msgid "Could not clean up failed build, not rescheduling" msgstr "" -#: nova/compute/manager.py:2185 +#: nova/compute/manager.py:2192 #, fuzzy msgid "Failed to deallocate network for instance." msgstr "Failed to dealloc network for deleted instance" -#: nova/compute/manager.py:2206 +#: nova/compute/manager.py:2213 #, python-format msgid "%(action_str)s instance" msgstr "%(action_str)s instance" -#: nova/compute/manager.py:2361 +#: nova/compute/manager.py:2368 msgid "Instance disappeared during terminate" msgstr "" -#: nova/compute/manager.py:2547 +#: nova/compute/manager.py:2554 msgid "Rebuilding instance" msgstr "Rebuilding instance" -#: nova/compute/manager.py:2560 +#: nova/compute/manager.py:2567 msgid "Invalid state of instance files on shared storage" msgstr "" -#: nova/compute/manager.py:2564 +#: nova/compute/manager.py:2571 msgid "disk on shared storage, recreating using existing disk" msgstr "" -#: nova/compute/manager.py:2568 +#: nova/compute/manager.py:2575 #, python-format msgid "disk not on shared storage, rebuilding from: '%s'" msgstr "" -#: nova/compute/manager.py:2655 -#, python-format -msgid "bringing vm to original state: '%s'" -msgstr "" - -#: nova/compute/manager.py:2686 +#: nova/compute/manager.py:2694 #, fuzzy, python-format msgid "Detaching from volume api: %s" msgstr "Attach boot from volume 
failed: %s" -#: nova/compute/manager.py:2713 +#: nova/compute/manager.py:2721 msgid "Rebooting instance" msgstr "Rebooting instance" -#: nova/compute/manager.py:2730 +#: nova/compute/manager.py:2738 #, python-format msgid "" "trying to reboot a non-running instance: (state: %(state)s expected: " @@ -4967,25 +4988,25 @@ msgstr "" "trying to reboot a non-running instance: (state: %(state)s expected: " "%(running)s)" -#: nova/compute/manager.py:2766 +#: nova/compute/manager.py:2774 msgid "Reboot failed but instance is running" msgstr "" -#: nova/compute/manager.py:2774 +#: nova/compute/manager.py:2782 #, python-format msgid "Cannot reboot instance: %s" msgstr "" -#: nova/compute/manager.py:2786 +#: nova/compute/manager.py:2794 #, fuzzy msgid "Instance disappeared during reboot" msgstr "instance %s: rebooted" -#: nova/compute/manager.py:2854 +#: nova/compute/manager.py:2862 msgid "instance snapshotting" msgstr "instance snapshotting" -#: nova/compute/manager.py:2860 +#: nova/compute/manager.py:2868 #, python-format msgid "" "trying to snapshot a non-running instance: (state: %(state)s expected: " @@ -4994,162 +5015,162 @@ msgstr "" "trying to snapshot a non-running instance: (state: %(state)s expected: " "%(running)s)" -#: nova/compute/manager.py:2893 +#: nova/compute/manager.py:2901 #, python-format msgid "Error while trying to clean up image %s" msgstr "" -#: nova/compute/manager.py:2898 +#: nova/compute/manager.py:2906 msgid "Image not found during snapshot" msgstr "" -#: nova/compute/manager.py:2980 +#: nova/compute/manager.py:2988 #, python-format msgid "Failed to set admin password. Instance %s is not running" msgstr "Failed to set admin password. Instance %s is not running" -#: nova/compute/manager.py:2987 +#: nova/compute/manager.py:2995 msgid "Root password set" msgstr "Root password set" -#: nova/compute/manager.py:2992 +#: nova/compute/manager.py:3000 #, fuzzy msgid "set_admin_password is not implemented by this driver or guest instance." 
msgstr "set_admin_password is not implemented by this driver." -#: nova/compute/manager.py:3011 +#: nova/compute/manager.py:3019 msgid "error setting admin password" msgstr "error setting admin password" -#: nova/compute/manager.py:3027 +#: nova/compute/manager.py:3035 #, python-format msgid "" "trying to inject a file into a non-running (state: %(current_state)s " "expected: %(expected_state)s)" msgstr "" -#: nova/compute/manager.py:3032 +#: nova/compute/manager.py:3040 #, python-format msgid "injecting file to %s" msgstr "" -#: nova/compute/manager.py:3050 +#: nova/compute/manager.py:3058 msgid "" "Unable to find a different image to use for rescue VM, using instance's " "current image" msgstr "" -#: nova/compute/manager.py:3069 +#: nova/compute/manager.py:3077 msgid "Rescuing" msgstr "Rescuing" -#: nova/compute/manager.py:3094 +#: nova/compute/manager.py:3102 #, fuzzy, python-format msgid "Driver Error: %s" msgstr "DB error: %s" -#: nova/compute/manager.py:3117 +#: nova/compute/manager.py:3125 msgid "Unrescuing" msgstr "Unrescuing" -#: nova/compute/manager.py:3188 +#: nova/compute/manager.py:3196 #, python-format msgid "Migration %s is not found during confirmation" msgstr "" -#: nova/compute/manager.py:3193 +#: nova/compute/manager.py:3201 #, python-format msgid "Migration %s is already confirmed" msgstr "" -#: nova/compute/manager.py:3197 +#: nova/compute/manager.py:3205 #, python-format msgid "" "Unexpected confirmation status '%(status)s' of migration %(id)s, exit " "confirmation process" msgstr "" -#: nova/compute/manager.py:3211 +#: nova/compute/manager.py:3219 msgid "Instance is not found during confirmation" msgstr "" -#: nova/compute/manager.py:3392 +#: nova/compute/manager.py:3400 #, fuzzy, python-format msgid "Updating instance to original state: '%s'" msgstr "Setting instance to %(state)s state." -#: nova/compute/manager.py:3415 +#: nova/compute/manager.py:3423 #, fuzzy msgid "Instance has no source host" msgstr "Instance has no volume." 
-#: nova/compute/manager.py:3421 +#: nova/compute/manager.py:3429 msgid "destination same as source!" msgstr "destination same as source!" -#: nova/compute/manager.py:3439 +#: nova/compute/manager.py:3447 msgid "Migrating" msgstr "Migrating" -#: nova/compute/manager.py:3771 +#: nova/compute/manager.py:3784 msgid "Pausing" msgstr "Pausing" -#: nova/compute/manager.py:3788 +#: nova/compute/manager.py:3801 msgid "Unpausing" msgstr "Unpausing" -#: nova/compute/manager.py:3829 nova/compute/manager.py:3846 +#: nova/compute/manager.py:3842 nova/compute/manager.py:3859 msgid "Retrieving diagnostics" msgstr "Retrieving diagnostics" -#: nova/compute/manager.py:3882 +#: nova/compute/manager.py:3895 msgid "Resuming" msgstr "Resuming" -#: nova/compute/manager.py:4102 +#: nova/compute/manager.py:4115 msgid "Get console output" msgstr "Get console output" -#: nova/compute/manager.py:4301 +#: nova/compute/manager.py:4314 #, python-format msgid "Attaching volume %(volume_id)s to %(mountpoint)s" msgstr "Attaching volume %(volume_id)s to %(mountpoint)s" -#: nova/compute/manager.py:4326 +#: nova/compute/manager.py:4339 #, python-format msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" msgstr "Detach volume %(volume_id)s from mountpoint %(mp)s" -#: nova/compute/manager.py:4337 +#: nova/compute/manager.py:4350 msgid "Detaching volume from unknown instance" msgstr "Detaching volume from unknown instance" -#: nova/compute/manager.py:4525 +#: nova/compute/manager.py:4544 #, fuzzy, python-format msgid "allocate_port_for_instance returned %(ports)s ports" msgstr "allocate_for_instance() for %s" -#: nova/compute/manager.py:4549 +#: nova/compute/manager.py:4568 #, python-format msgid "Port %s is not attached" msgstr "" -#: nova/compute/manager.py:4561 nova/tests/compute/test_compute.py:10659 +#: nova/compute/manager.py:4580 nova/tests/compute/test_compute.py:10791 #, python-format msgid "Host %s not found" msgstr "" -#: nova/compute/manager.py:4779 +#: nova/compute/manager.py:4798 
msgid "_post_live_migration() is started.." msgstr "_post_live_migration() is started.." -#: nova/compute/manager.py:4855 +#: nova/compute/manager.py:4874 #, python-format msgid "Migrating instance to %s finished successfully." msgstr "" -#: nova/compute/manager.py:4857 +#: nova/compute/manager.py:4876 msgid "" "You may see the error \"libvirt: QEMU error: Domain not found: no domain " "with matching name.\" This error can be safely ignored." @@ -5157,15 +5178,15 @@ msgstr "" "You may see the error \"libvirt: QEMU error: Domain not found: no domain " "with matching name.\" This error can be safely ignored." -#: nova/compute/manager.py:4882 +#: nova/compute/manager.py:4901 msgid "Post operation of migration started" msgstr "Post operation of migration started" -#: nova/compute/manager.py:5087 +#: nova/compute/manager.py:5106 msgid "An error occurred while refreshing the network cache." msgstr "" -#: nova/compute/manager.py:5140 +#: nova/compute/manager.py:5159 #, python-format msgid "" "Found %(migration_count)d unconfirmed migrations older than " @@ -5174,12 +5195,12 @@ msgstr "" "Found %(migration_count)d unconfirmed migrations older than " "%(confirm_window)d seconds" -#: nova/compute/manager.py:5145 +#: nova/compute/manager.py:5164 #, python-format msgid "Setting migration %(migration_id)s to error: %(reason)s" msgstr "Setting migration %(migration_id)s to error: %(reason)s" -#: nova/compute/manager.py:5154 +#: nova/compute/manager.py:5173 #, python-format msgid "" "Automatically confirming migration %(migration_id)s for instance " @@ -5188,26 +5209,26 @@ msgstr "" "Automatically confirming migration %(migration_id)s for instance " "%(instance_uuid)s" -#: nova/compute/manager.py:5164 +#: nova/compute/manager.py:5183 #, python-format msgid "Instance %s not found" msgstr "" -#: nova/compute/manager.py:5169 +#: nova/compute/manager.py:5188 msgid "In ERROR state" msgstr "In ERROR state" -#: nova/compute/manager.py:5176 +#: nova/compute/manager.py:5195 #, fuzzy, 
python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "In states %(vm_state)s/%(task_state)s, notRESIZED/None" -#: nova/compute/manager.py:5187 +#: nova/compute/manager.py:5206 #, python-format msgid "Error auto-confirming resize: %s. Will retry later." msgstr "" -#: nova/compute/manager.py:5236 +#: nova/compute/manager.py:5255 #, python-format msgid "" "Running instance usage audit for host %(host)s from %(begin_time)s to " @@ -5216,15 +5237,15 @@ msgstr "" "Running instance usage audit for host %(host)s from %(begin_time)s to " "%(end_time)s. %(number_instances)s instances." -#: nova/compute/manager.py:5285 +#: nova/compute/manager.py:5304 msgid "Updating bandwidth usage cache" msgstr "Updating bandwidth usage cache" -#: nova/compute/manager.py:5307 +#: nova/compute/manager.py:5326 msgid "Bandwidth usage not supported by hypervisor." msgstr "" -#: nova/compute/manager.py:5430 +#: nova/compute/manager.py:5449 #, python-format msgid "" "Found %(num_db_instances)s in the database and %(num_vm_instances)s on " @@ -5233,7 +5254,7 @@ msgstr "" "Found %(num_db_instances)s in the database and %(num_vm_instances)s on " "the hypervisor." -#: nova/compute/manager.py:5496 +#: nova/compute/manager.py:5515 #, python-format msgid "" "During the sync_power process the instance has moved from host %(src)s to" @@ -5242,105 +5263,105 @@ msgstr "" "During the sync_power process the instance has moved from host %(src)s to" " host %(dst)s" -#: nova/compute/manager.py:5509 +#: nova/compute/manager.py:5528 #, python-format msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." msgstr "" -#: nova/compute/manager.py:5534 +#: nova/compute/manager.py:5553 msgid "Instance shutdown by itself. Calling the stop API." msgstr "Instance shutdown by itself. Calling the stop API." -#: nova/compute/manager.py:5553 +#: nova/compute/manager.py:5572 #, fuzzy msgid "Instance is suspended unexpectedly. Calling the stop API." 
msgstr "Instance is paused or suspended unexpectedly. Calling the stop API." -#: nova/compute/manager.py:5569 +#: nova/compute/manager.py:5588 #, fuzzy msgid "Instance is paused unexpectedly. Ignore." msgstr "Instance is paused or suspended unexpectedly. Calling the stop API." -#: nova/compute/manager.py:5575 +#: nova/compute/manager.py:5594 msgid "Instance is unexpectedly not found. Ignore." msgstr "" -#: nova/compute/manager.py:5581 +#: nova/compute/manager.py:5600 msgid "Instance is not stopped. Calling the stop API." msgstr "Instance is not stopped. Calling the stop API." -#: nova/compute/manager.py:5595 +#: nova/compute/manager.py:5614 msgid "Paused instance shutdown by itself. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5609 +#: nova/compute/manager.py:5628 msgid "Instance is not (soft-)deleted." msgstr "Instance is not (soft-)deleted." -#: nova/compute/manager.py:5639 +#: nova/compute/manager.py:5658 msgid "Reclaiming deleted instance" msgstr "Reclaiming deleted instance" -#: nova/compute/manager.py:5643 +#: nova/compute/manager.py:5662 #, python-format msgid "Periodic reclaim failed to delete instance: %s" msgstr "" -#: nova/compute/manager.py:5668 +#: nova/compute/manager.py:5687 #, fuzzy, python-format msgid "Deleting orphan compute node %s" msgstr "Loading compute driver '%s'" -#: nova/compute/manager.py:5676 nova/compute/resource_tracker.py:406 +#: nova/compute/manager.py:5695 nova/compute/resource_tracker.py:406 #, python-format msgid "No service record for host %s" msgstr "No service record for host %s" -#: nova/compute/manager.py:5716 +#: nova/compute/manager.py:5735 #, python-format msgid "" "Detected instance with name label '%s' which is marked as DELETED but " "still present on host." msgstr "" -#: nova/compute/manager.py:5722 +#: nova/compute/manager.py:5741 #, python-format msgid "" "Powering off instance with name label '%s' which is marked as DELETED but" " still present on host." 
msgstr "" -#: nova/compute/manager.py:5731 +#: nova/compute/manager.py:5750 msgid "set_bootable is not implemented for the current driver" msgstr "" -#: nova/compute/manager.py:5736 +#: nova/compute/manager.py:5755 msgid "Failed to power off instance" msgstr "" -#: nova/compute/manager.py:5740 +#: nova/compute/manager.py:5759 #, python-format msgid "" "Destroying instance with name label '%s' which is marked as DELETED but " "still present on host." msgstr "" -#: nova/compute/manager.py:5750 +#: nova/compute/manager.py:5769 #, python-format msgid "Periodic cleanup failed to delete instance: %s" msgstr "" -#: nova/compute/manager.py:5754 +#: nova/compute/manager.py:5773 #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "" -#: nova/compute/manager.py:5786 +#: nova/compute/manager.py:5805 #, python-format msgid "Setting instance back to %(state)s after: %(error)s" msgstr "" -#: nova/compute/manager.py:5796 +#: nova/compute/manager.py:5815 #, fuzzy, python-format msgid "Setting instance back to ACTIVE after: %s" msgstr "Setting instance to %(state)s state." @@ -5720,7 +5741,7 @@ msgstr "" msgid "Exception while seeding instance_types table" msgstr "" -#: nova/image/glance.py:236 +#: nova/image/glance.py:235 #, python-format msgid "" "Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " @@ -5729,19 +5750,19 @@ msgstr "" "Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " "%(extra)s." 
-#: nova/image/glance.py:268 +#: nova/image/glance.py:267 #, python-format msgid "" "When loading the module %(module_str)s the following error occurred: " "%(ex)s" msgstr "" -#: nova/image/glance.py:327 +#: nova/image/glance.py:326 #, python-format msgid "Failed to instantiate the download handler for %(scheme)s" msgstr "" -#: nova/image/glance.py:343 +#: nova/image/glance.py:342 #, python-format msgid "Successfully transferred using %s" msgstr "" @@ -5887,7 +5908,7 @@ msgstr "" msgid "Not deleting key %s" msgstr "" -#: nova/network/api.py:196 nova/network/neutronv2/api.py:812 +#: nova/network/api.py:196 nova/network/neutronv2/api.py:845 #, python-format msgid "re-assign floating IP %(address)s from instance %(instance_id)s" msgstr "re-assign floating IP %(address)s from instance %(instance_id)s" @@ -6217,88 +6238,32 @@ msgstr "Cannot delete aggregate: %(id)s" msgid "Invalid IP format %s" msgstr "" -#: nova/network/neutronv2/api.py:230 -#, python-format -msgid "Neutron error creating port on network %s" -msgstr "" - -#: nova/network/neutronv2/api.py:263 +#: nova/network/neutronv2/api.py:269 #, python-format msgid "empty project id for instance %s" msgstr "empty project id for instance %s" -#: nova/network/neutronv2/api.py:298 -msgid "No network configured!" +#: nova/network/neutronv2/api.py:313 nova/network/neutronv2/api.py:678 +msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "" -#: nova/network/neutronv2/api.py:318 +#: nova/network/neutronv2/api.py:335 #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more " "specific." 
msgstr "" -#: nova/network/neutronv2/api.py:388 -#, python-format -msgid "Failed to update port %s" -msgstr "" - -#: nova/network/neutronv2/api.py:395 -#, python-format -msgid "Failed to delete port %s" -msgstr "" - -#: nova/network/neutronv2/api.py:458 +#: nova/network/neutronv2/api.py:489 #, python-format msgid "Unable to reset device ID for port %s" msgstr "" -#: nova/network/neutronv2/api.py:466 -#, python-format -msgid "Port %s does not exist" -msgstr "" - -#: nova/network/neutronv2/api.py:469 nova/network/neutronv2/api.py:493 -#, python-format -msgid "Failed to delete neutron port %s" -msgstr "" - -#: nova/network/neutronv2/api.py:647 -msgid "Multiple possible networks found, use a Network ID to be more specific." -msgstr "" - -#: nova/network/neutronv2/api.py:666 -#, python-format -msgid "Failed to access port %s" -msgstr "" - -#: nova/network/neutronv2/api.py:898 -#, python-format -msgid "Unable to access floating IP %s" -msgstr "" - -#: nova/network/neutronv2/api.py:986 +#: nova/network/neutronv2/api.py:1021 #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "Multiple floating IP pools matches found for name '%s'" -#: nova/network/neutronv2/api.py:1030 -#, python-format -msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" -msgstr "" - -#: nova/network/neutronv2/api.py:1089 -#, python-format -msgid "Unable to update host of port %s" -msgstr "" - -#: nova/network/neutronv2/api.py:1125 -#, python-format -msgid "" -"Network %(id)s not matched with the tenants network! The ports tenant " -"%(tenant_id)s will be used." 
-msgstr "" - #: nova/network/security_group/neutron_driver.py:57 #, python-format msgid "Neutron Error creating security group %s" @@ -6382,6 +6347,14 @@ msgid "" "%(instance)s" msgstr "" +#: nova/network/security_group/security_group_base.py:89 +msgid "Type and Code must be integers for ICMP protocol type" +msgstr "" + +#: nova/network/security_group/security_group_base.py:92 +msgid "To and From ports must be integers" +msgstr "" + #: nova/network/security_group/security_group_base.py:134 #, python-format msgid "This rule already exists in group %s" @@ -6392,17 +6365,17 @@ msgstr "This rule already exists in group %s" msgid "Error setting %(attr)s" msgstr "error setting admin password" -#: nova/objects/base.py:256 +#: nova/objects/base.py:262 #, python-format msgid "Unable to instantiate unregistered object type %(objtype)s" msgstr "" -#: nova/objects/base.py:375 +#: nova/objects/base.py:381 #, python-format msgid "Cannot load '%s' in the base class" msgstr "" -#: nova/objects/base.py:421 +#: nova/objects/base.py:427 #, python-format msgid "%(objname)s object has no attribute '%(attrname)s'" msgstr "" @@ -6491,22 +6464,22 @@ msgstr "" msgid "Unable to acquire lock on `%(filename)s` due to %(exception)s" msgstr "" -#: nova/openstack/common/log.py:276 +#: nova/openstack/common/log.py:289 #, fuzzy, python-format msgid "Deprecated: %s" msgstr "Deprecated Config: %s" -#: nova/openstack/common/log.py:385 +#: nova/openstack/common/log.py:397 #, fuzzy, python-format msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "Error reading image info file %(filename)s: %(error)s" -#: nova/openstack/common/log.py:446 +#: nova/openstack/common/log.py:458 #, python-format msgid "syslog facility must be one of: %s" msgstr "syslog facility must be one of: %s" -#: nova/openstack/common/log.py:689 +#: nova/openstack/common/log.py:709 #, fuzzy, python-format msgid "Fatal call to deprecated config: %(msg)s" msgstr "Fatal call to deprecated config %(msg)s" @@ -6889,16 
+6862,6 @@ msgstr "" msgid "ZooKeeperDriver.leave: %(id)s has not joined to the %(gr)s group" msgstr "" -#: nova/storage/linuxscsi.py:100 -#, python-format -msgid "Multipath call failed exit (%(code)s)" -msgstr "" - -#: nova/storage/linuxscsi.py:121 -#, python-format -msgid "Couldn't find multipath device %s" -msgstr "" - #: nova/tests/fake_ldap.py:33 msgid "Attempted to instantiate singleton" msgstr "Attempted to instantiate singleton" @@ -6919,7 +6882,7 @@ msgstr "" msgid "already detached" msgstr "already detached" -#: nova/tests/api/test_auth.py:98 +#: nova/tests/api/test_auth.py:97 msgid "unexpected role header" msgstr "unexpected role header" @@ -6946,32 +6909,32 @@ msgstr "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 r msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores" msgstr "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores" -#: nova/tests/compute/test_compute.py:1696 -#: nova/tests/compute/test_compute.py:1723 -#: nova/tests/compute/test_compute.py:1801 -#: nova/tests/compute/test_compute.py:1841 -#: nova/tests/compute/test_compute.py:5644 +#: nova/tests/compute/test_compute.py:1770 +#: nova/tests/compute/test_compute.py:1797 +#: nova/tests/compute/test_compute.py:1875 +#: nova/tests/compute/test_compute.py:1915 +#: nova/tests/compute/test_compute.py:5718 #, python-format msgid "Running instances: %s" msgstr "Running instances: %s" -#: nova/tests/compute/test_compute.py:1703 -#: nova/tests/compute/test_compute.py:1771 -#: nova/tests/compute/test_compute.py:1809 +#: nova/tests/compute/test_compute.py:1777 +#: nova/tests/compute/test_compute.py:1845 +#: nova/tests/compute/test_compute.py:1883 #, python-format msgid "After terminating instances: %s" msgstr "After terminating instances: %s" -#: nova/tests/compute/test_compute.py:5655 +#: nova/tests/compute/test_compute.py:5729 #, python-format msgid "After force-killing instances: %s" msgstr "After force-killing instances: %s" -#: 
nova/tests/compute/test_compute.py:6271 +#: nova/tests/compute/test_compute.py:6345 msgid "wrong host/node" msgstr "" -#: nova/tests/compute/test_compute.py:10867 +#: nova/tests/compute/test_compute.py:10999 #, fuzzy msgid "spawn error" msgstr "unknown guestmount error" @@ -6989,7 +6952,7 @@ msgstr "" msgid "Free CPUs 2.00 VCPUs < requested 5 VCPUs" msgstr "" -#: nova/tests/db/test_migrations.py:923 +#: nova/tests/db/test_migrations.py:931 #, python-format msgid "" "The following migrations are missing a downgrade:\n" @@ -7076,28 +7039,28 @@ msgstr "Body: %s" msgid "Unexpected status code" msgstr "Unexpected status code" -#: nova/tests/virt/hyperv/test_hypervapi.py:513 +#: nova/tests/virt/hyperv/test_hypervapi.py:515 #, fuzzy msgid "fake vswitch not found" msgstr "marker [%s] not found" -#: nova/tests/virt/hyperv/test_hypervapi.py:966 +#: nova/tests/virt/hyperv/test_hypervapi.py:968 msgid "Simulated failure" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1048 +#: nova/tests/virt/libvirt/fakelibvirt.py:1051 msgid "Expected a list for 'auth' parameter" msgstr "Expected a list for 'auth' parameter" -#: nova/tests/virt/libvirt/fakelibvirt.py:1052 +#: nova/tests/virt/libvirt/fakelibvirt.py:1055 msgid "Expected a function in 'auth[0]' parameter" msgstr "Expected a function in 'auth[0]' parameter" -#: nova/tests/virt/libvirt/fakelibvirt.py:1056 +#: nova/tests/virt/libvirt/fakelibvirt.py:1059 msgid "Expected a function in 'auth[1]' parameter" msgstr "Expected a function in 'auth[1]' parameter" -#: nova/tests/virt/libvirt/fakelibvirt.py:1067 +#: nova/tests/virt/libvirt/fakelibvirt.py:1070 msgid "" "virEventRegisterDefaultImpl() must be called before " "connection is used." 
@@ -7112,22 +7075,22 @@ msgstr "Property %(attr)s not set for the managed object %(name)s" msgid "There is no VM registered" msgstr "There is no VM registered" -#: nova/tests/virt/vmwareapi/fake.py:987 nova/tests/virt/vmwareapi/fake.py:1323 +#: nova/tests/virt/vmwareapi/fake.py:987 nova/tests/virt/vmwareapi/fake.py:1338 #, python-format msgid "Virtual Machine with ref %s is not there" msgstr "Virtual Machine with ref %s is not there" -#: nova/tests/virt/vmwareapi/fake.py:1112 +#: nova/tests/virt/vmwareapi/fake.py:1127 msgid "Session Invalid" msgstr "Session Invalid" -#: nova/tests/virt/vmwareapi/fake.py:1320 +#: nova/tests/virt/vmwareapi/fake.py:1335 #, fuzzy msgid "No Virtual Machine has been registered yet" msgstr " No Virtual Machine has been registered yet" -#: nova/tests/virt/vmwareapi/test_ds_util.py:221 -#: nova/virt/vmwareapi/ds_util.py:267 +#: nova/tests/virt/vmwareapi/test_ds_util.py:215 +#: nova/virt/vmwareapi/ds_util.py:261 #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" @@ -7151,12 +7114,12 @@ msgstr "" msgid "Multiple torrent URL fetcher extensions found. Failing." 
msgstr "" -#: nova/virt/block_device.py:241 +#: nova/virt/block_device.py:255 #, python-format msgid "Driver failed to attach volume %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/virt/block_device.py:363 +#: nova/virt/block_device.py:401 #, python-format msgid "Booting with volume %(volume_id)s at %(mountpoint)s" msgstr "Booting with volume %(volume_id)s at %(mountpoint)s" @@ -7171,29 +7134,29 @@ msgstr "" msgid "Invalid type for %s entry" msgstr "" -#: nova/virt/driver.py:705 +#: nova/virt/driver.py:708 msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "" -#: nova/virt/driver.py:1261 +#: nova/virt/driver.py:1264 msgid "Event must be an instance of nova.virt.event.Event" msgstr "" -#: nova/virt/driver.py:1267 +#: nova/virt/driver.py:1270 #, python-format msgid "Exception dispatching event %(event)s: %(ex)s" msgstr "" -#: nova/virt/driver.py:1361 +#: nova/virt/driver.py:1364 msgid "Compute driver option required, but not specified" msgstr "Compute driver option required, but not specified" -#: nova/virt/driver.py:1364 +#: nova/virt/driver.py:1367 #, python-format msgid "Loading compute driver '%s'" msgstr "Loading compute driver '%s'" -#: nova/virt/driver.py:1371 +#: nova/virt/driver.py:1374 #, fuzzy msgid "Unable to load the virtualization driver" msgstr "Unable to load the virtualization driver: %s" @@ -7227,22 +7190,22 @@ msgstr "" msgid "Attempted to unfilter instance which is not filtered" msgstr "Attempted to unfilter instance which is not filtered" -#: nova/virt/hardware.py:45 +#: nova/virt/hardware.py:46 #, python-format msgid "No CPUs available after parsing %r" msgstr "" -#: nova/virt/hardware.py:77 nova/virt/hardware.py:81 +#: nova/virt/hardware.py:78 nova/virt/hardware.py:82 #, python-format msgid "Invalid range expression %r" msgstr "" -#: nova/virt/hardware.py:90 +#: nova/virt/hardware.py:91 #, fuzzy, python-format msgid "Invalid exclusion expression %r" msgstr "Invalid reservation expiration %(expire)s." 
-#: nova/virt/hardware.py:97 +#: nova/virt/hardware.py:98 #, fuzzy, python-format msgid "Invalid inclusion expression %r" msgstr "Invalid reservation expiration %(expire)s." @@ -7740,22 +7703,22 @@ msgid "" " (%(e)s)" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:154 +#: nova/virt/disk/vfs/guestfs.py:156 #, python-format msgid "Error mounting %(imgfile)s with libguestfs (%(e)s)" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:170 +#: nova/virt/disk/vfs/guestfs.py:172 #, fuzzy, python-format msgid "Failed to close augeas %s" msgstr "Failed to live migrate VM %s" -#: nova/virt/disk/vfs/guestfs.py:178 +#: nova/virt/disk/vfs/guestfs.py:180 #, python-format msgid "Failed to shutdown appliance %s" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:186 +#: nova/virt/disk/vfs/guestfs.py:188 #, fuzzy, python-format msgid "Failed to close guest handle %s" msgstr "Failed to understand rule %(rule)s" @@ -7870,6 +7833,13 @@ msgstr "" msgid "Failed to remove snapshot for VM %s" msgstr "Failed to remove snapshot for VM %s" +#: nova/virt/hyperv/utilsfactory.py:68 +msgid "" +"The \"force_hyperv_utils_v1\" option cannot be set to \"True\" on Windows" +" Server / Hyper-V Server 2012 R2 or above as the WMI " +"\"root/virtualization\" namespace is no longer supported." 
+msgstr "" + #: nova/virt/hyperv/vhdutils.py:66 nova/virt/hyperv/vhdutilsv2.py:64 #, python-format msgid "Unsupported disk format: %s" @@ -7910,12 +7880,12 @@ msgstr "" msgid "Spawning new instance" msgstr "Starting instance" -#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:576 +#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:574 #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "" -#: nova/virt/hyperv/vmops.py:307 nova/virt/vmwareapi/vmops.py:580 +#: nova/virt/hyperv/vmops.py:307 nova/virt/vmwareapi/vmops.py:578 msgid "Using config drive for instance" msgstr "" @@ -7924,7 +7894,7 @@ msgstr "" msgid "Creating config drive at %(path)s" msgstr "Creating config drive at %(path)s" -#: nova/virt/hyperv/vmops.py:328 nova/virt/vmwareapi/vmops.py:605 +#: nova/virt/hyperv/vmops.py:328 nova/virt/vmwareapi/vmops.py:603 #, fuzzy, python-format msgid "Creating config drive failed with error: %s" msgstr "Creating config drive at %(path)s" @@ -8015,83 +7985,99 @@ msgstr "" msgid "Unable to determine disk bus for '%s'" msgstr "Unable to find vbd for vdi %s" -#: nova/virt/libvirt/driver.py:552 +#: nova/virt/libvirt/driver.py:550 #, python-format msgid "Connection to libvirt lost: %s" msgstr "" -#: nova/virt/libvirt/driver.py:741 +#: nova/virt/libvirt/driver.py:739 #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "Can not handle authentication request for %d credentials" -#: nova/virt/libvirt/driver.py:924 +#: nova/virt/libvirt/driver.py:922 msgid "operation time out" msgstr "" -#: nova/virt/libvirt/driver.py:1248 +#: nova/virt/libvirt/driver.py:1246 #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" -#: nova/virt/libvirt/driver.py:1255 +#: nova/virt/libvirt/driver.py:1253 #, python-format msgid "Volume sets block size, but libvirt '%s' or later is required." 
msgstr "" -#: nova/virt/libvirt/driver.py:1345 +#: nova/virt/libvirt/driver.py:1351 msgid "Swap only supports host devices" msgstr "" -#: nova/virt/libvirt/driver.py:1631 +#: nova/virt/libvirt/driver.py:1638 msgid "libvirt error while requesting blockjob info." msgstr "" -#: nova/virt/libvirt/driver.py:1774 +#: nova/virt/libvirt/driver.py:1783 msgid "Found no disk to snapshot." msgstr "" -#: nova/virt/libvirt/driver.py:1866 +#: nova/virt/libvirt/driver.py:1875 #, python-format msgid "Unknown type: %s" msgstr "" -#: nova/virt/libvirt/driver.py:1871 +#: nova/virt/libvirt/driver.py:1880 msgid "snapshot_id required in create_info" msgstr "" -#: nova/virt/libvirt/driver.py:1929 +#: nova/virt/libvirt/driver.py:1938 #, python-format msgid "Libvirt '%s' or later is required for online deletion of volume snapshots." msgstr "" -#: nova/virt/libvirt/driver.py:1936 +#: nova/virt/libvirt/driver.py:1945 #, python-format msgid "Unknown delete_info type %s" msgstr "" -#: nova/virt/libvirt/driver.py:1964 +#: nova/virt/libvirt/driver.py:1981 #, python-format msgid "Disk with id: %s not found attached to instance." msgstr "" -#: nova/virt/libvirt/driver.py:2406 nova/virt/xenapi/vmops.py:1561 +#: nova/virt/libvirt/driver.py:1990 +msgid "filename cannot be None" +msgstr "" + +#: nova/virt/libvirt/driver.py:2019 +#, python-format +msgid "no match found for %s" +msgstr "" + +#: nova/virt/libvirt/driver.py:2076 +#, python-format +msgid "" +"Relative blockcommit support was not detected. Libvirt '%s' or later is " +"required for online deletion of network storage-backed volume snapshots." 
+msgstr "" + +#: nova/virt/libvirt/driver.py:2491 nova/virt/xenapi/vmops.py:1561 msgid "Guest does not have a console available" msgstr "Guest does not have a console available" -#: nova/virt/libvirt/driver.py:2735 +#: nova/virt/libvirt/driver.py:2820 #, python-format msgid "%s format is not supported" msgstr "" -#: nova/virt/libvirt/driver.py:2841 +#: nova/virt/libvirt/driver.py:2926 #, python-format msgid "Detaching PCI devices with libvirt < %(ver)s is not permitted" msgstr "" -#: nova/virt/libvirt/driver.py:2984 +#: nova/virt/libvirt/driver.py:3069 #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt " @@ -8100,22 +8086,22 @@ msgstr "" "Config requested an explicit CPU model, but the current libvirt " "hypervisor '%s' does not support selecting CPU models" -#: nova/virt/libvirt/driver.py:2990 +#: nova/virt/libvirt/driver.py:3075 msgid "Config requested a custom CPU model, but no model name was provided" msgstr "Config requested a custom CPU model, but no model name was provided" -#: nova/virt/libvirt/driver.py:2994 +#: nova/virt/libvirt/driver.py:3079 msgid "A CPU model name should not be set when a host CPU model is requested" msgstr "A CPU model name should not be set when a host CPU model is requested" -#: nova/virt/libvirt/driver.py:3586 +#: nova/virt/libvirt/driver.py:3689 #, python-format msgid "" "Error from libvirt while looking up %(instance_id)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3607 +#: nova/virt/libvirt/driver.py:3710 #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " @@ -8124,23 +8110,23 @@ msgstr "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" -#: nova/virt/libvirt/driver.py:3873 +#: nova/virt/libvirt/driver.py:3976 msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range." 
msgstr "" -#: nova/virt/libvirt/driver.py:3998 +#: nova/virt/libvirt/driver.py:4101 msgid "libvirt version is too old (does not support getVersion)" msgstr "libvirt version is too old (does not support getVersion)" -#: nova/virt/libvirt/driver.py:4359 +#: nova/virt/libvirt/driver.py:4462 msgid "Block migration can not be used with shared storage." msgstr "Block migration can not be used with shared storage." -#: nova/virt/libvirt/driver.py:4368 +#: nova/virt/libvirt/driver.py:4471 msgid "Live migration can not be used without shared storage." msgstr "Live migration can not be used without shared storage." -#: nova/virt/libvirt/driver.py:4438 +#: nova/virt/libvirt/driver.py:4541 #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too " @@ -8149,7 +8135,7 @@ msgstr "" "Unable to migrate %(instance_uuid)s: Disk of instance is too " "large(available on destination host:%(available)s < need:%(necessary)s)" -#: nova/virt/libvirt/driver.py:4477 +#: nova/virt/libvirt/driver.py:4580 #, python-format msgid "" "CPU doesn't have compatibility.\n" @@ -8164,12 +8150,12 @@ msgstr "" "\n" "Refer to %(u)s" -#: nova/virt/libvirt/driver.py:4540 +#: nova/virt/libvirt/driver.py:4643 #, python-format msgid "The firewall filter for %s does not exist" msgstr "The firewall filter for %s does not exist" -#: nova/virt/libvirt/driver.py:4603 +#: nova/virt/libvirt/driver.py:4706 msgid "" "Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag " "or your destination node does not support retrieving listen addresses. " @@ -8178,7 +8164,7 @@ msgid "" "address (0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)." 
msgstr "" -#: nova/virt/libvirt/driver.py:4620 +#: nova/virt/libvirt/driver.py:4723 msgid "" "Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag," " and the graphics (VNC and/or SPICE) listen addresses on the destination" @@ -8188,14 +8174,14 @@ msgid "" "succeed, but the VM will continue to listen on the current addresses." msgstr "" -#: nova/virt/libvirt/driver.py:4997 +#: nova/virt/libvirt/driver.py:5100 #, python-format msgid "" "Error from libvirt while getting description of %(instance_name)s: [Error" " Code %(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:5123 +#: nova/virt/libvirt/driver.py:5226 msgid "Unable to resize disk down." msgstr "" @@ -8261,19 +8247,19 @@ msgstr "Path %s must be LVM logical volume" msgid "volume_clear='%s' is not handled" msgstr "" -#: nova/virt/libvirt/rbd.py:104 +#: nova/virt/libvirt/rbd_utils.py:104 msgid "rbd python libraries not found" msgstr "" -#: nova/virt/libvirt/rbd.py:159 +#: nova/virt/libvirt/rbd_utils.py:159 msgid "Not stored in rbd" msgstr "" -#: nova/virt/libvirt/rbd.py:163 +#: nova/virt/libvirt/rbd_utils.py:163 msgid "Blank components" msgstr "" -#: nova/virt/libvirt/rbd.py:166 +#: nova/virt/libvirt/rbd_utils.py:166 msgid "Not an rbd snapshot" msgstr "" @@ -8285,13 +8271,13 @@ msgstr "" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "Can't retrieve root device path from instance libvirt configuration" -#: nova/virt/libvirt/vif.py:338 nova/virt/libvirt/vif.py:545 -#: nova/virt/libvirt/vif.py:709 +#: nova/virt/libvirt/vif.py:322 nova/virt/libvirt/vif.py:508 +#: nova/virt/libvirt/vif.py:652 msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" -#: nova/virt/libvirt/vif.py:344 nova/virt/libvirt/vif.py:551 -#: nova/virt/libvirt/vif.py:715 +#: nova/virt/libvirt/vif.py:328 nova/virt/libvirt/vif.py:514 +#: nova/virt/libvirt/vif.py:658 #, fuzzy, python-format msgid "Unexpected vif_type=%s" msgstr "Unexpected error: 
%s" @@ -8315,48 +8301,54 @@ msgstr "" msgid "Fibre Channel device not found." msgstr "iSCSI device not found at %s" -#: nova/virt/vmwareapi/driver.py:127 +#: nova/virt/vmwareapi/driver.py:125 +msgid "" +"Must specify host_ip, host_username and host_password to use " +"vmwareapi.VMwareVCDriver" +msgstr "" + +#: nova/virt/vmwareapi/driver.py:134 #, python-format msgid "Invalid Regular Expression %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:141 +#: nova/virt/vmwareapi/driver.py:148 #, python-format msgid "All clusters specified %s were not found in the vCenter" msgstr "" -#: nova/virt/vmwareapi/driver.py:319 +#: nova/virt/vmwareapi/driver.py:342 #, python-format msgid "The resource %s does not exist" msgstr "" -#: nova/virt/vmwareapi/driver.py:381 +#: nova/virt/vmwareapi/driver.py:404 #, python-format msgid "Invalid cluster or resource pool name : %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:555 +#: nova/virt/vmwareapi/driver.py:582 msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we " "do not return uptime for just one host." msgstr "" -#: nova/virt/vmwareapi/driver.py:678 +#: nova/virt/vmwareapi/driver.py:705 #, python-format msgid "Unable to validate session %s!" msgstr "" -#: nova/virt/vmwareapi/driver.py:720 +#: nova/virt/vmwareapi/driver.py:747 #, python-format msgid "Session %s is inactive!" 
msgstr "" -#: nova/virt/vmwareapi/driver.py:811 +#: nova/virt/vmwareapi/driver.py:838 #, python-format msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" msgstr "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" -#: nova/virt/vmwareapi/driver.py:821 +#: nova/virt/vmwareapi/driver.py:848 #, python-format msgid "In vmwareapi:_poll_task, Got this error %s" msgstr "In vmwareapi:_poll_task, Got this error %s" @@ -8575,53 +8567,53 @@ msgstr "" msgid "Extending virtual disk failed with error: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:253 +#: nova/virt/vmwareapi/vmops.py:252 msgid "Image disk size greater than requested disk size" msgstr "" -#: nova/virt/vmwareapi/vmops.py:861 +#: nova/virt/vmwareapi/vmops.py:859 msgid "instance is not powered on" msgstr "instance is not powered on" -#: nova/virt/vmwareapi/vmops.py:889 +#: nova/virt/vmwareapi/vmops.py:887 msgid "Instance does not exist on backend" msgstr "" -#: nova/virt/vmwareapi/vmops.py:916 +#: nova/virt/vmwareapi/vmops.py:914 #, python-format msgid "" "In vmwareapi:vmops:_destroy_instance, got this exception while un-" "registering the VM: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:939 +#: nova/virt/vmwareapi/vmops.py:937 msgid "" "In vmwareapi:vmops:_destroy_instance, exception while deleting the VM " "contents from the disk" msgstr "" -#: nova/virt/vmwareapi/vmops.py:971 +#: nova/virt/vmwareapi/vmops.py:969 msgid "pause not supported for vmwareapi" msgstr "pause not supported for vmwareapi" -#: nova/virt/vmwareapi/vmops.py:975 +#: nova/virt/vmwareapi/vmops.py:973 msgid "unpause not supported for vmwareapi" msgstr "unpause not supported for vmwareapi" -#: nova/virt/vmwareapi/vmops.py:993 +#: nova/virt/vmwareapi/vmops.py:991 #, fuzzy msgid "instance is powered off and cannot be suspended." msgstr "instance is powered off and can not be suspended." 
-#: nova/virt/vmwareapi/vmops.py:1013 +#: nova/virt/vmwareapi/vmops.py:1011 msgid "instance is not in a suspended state" msgstr "instance is not in a suspended state" -#: nova/virt/vmwareapi/vmops.py:1113 +#: nova/virt/vmwareapi/vmops.py:1111 msgid "Unable to shrink disk." msgstr "" -#: nova/virt/vmwareapi/vmops.py:1172 +#: nova/virt/vmwareapi/vmops.py:1170 #, fuzzy, python-format msgid "" "In vmwareapi:vmops:confirm_migration, got this exception while destroying" @@ -8630,21 +8622,21 @@ msgstr "" "In vmwareapi:vmops:destroy, got this exception while un-registering the " "VM: %s" -#: nova/virt/vmwareapi/vmops.py:1248 nova/virt/xenapi/vmops.py:1500 +#: nova/virt/vmwareapi/vmops.py:1246 nova/virt/xenapi/vmops.py:1500 #, python-format msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" msgstr "Found %(instance_count)d hung reboots older than %(timeout)d seconds" -#: nova/virt/vmwareapi/vmops.py:1252 nova/virt/xenapi/vmops.py:1504 +#: nova/virt/vmwareapi/vmops.py:1250 nova/virt/xenapi/vmops.py:1504 msgid "Automatically hard rebooting" msgstr "Automatically hard rebooting" -#: nova/virt/vmwareapi/vmops.py:1570 +#: nova/virt/vmwareapi/vmops.py:1568 #, python-format msgid "No device with interface-id %s exists on VM" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1580 +#: nova/virt/vmwareapi/vmops.py:1578 #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "" diff --git a/nova/locale/es/LC_MESSAGES/nova-log-error.po b/nova/locale/es/LC_MESSAGES/nova-log-error.po index 6e3b9f91b2..97d19bdac9 100644 --- a/nova/locale/es/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/es/LC_MESSAGES/nova-log-error.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" -"PO-Revision-Date: 2014-08-11 15:41+0000\n" +"POT-Creation-Date: 2014-08-18 06:04+0000\n" +"PO-Revision-Date: 2014-08-14 10:51+0000\n" "Last-Translator: openstackjenkins \n" 
"Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/" "es/)\n" @@ -203,7 +203,7 @@ msgstr "No se ha podido desasignar la red para la instancia suprimida" msgid "Failed to dealloc network for failed instance" msgstr "Fallo al desasociar red para la instancia fallida" -#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +#: nova/compute/manager.py:1458 nova/compute/manager.py:3527 msgid "Error trying to reschedule" msgstr "Error al intentar volver a programar " @@ -214,105 +214,146 @@ msgstr "" "La configuración de red de la instancia falló después de %(attempts)d intento" "(s)" -#: nova/compute/manager.py:1755 +#: nova/compute/manager.py:1761 msgid "Instance failed block device setup" msgstr "Ha fallado la configuración de dispositivo de bloque en la instancia" -#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 -#: nova/compute/manager.py:4058 +#: nova/compute/manager.py:1781 nova/compute/manager.py:2123 +#: nova/compute/manager.py:4071 msgid "Instance failed to spawn" msgstr "La instancia no se ha podido generar" -#: nova/compute/manager.py:1957 +#: nova/compute/manager.py:1964 msgid "Unexpected build failure, not rescheduling build." msgstr "Fallo de compilación inesperado, no se reprogramará la compilación." 
-#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +#: nova/compute/manager.py:2033 nova/compute/manager.py:2085 msgid "Failed to allocate network(s)" msgstr "Fallo al asociar red(es)" -#: nova/compute/manager.py:2104 +#: nova/compute/manager.py:2111 msgid "Failure prepping block device" msgstr "Fallo al preparar el dispositivo de bloques" -#: nova/compute/manager.py:2137 +#: nova/compute/manager.py:2144 msgid "Failed to deallocate networks" msgstr "Fallo al desasociar redes" -#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 -#: nova/compute/manager.py:5803 +#: nova/compute/manager.py:2374 nova/compute/manager.py:3718 +#: nova/compute/manager.py:5822 msgid "Setting instance vm_state to ERROR" msgstr "Estableciendo el vm_state de la instancia a ERROR" -#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#: nova/compute/manager.py:2586 nova/compute/manager.py:4933 #, python-format msgid "Failed to get compute_info for %s" msgstr "Fallo al obtener compute_info para %s" -#: nova/compute/manager.py:3005 +#: nova/compute/manager.py:3013 #, python-format msgid "set_admin_password failed: %s" msgstr "set_admin_password ha fallado: %s" -#: nova/compute/manager.py:3090 +#: nova/compute/manager.py:3098 msgid "Error trying to Rescue Instance" msgstr "Error al intentar Rescatar Instancia" -#: nova/compute/manager.py:3711 +#: nova/compute/manager.py:3724 #, python-format msgid "Failed to rollback quota for failed finish_resize: %s" msgstr "Fallo al revertir las cuotas para un finish_resize fallido: %s" -#: nova/compute/manager.py:4310 +#: nova/compute/manager.py:4323 #, python-format msgid "Failed to attach %(volume_id)s at %(mountpoint)s" msgstr "Fallo al asociar %(volume_id)s en %(mountpoint)s" -#: nova/compute/manager.py:4349 +#: nova/compute/manager.py:4362 #, python-format msgid "Failed to detach volume %(volume_id)s from %(mp)s" msgstr "No se ha podido desconectar el volumen %(volume_id)s de %(mp)s" -#: nova/compute/manager.py:4422 +#: 
nova/compute/manager.py:4441 #, python-format msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" msgstr "" "Fallo para intercambiar volúmen %(old_volume_id)s por %(new_volume_id)s" -#: nova/compute/manager.py:4429 +#: nova/compute/manager.py:4448 #, python-format msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" msgstr "" "Fallo al conectar hacia al volúmen %(volume_id)s con el volumen en " "%(mountpoint)s" -#: nova/compute/manager.py:4716 +#: nova/compute/manager.py:4735 #, python-format msgid "Pre live migration failed at %s" msgstr "Previo a migración en vivo falló en %s" -#: nova/compute/manager.py:5216 +#: nova/compute/manager.py:5235 msgid "Periodic task failed to offload instance." msgstr "Tarea periódica falló al descargar instancia." -#: nova/compute/manager.py:5256 +#: nova/compute/manager.py:5275 #, python-format msgid "Failed to generate usage audit for instance on host %s" msgstr "" "No se ha podido generar auditoría de uso para la instancia en el host %s " -#: nova/compute/manager.py:5446 +#: nova/compute/manager.py:5465 msgid "" "Periodic sync_power_state task had an error while processing an instance." msgstr "" "La tarea periódica sync_power_state ha tenido un error al procesar una " "instancia." -#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 -#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +#: nova/compute/manager.py:5568 nova/compute/manager.py:5577 +#: nova/compute/manager.py:5608 nova/compute/manager.py:5619 msgid "error during stop() in sync_power_state." msgstr "error durante stop() en sync_power_state." 
+#: nova/network/neutronv2/api.py:234 +#, python-format +msgid "Neutron error creating port on network %s" +msgstr "Error de Neutron al crear puerto en la red: %s" + +#: nova/network/neutronv2/api.py:418 +#, python-format +msgid "Failed to update port %s" +msgstr "Ha habido un fallo al actualizar el puerto %s" + +#: nova/network/neutronv2/api.py:425 +#, python-format +msgid "Failed to delete port %s" +msgstr "Ha ocurrido un fallo al eliminar el puerto %s" + +#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524 +#, python-format +msgid "Failed to delete neutron port %s" +msgstr "Fallo al eliminar el puerto de neutron %s" + +#: nova/network/neutronv2/api.py:697 +#, python-format +msgid "Failed to access port %s" +msgstr "Fallo al acceder al puerto %s" + +#: nova/network/neutronv2/api.py:931 +#, python-format +msgid "Unable to access floating IP %s" +msgstr "Incapaz de acceder a la Ip flotante %s" + +#: nova/network/neutronv2/api.py:1065 +#, python-format +msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" +msgstr "" +"Incapaz de acceder a la IP flotante %(fixed_ip)s para el puerto %(port_id)s" + +#: nova/network/neutronv2/api.py:1124 +#, python-format +msgid "Unable to update host of port %s" +msgstr "Incapaz de actualizar el anfitrión del puerto %s" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "No se ha podido notificar a las células el error de instancia" @@ -406,50 +447,50 @@ msgstr "" "Se ha encontrado un error en el montaje del sistema de archivos de " "contenedor '%(image)s' en '%(target)s': : %(errors)s" -#: nova/virt/libvirt/driver.py:641 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" "Nova necesita libvirt versión %(major)i.%(minor)i.%(micro)i o superior." 
-#: nova/virt/libvirt/driver.py:766 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "Ha fallado la conexión a libvirt: %s" -#: nova/virt/libvirt/driver.py:929 +#: nova/virt/libvirt/driver.py:927 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "Error de libvirt durante destrucción. Código=%(errcode)s Error=%(e)s" -#: nova/virt/libvirt/driver.py:1007 +#: nova/virt/libvirt/driver.py:1005 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" "Error de libvirt durante borrado de definición. Código=%(errcode)s Error=" "%(e)s" -#: nova/virt/libvirt/driver.py:1035 +#: nova/virt/libvirt/driver.py:1033 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" "Error de libvirt durante eliminación de filtro. Código=%(errcode)s Error=" "%(e)s" -#: nova/virt/libvirt/driver.py:1438 +#: nova/virt/libvirt/driver.py:1444 msgid "attaching network adapter failed." msgstr "se ha encontrado un error en la conexión del adaptador de red." -#: nova/virt/libvirt/driver.py:1465 +#: nova/virt/libvirt/driver.py:1471 msgid "detaching network adapter failed." msgstr "se ha encontrado un error en la desconexión del adaptador de red." -#: nova/virt/libvirt/driver.py:1717 +#: nova/virt/libvirt/driver.py:1726 msgid "Failed to send updated snapshot status to volume service." msgstr "" "Fallo al enviar estado de instantánea actualizada al servicio de volumen." -#: nova/virt/libvirt/driver.py:1825 +#: nova/virt/libvirt/driver.py:1834 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." @@ -457,68 +498,68 @@ msgstr "" "Incapaz de crear instantánea de VM inmovilizada, intentando nuevamente con " "la inmovilidad deshabilitada" -#: nova/virt/libvirt/driver.py:1831 +#: nova/virt/libvirt/driver.py:1840 msgid "Unable to create VM snapshot, failing volume_snapshot operation." 
msgstr "" "Incapaz de crear instantánea de VM, operación de volume_snapshot fallida." -#: nova/virt/libvirt/driver.py:1880 +#: nova/virt/libvirt/driver.py:1889 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" "Error ocurrido durante volume_snapshot_create, enviando estado de error a " "Cinder." -#: nova/virt/libvirt/driver.py:2026 +#: nova/virt/libvirt/driver.py:2111 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." msgstr "" "Ha ocurrido un error durante volume_snapshot_delete, envinado estado de " "error a Cinder." -#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 +#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "Error en '%(path)s' al comprobar E/S directa: '%(ex)s'" -#: nova/virt/libvirt/driver.py:2620 +#: nova/virt/libvirt/driver.py:2705 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "Error al inyectar datos en imagen %(img_id)s (%(e)s)" -#: nova/virt/libvirt/driver.py:2788 +#: nova/virt/libvirt/driver.py:2873 #, python-format msgid "Creating config drive failed with error: %s" msgstr "La creación de unidad de configuración ha fallado con el error: %s" -#: nova/virt/libvirt/driver.py:2881 +#: nova/virt/libvirt/driver.py:2966 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "La asociación de dispositivos PCI %(dev)s a %(dom)s ha fallado." 
-#: nova/virt/libvirt/driver.py:3680 +#: nova/virt/libvirt/driver.py:3783 #, python-format msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3684 +#: nova/virt/libvirt/driver.py:3787 #, python-format msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3689 +#: nova/virt/libvirt/driver.py:3792 #, python-format msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3703 +#: nova/virt/libvirt/driver.py:3806 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" "Neutron ha reportado una falla en el evento %(event)s para la instancia " "%(uuid)s" -#: nova/virt/libvirt/driver.py:4012 +#: nova/virt/libvirt/driver.py:4115 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " @@ -527,12 +568,12 @@ msgstr "" "El nombre del anfitrión ha cambiado de %(old)s a %(new)s. Se requiere un " "reinicio para hacer efecto." 
-#: nova/virt/libvirt/driver.py:4691 +#: nova/virt/libvirt/driver.py:4794 #, python-format msgid "Live Migration failure: %s" msgstr "Fallo en migración en vivo: %s" -#: nova/virt/libvirt/driver.py:5487 +#: nova/virt/libvirt/driver.py:5596 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "No se ha podido limpiar el directorio %(target)s: %(e)s" @@ -551,17 +592,17 @@ msgstr "" "El tamaño virtual %(base_size)s de %(base)s es más grande que el tamaño del " "disco raíz del sabor %(size)s" -#: nova/virt/libvirt/imagecache.py:130 +#: nova/virt/libvirt/imagecache.py:129 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" msgstr "Error al leer imagen en archivo %(filename)s: %(error)s" -#: nova/virt/libvirt/imagecache.py:391 +#: nova/virt/libvirt/imagecache.py:390 #, python-format msgid "image %(id)s at (%(base_file)s): image verification failed" msgstr "imagen %(id)s en (%(base_file)s): ha fallado la verificación de imagen" -#: nova/virt/libvirt/imagecache.py:448 +#: nova/virt/libvirt/imagecache.py:447 #, python-format msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "No se ha podido eliminar %(base_file)s, el error era %(error)s" @@ -571,19 +612,19 @@ msgstr "No se ha podido eliminar %(base_file)s, el error era %(error)s" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "Ignorando valor no reconocido volume_clear='%s'" -#: nova/virt/libvirt/rbd.py:62 +#: nova/virt/libvirt/rbd_utils.py:62 #, python-format msgid "error opening rbd image %s" msgstr "Error al abrir imagen rbd %s" -#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 -#: nova/virt/libvirt/vif.py:533 +#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474 +#: nova/virt/libvirt/vif.py:496 msgid "Failed while plugging vif" msgstr "Fallo al conectar vif" -#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 -#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 -#: nova/virt/libvirt/vif.py:674 
nova/virt/libvirt/vif.py:696 +#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560 +#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598 +#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639 msgid "Failed while unplugging vif" msgstr "No se ha podido desconectar vif" @@ -602,18 +643,18 @@ msgstr "No se puede desmontar el recurso compartido NFS %s" msgid "Couldn't unmount the GlusterFS share %s" msgstr "No se puede desmontar el recurso compartido GlusterFS %s" -#: nova/virt/vmwareapi/vmops.py:509 +#: nova/virt/vmwareapi/vmops.py:508 #, python-format msgid "" "Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1553 +#: nova/virt/vmwareapi/vmops.py:1551 #, python-format msgid "Attaching network adapter failed. Exception: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1593 +#: nova/virt/vmwareapi/vmops.py:1591 #, python-format msgid "Detaching network adapter failed. Exception: %s" msgstr "" diff --git a/nova/locale/es/LC_MESSAGES/nova-log-info.po b/nova/locale/es/LC_MESSAGES/nova-log-info.po index d0dd8e8bee..75b47b8a43 100644 --- a/nova/locale/es/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/es/LC_MESSAGES/nova-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" -"PO-Revision-Date: 2014-08-07 07:51+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" +"PO-Revision-Date: 2014-08-15 05:00+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/" "es/)\n" @@ -44,7 +44,12 @@ msgstr "Excepción de HTTP emitida: %s" msgid "Deleting network with id %s" msgstr "Suprimiendo red con el id %s" -#: nova/compute/manager.py:5452 +#: nova/compute/manager.py:2663 +#, python-format +msgid "bringing vm to original state: '%s'" +msgstr "poniendo vm en estado original: '%s'" + +#: nova/compute/manager.py:5471 #, 
python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." @@ -148,100 +153,105 @@ msgstr "Eliminando registro duplicado con id: %(id)s de la tabla: %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" +#: nova/virt/block_device.py:221 +#, python-format +msgid "preserve multipath_id %s" +msgstr "" + #: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:137 +#: nova/virt/disk/vfs/guestfs.py:139 msgid "Unable to force TCG mode, libguestfs too old?" msgstr "" -#: nova/virt/libvirt/driver.py:837 +#: nova/virt/libvirt/driver.py:835 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:950 +#: nova/virt/libvirt/driver.py:948 msgid "Instance destroyed successfully." msgstr "La instancia se ha destruido satisfactoriamente. " -#: nova/virt/libvirt/driver.py:960 +#: nova/virt/libvirt/driver.py:958 msgid "Instance may be started again." msgstr "La instancia puede volver a iniciarse." -#: nova/virt/libvirt/driver.py:970 +#: nova/virt/libvirt/driver.py:968 msgid "Going to destroy instance again." msgstr "Se va a volver a destruir la instancia." 
-#: nova/virt/libvirt/driver.py:1570 +#: nova/virt/libvirt/driver.py:1576 msgid "Beginning live snapshot process" msgstr "Empezando proceso de instantánea en directo" -#: nova/virt/libvirt/driver.py:1573 +#: nova/virt/libvirt/driver.py:1579 msgid "Beginning cold snapshot process" msgstr "Empezando proceso de instantánea frío" -#: nova/virt/libvirt/driver.py:1602 +#: nova/virt/libvirt/driver.py:1608 msgid "Snapshot extracted, beginning image upload" msgstr "Se ha extraído instantánea, empezando subida de imagen" -#: nova/virt/libvirt/driver.py:1614 +#: nova/virt/libvirt/driver.py:1620 msgid "Snapshot image upload complete" msgstr "Subida de imagen de instantánea se ha completado" -#: nova/virt/libvirt/driver.py:2047 +#: nova/virt/libvirt/driver.py:2132 msgid "Instance soft rebooted successfully." msgstr "" "La instancia ha rearrancado satisfactoriamente de forma no permanente. " -#: nova/virt/libvirt/driver.py:2090 +#: nova/virt/libvirt/driver.py:2175 msgid "Instance shutdown successfully." msgstr "La instancia ha concluido satisfactoriamente." -#: nova/virt/libvirt/driver.py:2098 +#: nova/virt/libvirt/driver.py:2183 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "Es posible que la instancia se haya rearrancado durante el arranque no " "permanente, por consiguiente volver ahora." -#: nova/virt/libvirt/driver.py:2167 +#: nova/virt/libvirt/driver.py:2252 msgid "Instance rebooted successfully." msgstr "La instancia ha rearrancado satisfactoriamente." -#: nova/virt/libvirt/driver.py:2335 +#: nova/virt/libvirt/driver.py:2420 msgid "Instance spawned successfully." msgstr "Instancia generada satisfactoriamente. 
" -#: nova/virt/libvirt/driver.py:2351 +#: nova/virt/libvirt/driver.py:2436 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "data: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 +#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Se ha devuelto registro de consola truncado, se han ignorado %d bytes " -#: nova/virt/libvirt/driver.py:2646 +#: nova/virt/libvirt/driver.py:2731 msgid "Creating image" msgstr "Creando imagen" -#: nova/virt/libvirt/driver.py:2772 +#: nova/virt/libvirt/driver.py:2857 msgid "Using config drive" msgstr "Utilizando unidad de configuración" -#: nova/virt/libvirt/driver.py:2781 +#: nova/virt/libvirt/driver.py:2866 #, python-format msgid "Creating config drive at %(path)s" msgstr "Creando unidad de configuración en %(path)s" -#: nova/virt/libvirt/driver.py:3334 +#: nova/virt/libvirt/driver.py:3437 msgid "Configuring timezone for windows instance to localtime" msgstr "Configurando la zona horaria para la instancia windows a horario local" -#: nova/virt/libvirt/driver.py:4217 +#: nova/virt/libvirt/driver.py:4320 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" @@ -251,7 +261,7 @@ msgstr "" "desasociado. Instancia=%(instance_name)s Disco=%(disk)s Código=%(errcode)s " "Error=%(e)s" -#: nova/virt/libvirt/driver.py:4223 +#: nova/virt/libvirt/driver.py:4326 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -260,26 +270,26 @@ msgstr "" "No se ha podido encontrar el dominio en libvirt para la instancia %s. 
No se " "pueden obtener estadísticas de bloque para el dispositivo" -#: nova/virt/libvirt/driver.py:4465 +#: nova/virt/libvirt/driver.py:4568 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5207 +#: nova/virt/libvirt/driver.py:5316 msgid "Instance running successfully." msgstr "La instancia se está ejecutando satisfactoriamente." -#: nova/virt/libvirt/driver.py:5481 +#: nova/virt/libvirt/driver.py:5590 #, python-format msgid "Deleting instance files %s" msgstr "Eliminado los archivos de instancia %s" -#: nova/virt/libvirt/driver.py:5494 +#: nova/virt/libvirt/driver.py:5603 #, python-format msgid "Deletion of %s failed" msgstr "La remoción de %s ha fallado" -#: nova/virt/libvirt/driver.py:5498 +#: nova/virt/libvirt/driver.py:5607 #, python-format msgid "Deletion of %s complete" msgstr "La remoción de %s se ha completado" @@ -296,12 +306,12 @@ msgstr "Asegurando filtros estáticos" msgid "Attempted to unfilter instance which is not filtered" msgstr "Se ha intentado eliminar filtro de instancia que no está filtrada" -#: nova/virt/libvirt/imagecache.py:191 +#: nova/virt/libvirt/imagecache.py:190 #, python-format msgid "Writing stored info to %s" msgstr "Grabando información almacenada en %s" -#: nova/virt/libvirt/imagecache.py:401 +#: nova/virt/libvirt/imagecache.py:400 #, python-format msgid "" "image %(id)s at (%(base_file)s): image verification skipped, no hash stored" @@ -309,27 +319,27 @@ msgstr "" "imagen %(id)s en (%(base_file)s): se ha saltado la verificación de imagen, " "no hay ningún hash almacenado" -#: nova/virt/libvirt/imagecache.py:410 +#: nova/virt/libvirt/imagecache.py:409 #, python-format msgid "%(id)s (%(base_file)s): generating checksum" msgstr "%(id)s (%(base_file)s): generando suma de comprobación" -#: nova/virt/libvirt/imagecache.py:438 +#: nova/virt/libvirt/imagecache.py:437 #, python-format msgid "Base file too young to remove: %s" msgstr "El archivo de base es demasiado nuevo para 
eliminarse: %s" -#: nova/virt/libvirt/imagecache.py:441 +#: nova/virt/libvirt/imagecache.py:440 #, python-format msgid "Removing base file: %s" msgstr "Eliminando archivo de base: %s " -#: nova/virt/libvirt/imagecache.py:459 +#: nova/virt/libvirt/imagecache.py:458 #, python-format msgid "image %(id)s at (%(base_file)s): checking" msgstr "imagen %(id)s en (%(base_file)s): comprobando" -#: nova/virt/libvirt/imagecache.py:483 +#: nova/virt/libvirt/imagecache.py:482 #, python-format msgid "" "image %(id)s at (%(base_file)s): in use: on this node %(local)d local, " @@ -338,17 +348,17 @@ msgstr "" "imagen %(id)s en (%(base_file)s): en uso: en este nodo %(local)d local, " "%(remote)d en otros nodos que comparten este almacenamiento de instancia" -#: nova/virt/libvirt/imagecache.py:550 +#: nova/virt/libvirt/imagecache.py:549 #, python-format msgid "Active base files: %s" msgstr "Archivos de base activos: %s" -#: nova/virt/libvirt/imagecache.py:553 +#: nova/virt/libvirt/imagecache.py:552 #, python-format msgid "Corrupt base files: %s" msgstr "Archivos de base corruptos: %s " -#: nova/virt/libvirt/imagecache.py:557 +#: nova/virt/libvirt/imagecache.py:556 #, python-format msgid "Removable base files: %s" msgstr "Archivos de base eliminables: %s" diff --git a/nova/locale/es/LC_MESSAGES/nova-log-warning.po b/nova/locale/es/LC_MESSAGES/nova-log-warning.po index 762a305d67..02a8ed3bde 100644 --- a/nova/locale/es/LC_MESSAGES/nova-log-warning.po +++ b/nova/locale/es/LC_MESSAGES/nova-log-warning.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" "PO-Revision-Date: 2014-06-24 16:11+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/" @@ -34,7 +34,7 @@ msgid "" "will be locked out for %(lock_mins)d minutes." 
msgstr "" -#: nova/api/ec2/cloud.py:1289 +#: nova/api/ec2/cloud.py:1290 #: nova/api/openstack/compute/contrib/floating_ips.py:254 #, python-format msgid "multiple fixed_ips exist, using the first: %s" @@ -115,16 +115,16 @@ msgstr "" msgid "Instance has had its instance_type removed from the DB" msgstr "" -#: nova/compute/manager.py:2016 +#: nova/compute/manager.py:2023 msgid "No more network or fixed IP to be allocated" msgstr "" -#: nova/compute/manager.py:2256 +#: nova/compute/manager.py:2263 #, python-format msgid "Ignoring EndpointNotFound: %s" msgstr "" -#: nova/compute/manager.py:2274 +#: nova/compute/manager.py:2281 #, python-format msgid "Failed to delete volume: %(volume_id)s due to %(exc)s" msgstr "" @@ -164,22 +164,38 @@ msgstr "" msgid "Instance: %(instance_uuid)s failed to save into memcached" msgstr "" -#: nova/network/neutronv2/api.py:214 +#: nova/network/neutronv2/api.py:218 #, python-format msgid "Neutron error: Port quota exceeded in tenant: %s" msgstr "" -#: nova/network/neutronv2/api.py:219 +#: nova/network/neutronv2/api.py:223 #, python-format msgid "Neutron error: No more fixed IPs in network: %s" msgstr "" -#: nova/network/neutronv2/api.py:223 +#: nova/network/neutronv2/api.py:227 #, python-format msgid "" "Neutron error: MAC address %(mac)s is already in use on network %(network)s." msgstr "" +#: nova/network/neutronv2/api.py:302 +msgid "No network configured!" +msgstr "" + +#: nova/network/neutronv2/api.py:497 +#, python-format +msgid "Port %s does not exist" +msgstr "" + +#: nova/network/neutronv2/api.py:1160 +#, python-format +msgid "" +"Network %(id)s not matched with the tenants network! The ports tenant " +"%(tenant_id)s will be used." 
+msgstr "" + #: nova/openstack/common/loopingcall.py:87 #, python-format msgid "task %(func_name)s run outlasted interval by %(delay).2f sec" @@ -241,6 +257,21 @@ msgstr "No se puede decodificar cpu_allocation_ratio: '%s'" msgid "Could not decode ram_allocation_ratio: '%s'" msgstr "No se puede decodificar ram_allocation_ratio: '%s'" +#: nova/storage/linuxscsi.py:100 +#, python-format +msgid "Multipath call failed exit (%(code)s)" +msgstr "" + +#: nova/storage/linuxscsi.py:121 +#, python-format +msgid "Couldn't find multipath device %s" +msgstr "" + +#: nova/storage/linuxscsi.py:130 +#, python-format +msgid "Skip faulty line \"%(dev_line)s\" of multipath device %(mdev)s" +msgstr "" + #: nova/virt/disk/api.py:366 #, python-format msgid "Ignoring error injecting data into image %(image)s (%(e)s)" @@ -255,14 +286,14 @@ msgstr "" msgid "Unable to import guestfs, falling back to VFSLocalFS" msgstr "" -#: nova/virt/libvirt/driver.py:370 +#: nova/virt/libvirt/driver.py:376 #, python-format msgid "Invalid cachemode %(cache_mode)s specified for disk type %(disk_type)s." msgstr "" "Modalidad de caché %(cache_mode)s no válida especificada para el tipo de " "disco %(disk_type)s." -#: nova/virt/libvirt/driver.py:616 +#: nova/virt/libvirt/driver.py:614 #, python-format msgid "" "The libvirt driver is not tested on %(type)s/%(arch)s by the OpenStack " @@ -273,81 +304,81 @@ msgstr "" "projecto de OpenStack por lo cual su calidad no puede ser asegurada. 
Para " "mas información, ver: https://wiki.openstack.org/wiki/HypervisorSupportMatrix" -#: nova/virt/libvirt/driver.py:673 +#: nova/virt/libvirt/driver.py:671 #, python-format msgid "URI %(uri)s does not support events: %(error)s" msgstr "URI %(uri)s no soporta eventos: %(error)s" -#: nova/virt/libvirt/driver.py:689 +#: nova/virt/libvirt/driver.py:687 #, python-format msgid "URI %(uri)s does not support connection events: %(error)s" msgstr "URI %(uri)s no soporta eventos de conexión: %(error)s" -#: nova/virt/libvirt/driver.py:921 +#: nova/virt/libvirt/driver.py:919 msgid "Cannot destroy instance, operation time out" msgstr "" "No se puede destruir intsancia, tiempo de espera agotado para la operación" -#: nova/virt/libvirt/driver.py:945 +#: nova/virt/libvirt/driver.py:943 msgid "During wait destroy, instance disappeared." msgstr "" -#: nova/virt/libvirt/driver.py:1029 +#: nova/virt/libvirt/driver.py:1027 msgid "Instance may be still running, destroy it again." msgstr "Puede que la instancia aún se esté ejecutando, vuelva a destruirla." -#: nova/virt/libvirt/driver.py:1082 +#: nova/virt/libvirt/driver.py:1080 #, python-format msgid "Ignoring Volume Error on vol %(vol_id)s during delete %(exc)s" msgstr "" "Ignorando Error de volumen en volumen %(vol_id)s durante la remocion %(exc)s" -#: nova/virt/libvirt/driver.py:1132 +#: nova/virt/libvirt/driver.py:1130 #, python-format msgid "Volume %(disk)s possibly unsafe to remove, please clean up manually" msgstr "" "El volumen %(disk)s es posiblemente inseguro para remover, por favor " "límpialo manualmente" -#: nova/virt/libvirt/driver.py:1408 nova/virt/libvirt/driver.py:1416 +#: nova/virt/libvirt/driver.py:1414 nova/virt/libvirt/driver.py:1422 msgid "During detach_volume, instance disappeared." msgstr "Durante detach_volume, la instancia ha desaparecido." -#: nova/virt/libvirt/driver.py:1461 +#: nova/virt/libvirt/driver.py:1467 msgid "During detach_interface, instance disappeared." 
msgstr "Durante detach_interface, la instancia ha desaparecido." -#: nova/virt/libvirt/driver.py:2051 +#: nova/virt/libvirt/driver.py:2136 msgid "Failed to soft reboot instance. Trying hard reboot." msgstr "" "Fallo al reiniciar la instancia de manera suave. Intentando reinicio duro." -#: nova/virt/libvirt/driver.py:2608 +#: nova/virt/libvirt/driver.py:2693 #, python-format msgid "Image %s not found on disk storage. Continue without injecting data" msgstr "" "La imagen %s no se ha encontrado en el almacenamiento de disco. Continuando " "sin inyectar datos." -#: nova/virt/libvirt/driver.py:2795 +#: nova/virt/libvirt/driver.py:2880 msgid "File injection into a boot from volume instance is not supported" msgstr "" "La inyección de archivo al arranque desde la instancia del volumen no está " "soportado." -#: nova/virt/libvirt/driver.py:2870 +#: nova/virt/libvirt/driver.py:2955 msgid "Instance disappeared while detaching a PCI device from it." msgstr "" "La instancia ha desaparecido mientras se removía el dispositivo PCI de ella." -#: nova/virt/libvirt/driver.py:2925 +#: nova/virt/libvirt/driver.py:3010 #, python-format msgid "Cannot update service status on host: %s,since it is not registered." msgstr "" "No se puede actualizar el estado del servicio en el anfitrión: %s, ya que el " "mismo no está registrado." -#: nova/virt/libvirt/driver.py:2928 +#: nova/virt/libvirt/driver.py:3013 #, python-format msgid "" "Cannot update service status on host: %s,due to an unexpected exception." @@ -355,24 +386,24 @@ msgstr "" "No se puede atualizar el estado del servicio en el anfitrión: %s, debido a " "una excepción inesperada." 
-#: nova/virt/libvirt/driver.py:2956 +#: nova/virt/libvirt/driver.py:3041 #, python-format msgid "URI %(uri)s does not support full set of host capabilities: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:3785 +#: nova/virt/libvirt/driver.py:3888 #, python-format msgid "Timeout waiting for vif plugging callback for instance %(uuid)s" msgstr "" "Tiempo excedido para la llamada inversa de la conexión vif para la instancia " "%(uuid)s" -#: nova/virt/libvirt/driver.py:3806 +#: nova/virt/libvirt/driver.py:3909 #, python-format msgid "couldn't obtain the XML from domain: %(uuid)s, exception: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3863 +#: nova/virt/libvirt/driver.py:3966 msgid "" "Cannot get the number of cpu, because this function is not implemented for " "this platform. " @@ -380,28 +411,28 @@ msgstr "" "No se puede obtener el número de CPU porque esta función no está " "implementada para esta plataforma. " -#: nova/virt/libvirt/driver.py:3925 +#: nova/virt/libvirt/driver.py:4028 #, python-format msgid "" "couldn't obtain the vpu count from domain id: %(uuid)s, exception: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3956 +#: nova/virt/libvirt/driver.py:4059 #, python-format msgid "couldn't obtain the memory from domain: %(uuid)s, exception: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:4158 +#: nova/virt/libvirt/driver.py:4261 #, python-format msgid "URI %(uri)s does not support listDevices: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:4813 +#: nova/virt/libvirt/driver.py:4916 #, python-format msgid "plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d." msgstr "plug_vifs() ha fallado %(cnt)d. Intentando hasta %(max_retry)d." 
-#: nova/virt/libvirt/driver.py:5023 +#: nova/virt/libvirt/driver.py:5126 #, python-format msgid "" "Error from libvirt while getting description of %(instance_name)s: [Error " @@ -410,7 +441,7 @@ msgstr "" "Error de libvirt al obtener la descripción de %(instance_name)s: [Código de " "error %(error_code)s] %(ex)s" -#: nova/virt/libvirt/driver.py:5031 +#: nova/virt/libvirt/driver.py:5134 #, python-format msgid "" "Periodic task is updating the host stat, it is trying to get disk " @@ -421,7 +452,7 @@ msgstr "" "intentando obtener el disco %(i_name)s, pero el disco ha sido removido por " "operaciones concurrentes como la modificación de tamaño." -#: nova/virt/libvirt/driver.py:5037 +#: nova/virt/libvirt/driver.py:5140 #, python-format msgid "" "Periodic task is updating the host stat, it is trying to get disk " @@ -436,7 +467,7 @@ msgstr "" "El módulo lLibvirt no se ha podido cargar. NWFilterFirewall no funcionará " "correctamente." -#: nova/virt/libvirt/imagecache.py:318 +#: nova/virt/libvirt/imagecache.py:317 #, python-format msgid "" "Instance %(instance)s is using a backing file %(backing)s which does not " @@ -445,7 +476,7 @@ msgstr "" "La instancia %(instance)s utiliza un archivo de respaldo %(backing)s que no " "aparece en el servicio de la imagen" -#: nova/virt/libvirt/imagecache.py:495 +#: nova/virt/libvirt/imagecache.py:494 #, python-format msgid "" "image %(id)s at (%(base_file)s): warning -- an absent base file is in use! " @@ -454,7 +485,7 @@ msgstr "" "%(id)s (%(base_file)s): aviso -- se está utilizando un archivo base ausente. " "instancias: %(instance_list)s" -#: nova/virt/libvirt/imagecache.py:545 +#: nova/virt/libvirt/imagecache.py:544 #, python-format msgid "Unknown base file: %s" msgstr "Archivo de base desconocido: %s " @@ -470,7 +501,7 @@ msgstr "" "tamaño del volumen virtual es %(size)db, pero el espacio libre en el grupo " "de volúmenes es solo %(free_space)db." 
-#: nova/virt/libvirt/rbd.py:268 +#: nova/virt/libvirt/rbd_utils.py:268 #, python-format msgid "rbd remove %(volume)s in pool %(pool)s failed" msgstr "" @@ -528,35 +559,40 @@ msgstr "" "El volumen de fibra aún no se ha encontrado en: %(mount_device)s. Se volverá " "a explorar y se reintentará. Número de intentos: %(tries)s" -#: nova/virt/libvirt/volume.py:1036 +#: nova/virt/libvirt/volume.py:995 +#, python-format +msgid "multipath-tools probably work improperly. devices to remove = %s." +msgstr "" + +#: nova/virt/libvirt/volume.py:1040 msgid "Value required for 'scality_sofs_config'" msgstr "Valor necesario para 'scality_sofs_config'" -#: nova/virt/libvirt/volume.py:1047 +#: nova/virt/libvirt/volume.py:1051 #, python-format msgid "Cannot access 'scality_sofs_config': %s" msgstr "No se puede acceder a 'scality_sofs_config': %s" -#: nova/virt/libvirt/volume.py:1053 +#: nova/virt/libvirt/volume.py:1057 msgid "Cannot execute /sbin/mount.sofs" msgstr "No se puede ejecutar /sbin/mount.sofs" -#: nova/virt/libvirt/volume.py:1068 +#: nova/virt/libvirt/volume.py:1072 msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "No se puede montar Scality SOFS, compruebe syslog por si hay errores" -#: nova/virt/vmwareapi/driver.py:95 +#: nova/virt/vmwareapi/driver.py:96 msgid "" "The VMware ESX driver is now deprecated and has been removed in the Juno " "release. The VC driver will remain and continue to be supported." msgstr "" -#: nova/virt/vmwareapi/driver.py:150 +#: nova/virt/vmwareapi/driver.py:157 #, python-format msgid "The following clusters could not be found in the vCenter %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:179 +#: nova/virt/vmwareapi/driver.py:202 msgid "Instance cannot be found in host, or in an unknownstate." 
msgstr "" diff --git a/nova/locale/es/LC_MESSAGES/nova.po b/nova/locale/es/LC_MESSAGES/nova.po index 50b94895f3..dcb821f4da 100644 --- a/nova/locale/es/LC_MESSAGES/nova.po +++ b/nova/locale/es/LC_MESSAGES/nova.po @@ -12,8 +12,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" -"PO-Revision-Date: 2014-08-11 22:50+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" +"PO-Revision-Date: 2014-08-15 22:40+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish " "(http://www.transifex.com/projects/p/nova/language/es/)\n" @@ -43,19 +43,23 @@ msgstr "" msgid "Invalid source_type field." msgstr "" -#: nova/block_device.py:192 +#: nova/block_device.py:191 +msgid "Invalid device UUID." +msgstr "" + +#: nova/block_device.py:195 msgid "Missing device UUID." msgstr "" -#: nova/block_device.py:371 +#: nova/block_device.py:374 msgid "Device name empty or too long." msgstr "" -#: nova/block_device.py:375 +#: nova/block_device.py:378 msgid "Device name contains spaces." msgstr "" -#: nova/block_device.py:385 +#: nova/block_device.py:388 msgid "Invalid volume_size." 
msgstr "" @@ -1993,6 +1997,43 @@ msgstr "" msgid "Architecture name '%(arch)s' is not recognised" msgstr "" +#: nova/exception.py:1645 +msgid "CPU and memory allocation must be provided for all NUMA nodes" +msgstr "" + +#: nova/exception.py:1650 +#, python-format +msgid "" +"Image property '%(name)s' is not permitted to override NUMA configuration" +" set against the flavor" +msgstr "" + +#: nova/exception.py:1655 +msgid "" +"Asymmetric NUMA topologies require explicit assignment of CPUs and memory" +" to nodes in image or flavor" +msgstr "" + +#: nova/exception.py:1660 +#, python-format +msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" +msgstr "" + +#: nova/exception.py:1664 +#, python-format +msgid "CPU number %(cpunum)d is assigned to two nodes" +msgstr "" + +#: nova/exception.py:1668 +#, python-format +msgid "CPU number %(cpuset)s is not assigned to any node" +msgstr "" + +#: nova/exception.py:1672 +#, python-format +msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" +msgstr "" + #: nova/filters.py:84 #, python-format msgid "Filter %s returned 0 hosts" @@ -2232,149 +2273,149 @@ msgstr "" msgid "Unknown error occurred." msgstr "Ha ocurrido un error desconocido." 
-#: nova/api/ec2/cloud.py:392 +#: nova/api/ec2/cloud.py:391 #, python-format msgid "Create snapshot of volume %s" msgstr "Crear instantánea del volumen %s" -#: nova/api/ec2/cloud.py:417 +#: nova/api/ec2/cloud.py:418 #, python-format msgid "Could not find key pair(s): %s" msgstr "No se ha podido encontrar par(es) de claves: %s " -#: nova/api/ec2/cloud.py:433 +#: nova/api/ec2/cloud.py:434 #, python-format msgid "Create key pair %s" msgstr "Creando par de claves %s" -#: nova/api/ec2/cloud.py:445 +#: nova/api/ec2/cloud.py:446 #, python-format msgid "Import key %s" msgstr "Importar la clave %s" -#: nova/api/ec2/cloud.py:458 +#: nova/api/ec2/cloud.py:459 #, python-format msgid "Delete key pair %s" msgstr "Borrar para de claves %s" -#: nova/api/ec2/cloud.py:600 nova/api/ec2/cloud.py:730 +#: nova/api/ec2/cloud.py:601 nova/api/ec2/cloud.py:731 msgid "need group_name or group_id" msgstr "se necesita group_name o group_id" -#: nova/api/ec2/cloud.py:605 +#: nova/api/ec2/cloud.py:606 msgid "can't build a valid rule" msgstr "No se ha podido crear una regla válida" -#: nova/api/ec2/cloud.py:613 +#: nova/api/ec2/cloud.py:614 #, python-format msgid "Invalid IP protocol %(protocol)s" msgstr "Protocolo IP no válido %(protocol)s" -#: nova/api/ec2/cloud.py:647 nova/api/ec2/cloud.py:683 +#: nova/api/ec2/cloud.py:648 nova/api/ec2/cloud.py:684 msgid "No rule for the specified parameters." msgstr "No hay regla para los parámetros especificados." 
-#: nova/api/ec2/cloud.py:761 +#: nova/api/ec2/cloud.py:762 #, python-format msgid "Get console output for instance %s" msgstr "Obtener salida de la consola para la instancia %s" -#: nova/api/ec2/cloud.py:833 +#: nova/api/ec2/cloud.py:834 #, python-format msgid "Create volume from snapshot %s" msgstr "Crear volumen desde la instantánea %s" -#: nova/api/ec2/cloud.py:837 nova/api/openstack/compute/contrib/volumes.py:243 +#: nova/api/ec2/cloud.py:838 nova/api/openstack/compute/contrib/volumes.py:243 #, python-format msgid "Create volume of %s GB" msgstr "Crear volumen de %s GB" -#: nova/api/ec2/cloud.py:877 +#: nova/api/ec2/cloud.py:878 #, python-format msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" "Conectar el volumen %(volume_id)s a la instancia %(instance_id)s en " "%(device)s" -#: nova/api/ec2/cloud.py:907 nova/api/openstack/compute/contrib/volumes.py:506 +#: nova/api/ec2/cloud.py:908 nova/api/openstack/compute/contrib/volumes.py:506 #, python-format msgid "Detach volume %s" msgstr "Desasociar volumen %s" -#: nova/api/ec2/cloud.py:1261 +#: nova/api/ec2/cloud.py:1262 msgid "Allocate address" msgstr "Asignar dirección" -#: nova/api/ec2/cloud.py:1266 +#: nova/api/ec2/cloud.py:1267 #, python-format msgid "Release address %s" msgstr "Liberar dirección %s" -#: nova/api/ec2/cloud.py:1271 +#: nova/api/ec2/cloud.py:1272 #, python-format msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "Asociar dirección %(public_ip)s a instancia %(instance_id)s" -#: nova/api/ec2/cloud.py:1281 +#: nova/api/ec2/cloud.py:1282 msgid "Unable to associate IP Address, no fixed_ips." msgstr "No se puede asociar la dirección IP, sin fixed_ips." 
-#: nova/api/ec2/cloud.py:1302 +#: nova/api/ec2/cloud.py:1303 #, python-format msgid "Disassociate address %s" msgstr "Desasociar dirección %s" -#: nova/api/ec2/cloud.py:1319 nova/api/openstack/compute/servers.py:920 +#: nova/api/ec2/cloud.py:1320 nova/api/openstack/compute/servers.py:920 #: nova/api/openstack/compute/plugins/v3/multiple_create.py:64 msgid "min_count must be <= max_count" msgstr "min_count debe ser <= max_count " -#: nova/api/ec2/cloud.py:1351 +#: nova/api/ec2/cloud.py:1352 msgid "Image must be available" msgstr "La imagen debe estar disponible " -#: nova/api/ec2/cloud.py:1451 +#: nova/api/ec2/cloud.py:1452 #, python-format msgid "Reboot instance %r" msgstr "Reiniciar instancia %r" -#: nova/api/ec2/cloud.py:1566 +#: nova/api/ec2/cloud.py:1567 #, python-format msgid "De-registering image %s" msgstr "Des-registrando la imagen %s" -#: nova/api/ec2/cloud.py:1582 +#: nova/api/ec2/cloud.py:1583 msgid "imageLocation is required" msgstr "Se necesita imageLocation" -#: nova/api/ec2/cloud.py:1602 +#: nova/api/ec2/cloud.py:1603 #, python-format msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "Imagen registrada %(image_location)s con el id %(image_id)s" -#: nova/api/ec2/cloud.py:1663 +#: nova/api/ec2/cloud.py:1664 msgid "user or group not specified" msgstr "usuario o grupo no especificado" -#: nova/api/ec2/cloud.py:1666 +#: nova/api/ec2/cloud.py:1667 msgid "only group \"all\" is supported" msgstr "sólo el grupo \"all\" está soportado" -#: nova/api/ec2/cloud.py:1669 +#: nova/api/ec2/cloud.py:1670 msgid "operation_type must be add or remove" msgstr "operation_type debe ser añadir o eliminar" -#: nova/api/ec2/cloud.py:1671 +#: nova/api/ec2/cloud.py:1672 #, python-format msgid "Updating image %s publicity" msgstr "Actualizando imagen %s públicamente" -#: nova/api/ec2/cloud.py:1684 +#: nova/api/ec2/cloud.py:1685 #, python-format msgid "Not allowed to modify attributes for image %s" msgstr "No está permitido modificar los atributos para la 
imagen %s" -#: nova/api/ec2/cloud.py:1714 +#: nova/api/ec2/cloud.py:1715 #, python-format msgid "" "Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not " @@ -2383,63 +2424,63 @@ msgstr "" "Valor no válido '%(ec2_instance_id)s' para el ID de instancia. La " "instancia no tiene ningún volumen conectado en la raíz (%(root)s)." -#: nova/api/ec2/cloud.py:1747 +#: nova/api/ec2/cloud.py:1748 #, python-format msgid "" "Couldn't stop instance %(instance)s within 1 hour. Current vm_state: " "%(vm_state)s, current task_state: %(task_state)s" msgstr "" -#: nova/api/ec2/cloud.py:1771 +#: nova/api/ec2/cloud.py:1772 #, python-format msgid "image of %(instance)s at %(now)s" msgstr "imagen de %(instance)s en %(now)s" -#: nova/api/ec2/cloud.py:1796 nova/api/ec2/cloud.py:1846 +#: nova/api/ec2/cloud.py:1797 nova/api/ec2/cloud.py:1847 msgid "resource_id and tag are required" msgstr "resource_id y tag son necesarios" -#: nova/api/ec2/cloud.py:1800 nova/api/ec2/cloud.py:1850 +#: nova/api/ec2/cloud.py:1801 nova/api/ec2/cloud.py:1851 msgid "Expecting a list of resources" msgstr "Esperando una lista de recursos" -#: nova/api/ec2/cloud.py:1805 nova/api/ec2/cloud.py:1855 -#: nova/api/ec2/cloud.py:1913 +#: nova/api/ec2/cloud.py:1806 nova/api/ec2/cloud.py:1856 +#: nova/api/ec2/cloud.py:1914 msgid "Only instances implemented" msgstr "Sólo están implementadas instancias" -#: nova/api/ec2/cloud.py:1809 nova/api/ec2/cloud.py:1859 +#: nova/api/ec2/cloud.py:1810 nova/api/ec2/cloud.py:1860 msgid "Expecting a list of tagSets" msgstr "Esperando una lista de tagSets" -#: nova/api/ec2/cloud.py:1815 nova/api/ec2/cloud.py:1868 +#: nova/api/ec2/cloud.py:1816 nova/api/ec2/cloud.py:1869 msgid "Expecting tagSet to be key/value pairs" msgstr "Esperando que tagSet sea un par clave/valor" -#: nova/api/ec2/cloud.py:1822 +#: nova/api/ec2/cloud.py:1823 msgid "Expecting both key and value to be set" msgstr "Esperando establecimiento tanto de clave como valor" -#: nova/api/ec2/cloud.py:1873 +#: 
nova/api/ec2/cloud.py:1874 msgid "Expecting key to be set" msgstr "Esperando el establecimiento de la clave" -#: nova/api/ec2/cloud.py:1947 +#: nova/api/ec2/cloud.py:1948 msgid "Invalid CIDR" msgstr "CIDR no válido" -#: nova/api/ec2/ec2utils.py:254 +#: nova/api/ec2/ec2utils.py:255 #, python-format msgid "Unacceptable attach status:%s for ec2 API." msgstr "" -#: nova/api/ec2/ec2utils.py:277 +#: nova/api/ec2/ec2utils.py:278 msgid "Request must include either Timestamp or Expires, but cannot contain both" msgstr "" "La solicitud debe incluir Timestamp o Expires, pero no puede contener " "ambos" -#: nova/api/ec2/ec2utils.py:295 +#: nova/api/ec2/ec2utils.py:296 msgid "Timestamp is invalid." msgstr "La indicación de fecha y hora no es válida." @@ -2722,8 +2763,8 @@ msgstr "No le está permitido suprimir la imagen." msgid "Instance does not exist" msgstr "La instancia no existe " -#: nova/api/openstack/compute/ips.py:90 -#: nova/api/openstack/compute/plugins/v3/ips.py:62 +#: nova/api/openstack/compute/ips.py:84 +#: nova/api/openstack/compute/plugins/v3/ips.py:56 msgid "Instance is not a member of specified network" msgstr "La instancia no es miembro de la red especificada" @@ -3123,13 +3164,13 @@ msgstr "Se ha creado instantánea asistida del volúmen %s" msgid "Delete snapshot with id: %s" msgstr "Suprimir instantánea con el ID: %s" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:105 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:103 msgid "Attach interface" msgstr "Conectar interfaz" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:120 -#: nova/api/openstack/compute/contrib/attach_interfaces.py:158 -#: nova/api/openstack/compute/contrib/attach_interfaces.py:184 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:116 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:145 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:166 #: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:174 #: 
nova/network/security_group/neutron_driver.py:510 #: nova/network/security_group/neutron_driver.py:514 @@ -3139,15 +3180,11 @@ msgstr "Conectar interfaz" msgid "Network driver does not support this function." msgstr "El controlador de red no soporta esta función." -#: nova/api/openstack/compute/contrib/attach_interfaces.py:124 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:120 msgid "Failed to attach interface" msgstr "Se ha encontrado un error al conectar la interfaz." -#: nova/api/openstack/compute/contrib/attach_interfaces.py:134 -msgid "Attachments update is not supported" -msgstr "La actualización de dispositivos conectados no está soportada" - -#: nova/api/openstack/compute/contrib/attach_interfaces.py:146 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:136 #: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:144 #, python-format msgid "Detach interface %s" @@ -3646,16 +3683,6 @@ msgstr "" "Valor de cuota %(value)s para %(key)s es menos que lo actualmente " "utilizado y reservado %(quota_used)s" -#: nova/api/openstack/compute/contrib/rescue.py:78 -#: nova/api/openstack/compute/plugins/v3/rescue.py:80 -msgid "The rescue operation is not implemented by this cloud." -msgstr "La operación de rescate no está implementada por esta nube." - -#: nova/api/openstack/compute/contrib/rescue.py:98 -#: nova/api/openstack/compute/plugins/v3/rescue.py:104 -msgid "The unrescue operation is not implemented by this cloud." -msgstr "La operación de abandono no está implementada por esta nube." 
- #: nova/api/openstack/compute/contrib/scheduler_hints.py:37 #: nova/api/openstack/compute/plugins/v3/scheduler_hints.py:39 msgid "Malformed scheduler_hints attribute" @@ -5075,7 +5102,7 @@ msgstr "" "El volumen con id: %s ha finalizado su creación pero no ha sido marcado " "como 'disponible'" -#: nova/compute/manager.py:1235 nova/compute/manager.py:2057 +#: nova/compute/manager.py:1235 nova/compute/manager.py:2064 msgid "Success" msgstr "Éxito" @@ -5102,7 +5129,7 @@ msgstr "" "La compilación de instancia ha excedido el tiempo de espera. Se ha estado" " en estado erróneo. " -#: nova/compute/manager.py:1524 nova/compute/manager.py:1888 +#: nova/compute/manager.py:1524 nova/compute/manager.py:1894 msgid "Starting instance..." msgstr "Iniciando instancia..." @@ -5122,70 +5149,65 @@ msgstr "" "Fallo de configuración de red de la instancia (intento %(attempt)d de " "%(attempts)d)" -#: nova/compute/manager.py:2020 +#: nova/compute/manager.py:2027 #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" -#: nova/compute/manager.py:2030 nova/compute/manager.py:2080 +#: nova/compute/manager.py:2037 nova/compute/manager.py:2087 msgid "Failed to allocate the network(s), not rescheduling." msgstr "Fallo al asociar la(s) red(es), no se reprogramará." -#: nova/compute/manager.py:2106 +#: nova/compute/manager.py:2113 msgid "Failure prepping block device." msgstr "Fallo al preparar el dispositivo de bloque." -#: nova/compute/manager.py:2127 +#: nova/compute/manager.py:2134 msgid "Could not clean up failed build, not rescheduling" msgstr "No se puede limpiar la compilación fallida, no se reprogramará." -#: nova/compute/manager.py:2185 +#: nova/compute/manager.py:2192 msgid "Failed to deallocate network for instance." 
msgstr "Se ha encontrado un error al desasignar la red para la instancia" -#: nova/compute/manager.py:2206 +#: nova/compute/manager.py:2213 #, python-format msgid "%(action_str)s instance" msgstr "%(action_str)s instancia" -#: nova/compute/manager.py:2361 +#: nova/compute/manager.py:2368 msgid "Instance disappeared during terminate" msgstr "La instancia ha desaparecido durante la terminación" -#: nova/compute/manager.py:2547 +#: nova/compute/manager.py:2554 msgid "Rebuilding instance" msgstr "Volver a crear instancia" -#: nova/compute/manager.py:2560 +#: nova/compute/manager.py:2567 msgid "Invalid state of instance files on shared storage" msgstr "Estado no válido de archivos de instancia en almacenamiento compartido" -#: nova/compute/manager.py:2564 +#: nova/compute/manager.py:2571 msgid "disk on shared storage, recreating using existing disk" msgstr "" "disco en almacenamiento compartido, volviendo a crear utilizando disco " "existente" -#: nova/compute/manager.py:2568 +#: nova/compute/manager.py:2575 #, python-format msgid "disk not on shared storage, rebuilding from: '%s'" msgstr "El disco on está en almacenamiento compartido, reconstruyendo desde: '%s'" -#: nova/compute/manager.py:2655 -#, python-format -msgid "bringing vm to original state: '%s'" -msgstr "poniendo vm en estado original: '%s'" - -#: nova/compute/manager.py:2686 +#: nova/compute/manager.py:2694 #, python-format msgid "Detaching from volume api: %s" msgstr "Desconectando de la API del volumen: %s" -#: nova/compute/manager.py:2713 +#: nova/compute/manager.py:2721 msgid "Rebooting instance" msgstr "Rearrancando instancia" -#: nova/compute/manager.py:2730 +#: nova/compute/manager.py:2738 #, python-format msgid "" "trying to reboot a non-running instance: (state: %(state)s expected: " @@ -5194,24 +5216,24 @@ msgstr "" "intentando rearrancar una instancia que no se está ejecutando: (estado: " "%(state)s se esperaba: %(running)s)" -#: nova/compute/manager.py:2766 +#: nova/compute/manager.py:2774 msgid 
"Reboot failed but instance is running" msgstr "Ha fallado el reinicio pero la instancia se mantiene en ejecución" -#: nova/compute/manager.py:2774 +#: nova/compute/manager.py:2782 #, python-format msgid "Cannot reboot instance: %s" msgstr "No se puede reiniciar instancia: %s" -#: nova/compute/manager.py:2786 +#: nova/compute/manager.py:2794 msgid "Instance disappeared during reboot" msgstr "La instancia ha desaparecido durante el rearranque" -#: nova/compute/manager.py:2854 +#: nova/compute/manager.py:2862 msgid "instance snapshotting" msgstr "creación de instantánea de instancia" -#: nova/compute/manager.py:2860 +#: nova/compute/manager.py:2868 #, python-format msgid "" "trying to snapshot a non-running instance: (state: %(state)s expected: " @@ -5220,37 +5242,37 @@ msgstr "" "intentando hacer una instantánea de una instancia que no se está " "ejecutando: (estado: %(state)s se esperaba: %(running)s)" -#: nova/compute/manager.py:2893 +#: nova/compute/manager.py:2901 #, python-format msgid "Error while trying to clean up image %s" msgstr "Error al intentar limpiar imagen %s" -#: nova/compute/manager.py:2898 +#: nova/compute/manager.py:2906 msgid "Image not found during snapshot" msgstr "No se ha encontrado la imagen durante la instantánea" -#: nova/compute/manager.py:2980 +#: nova/compute/manager.py:2988 #, python-format msgid "Failed to set admin password. Instance %s is not running" msgstr "" "No se ha podido establecer contraseña de administrador. La instancia %s " "no está ejecutando" -#: nova/compute/manager.py:2987 +#: nova/compute/manager.py:2995 msgid "Root password set" msgstr "Contraseña raíz establecida" -#: nova/compute/manager.py:2992 +#: nova/compute/manager.py:3000 msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "" "esta instancia de invitado o controlador no implementa set_admin_password" " ." 
-#: nova/compute/manager.py:3011 +#: nova/compute/manager.py:3019 msgid "error setting admin password" msgstr "error al establecer contraseña de administrador" -#: nova/compute/manager.py:3027 +#: nova/compute/manager.py:3035 #, python-format msgid "" "trying to inject a file into a non-running (state: %(current_state)s " @@ -5259,12 +5281,12 @@ msgstr "" "intentando inyectar un archivo hacia un inactivo (estado: " "%(current_state)s esperado: %(expected_state)s)" -#: nova/compute/manager.py:3032 +#: nova/compute/manager.py:3040 #, python-format msgid "injecting file to %s" msgstr "inyectando archivo a %s" -#: nova/compute/manager.py:3050 +#: nova/compute/manager.py:3058 msgid "" "Unable to find a different image to use for rescue VM, using instance's " "current image" @@ -5272,30 +5294,30 @@ msgstr "" "No se ha podido encontrar una imagen diferente para utilizarla para VM de" " rescate, se utiliza la imagen actual de la instancia" -#: nova/compute/manager.py:3069 +#: nova/compute/manager.py:3077 msgid "Rescuing" msgstr "Rescatando" -#: nova/compute/manager.py:3094 +#: nova/compute/manager.py:3102 #, python-format msgid "Driver Error: %s" msgstr "Error de dispositivo: %s" -#: nova/compute/manager.py:3117 +#: nova/compute/manager.py:3125 msgid "Unrescuing" msgstr "Cancelando rescate" -#: nova/compute/manager.py:3188 +#: nova/compute/manager.py:3196 #, python-format msgid "Migration %s is not found during confirmation" msgstr "La migración %s no ha sido encontrada durante la confirmación" -#: nova/compute/manager.py:3193 +#: nova/compute/manager.py:3201 #, python-format msgid "Migration %s is already confirmed" msgstr "La migración %s ya ha sido confirmada" -#: nova/compute/manager.py:3197 +#: nova/compute/manager.py:3205 #, python-format msgid "" "Unexpected confirmation status '%(status)s' of migration %(id)s, exit " @@ -5304,86 +5326,86 @@ msgstr "" "Estado de confirmación inesperado '%(status)s' de la migración %(id)s, " "salir del proceso de confirmación" -#: 
nova/compute/manager.py:3211 +#: nova/compute/manager.py:3219 msgid "Instance is not found during confirmation" msgstr "La instancia no ha sido encontrada durante la confirmación" -#: nova/compute/manager.py:3392 +#: nova/compute/manager.py:3400 #, python-format msgid "Updating instance to original state: '%s'" msgstr "Actualizando el estado original de instancia hacia: '%s'" -#: nova/compute/manager.py:3415 +#: nova/compute/manager.py:3423 msgid "Instance has no source host" msgstr "La instancia no tiene ningún host de origen" -#: nova/compute/manager.py:3421 +#: nova/compute/manager.py:3429 msgid "destination same as source!" msgstr "destino igual que origen" -#: nova/compute/manager.py:3439 +#: nova/compute/manager.py:3447 msgid "Migrating" msgstr "Migrando" -#: nova/compute/manager.py:3771 +#: nova/compute/manager.py:3784 msgid "Pausing" msgstr "Poniéndose en pausa" -#: nova/compute/manager.py:3788 +#: nova/compute/manager.py:3801 msgid "Unpausing" msgstr "Cancelando la pausa" -#: nova/compute/manager.py:3829 nova/compute/manager.py:3846 +#: nova/compute/manager.py:3842 nova/compute/manager.py:3859 msgid "Retrieving diagnostics" msgstr "Recuperando diagnósticos" -#: nova/compute/manager.py:3882 +#: nova/compute/manager.py:3895 msgid "Resuming" msgstr "Reanudando" -#: nova/compute/manager.py:4102 +#: nova/compute/manager.py:4115 msgid "Get console output" msgstr "Obtener salida de consola " -#: nova/compute/manager.py:4301 +#: nova/compute/manager.py:4314 #, python-format msgid "Attaching volume %(volume_id)s to %(mountpoint)s" msgstr "Conectando el volumen %(volume_id)s a %(mountpoint)s" -#: nova/compute/manager.py:4326 +#: nova/compute/manager.py:4339 #, python-format msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" msgstr "Desconectar el volumen %(volume_id)s del punto de montaje %(mp)s" -#: nova/compute/manager.py:4337 +#: nova/compute/manager.py:4350 msgid "Detaching volume from unknown instance" msgstr "Desconectando volumen de instancia 
desconocida " -#: nova/compute/manager.py:4525 +#: nova/compute/manager.py:4544 #, python-format msgid "allocate_port_for_instance returned %(ports)s ports" msgstr "allocate_port_for_instance ha regresado %(ports)s puertos" -#: nova/compute/manager.py:4549 +#: nova/compute/manager.py:4568 #, python-format msgid "Port %s is not attached" msgstr "El puerto %s no se encuentra asignado" -#: nova/compute/manager.py:4561 nova/tests/compute/test_compute.py:10659 +#: nova/compute/manager.py:4580 nova/tests/compute/test_compute.py:10791 #, python-format msgid "Host %s not found" msgstr "No se ha encontrado el host %s" -#: nova/compute/manager.py:4779 +#: nova/compute/manager.py:4798 msgid "_post_live_migration() is started.." msgstr "Se ha iniciado _post_live_migration()." -#: nova/compute/manager.py:4855 +#: nova/compute/manager.py:4874 #, python-format msgid "Migrating instance to %s finished successfully." msgstr "La migración de la instancia hacia %s ha finalizado exitosamente." -#: nova/compute/manager.py:4857 +#: nova/compute/manager.py:4876 msgid "" "You may see the error \"libvirt: QEMU error: Domain not found: no domain " "with matching name.\" This error can be safely ignored." @@ -5392,15 +5414,15 @@ msgstr "" "encontrado: ningún dominio con un nombre coincidente.\" Este error se " "puede ignorar sin ningún riesgo." -#: nova/compute/manager.py:4882 +#: nova/compute/manager.py:4901 msgid "Post operation of migration started" msgstr "Se ha iniciado la operación posterior de migración" -#: nova/compute/manager.py:5087 +#: nova/compute/manager.py:5106 msgid "An error occurred while refreshing the network cache." msgstr "Ha ocurrido un error al actualizar el cache de red." 
-#: nova/compute/manager.py:5140 +#: nova/compute/manager.py:5159 #, python-format msgid "" "Found %(migration_count)d unconfirmed migrations older than " @@ -5409,12 +5431,12 @@ msgstr "" "Se han encontrado %(migration_count)d migraciones sin confirmar de más de" " %(confirm_window)d segundos" -#: nova/compute/manager.py:5145 +#: nova/compute/manager.py:5164 #, python-format msgid "Setting migration %(migration_id)s to error: %(reason)s" msgstr "Estableciendo la %(migration_id)s en error: %(reason)s" -#: nova/compute/manager.py:5154 +#: nova/compute/manager.py:5173 #, python-format msgid "" "Automatically confirming migration %(migration_id)s for instance " @@ -5423,28 +5445,28 @@ msgstr "" "Confirmando automáticamente la migración %(migration_id)s para la " "instancia %(instance_uuid)s" -#: nova/compute/manager.py:5164 +#: nova/compute/manager.py:5183 #, python-format msgid "Instance %s not found" msgstr "No se ha encontrado la instancia %s" -#: nova/compute/manager.py:5169 +#: nova/compute/manager.py:5188 msgid "In ERROR state" msgstr "En estado de ERROR " -#: nova/compute/manager.py:5176 +#: nova/compute/manager.py:5195 #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "En los estados %(vm_state)s/%(task_state)s, no REDIMENSIONADO/Ninguno" -#: nova/compute/manager.py:5187 +#: nova/compute/manager.py:5206 #, python-format msgid "Error auto-confirming resize: %s. Will retry later." msgstr "" "Error auto confirmando modificación de tamaño: %s. Se intentará " "posteriormente." -#: nova/compute/manager.py:5236 +#: nova/compute/manager.py:5255 #, python-format msgid "" "Running instance usage audit for host %(host)s from %(begin_time)s to " @@ -5453,15 +5475,15 @@ msgstr "" "Ejecutando auditoría de uso de instancia para %(host)s desde " "%(begin_time)s hasta %(end_time)s. %(number_instances)s instancias." 
-#: nova/compute/manager.py:5285 +#: nova/compute/manager.py:5304 msgid "Updating bandwidth usage cache" msgstr "Actualizando memoria caché de uso de ancho de banda" -#: nova/compute/manager.py:5307 +#: nova/compute/manager.py:5326 msgid "Bandwidth usage not supported by hypervisor." msgstr "Uso de ancho de banda no soportado por el hipervisor." -#: nova/compute/manager.py:5430 +#: nova/compute/manager.py:5449 #, python-format msgid "" "Found %(num_db_instances)s in the database and %(num_vm_instances)s on " @@ -5470,7 +5492,7 @@ msgstr "" "Se han encontrado %(num_db_instances)s en la base de datos y " "%(num_vm_instances)s en el hipervisor." -#: nova/compute/manager.py:5496 +#: nova/compute/manager.py:5515 #, python-format msgid "" "During the sync_power process the instance has moved from host %(src)s to" @@ -5479,65 +5501,65 @@ msgstr "" "Durante el proceso sync_power, la instancia se ha movido del host %(src)s" " al host %(dst)s" -#: nova/compute/manager.py:5509 +#: nova/compute/manager.py:5528 #, python-format msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." msgstr "" "Durante sync_power_state la instancia ha dejado una tarea pendiente " "(%(task)s). Omitir." -#: nova/compute/manager.py:5534 +#: nova/compute/manager.py:5553 msgid "Instance shutdown by itself. Calling the stop API." msgstr "Conclusión de instancia por sí misma. Llamando a la API de detención." -#: nova/compute/manager.py:5553 +#: nova/compute/manager.py:5572 msgid "Instance is suspended unexpectedly. Calling the stop API." msgstr "" "La instancia se ha suspendido inesperadamente. Llamando a la API de " "detención." -#: nova/compute/manager.py:5569 +#: nova/compute/manager.py:5588 msgid "Instance is paused unexpectedly. Ignore." msgstr "La instancia se ha puesto en pausa inesperadamente. Ignorar. " -#: nova/compute/manager.py:5575 +#: nova/compute/manager.py:5594 msgid "Instance is unexpectedly not found. Ignore." 
msgstr "La instancia no se encuentra inesperadamente. Ignorar. " -#: nova/compute/manager.py:5581 +#: nova/compute/manager.py:5600 msgid "Instance is not stopped. Calling the stop API." msgstr "La instancia no se ha detenido. Llamando a la API de detención." -#: nova/compute/manager.py:5595 +#: nova/compute/manager.py:5614 msgid "Paused instance shutdown by itself. Calling the stop API." msgstr "" "La instancia pausada se ha apagado a si misma. Llamando la API de " "detención." -#: nova/compute/manager.py:5609 +#: nova/compute/manager.py:5628 msgid "Instance is not (soft-)deleted." msgstr "La instancia no se suprime (de forma no permanente). " -#: nova/compute/manager.py:5639 +#: nova/compute/manager.py:5658 msgid "Reclaiming deleted instance" msgstr "Reclamando instancia suprimida" -#: nova/compute/manager.py:5643 +#: nova/compute/manager.py:5662 #, python-format msgid "Periodic reclaim failed to delete instance: %s" msgstr "Reclamación periódica falló al eliminar instancia: %s" -#: nova/compute/manager.py:5668 +#: nova/compute/manager.py:5687 #, python-format msgid "Deleting orphan compute node %s" msgstr "Eliminando nodo de cómputo huérfano %s" -#: nova/compute/manager.py:5676 nova/compute/resource_tracker.py:406 +#: nova/compute/manager.py:5695 nova/compute/resource_tracker.py:406 #, python-format msgid "No service record for host %s" msgstr "Ningún registro de servicio para el host %s " -#: nova/compute/manager.py:5716 +#: nova/compute/manager.py:5735 #, python-format msgid "" "Detected instance with name label '%s' which is marked as DELETED but " @@ -5547,7 +5569,7 @@ msgstr "" " marcada como ELIMINADA pero todavía se encuentra presente en el " "anfitrión." 
-#: nova/compute/manager.py:5722 +#: nova/compute/manager.py:5741 #, python-format msgid "" "Powering off instance with name label '%s' which is marked as DELETED but" @@ -5556,15 +5578,15 @@ msgstr "" "Apagando la instancia con nombre '%s' que está marcada como ELIMINADA " "pero sigue presente en el anfitrión." -#: nova/compute/manager.py:5731 +#: nova/compute/manager.py:5750 msgid "set_bootable is not implemented for the current driver" msgstr "set_bootable no está implementado en el controlador actual" -#: nova/compute/manager.py:5736 +#: nova/compute/manager.py:5755 msgid "Failed to power off instance" msgstr "Fallo al apagar la instancia" -#: nova/compute/manager.py:5740 +#: nova/compute/manager.py:5759 #, python-format msgid "" "Destroying instance with name label '%s' which is marked as DELETED but " @@ -5573,22 +5595,22 @@ msgstr "" "Desrtuyendo instancia con etiqueta de nombre '%s' la cual ha sido marcada" " como ELIMINADA pero todavía se encuentra presente en el anfitrión." -#: nova/compute/manager.py:5750 +#: nova/compute/manager.py:5769 #, python-format msgid "Periodic cleanup failed to delete instance: %s" msgstr "Limpieza periódica falló al eliminar la instancia: %s" -#: nova/compute/manager.py:5754 +#: nova/compute/manager.py:5773 #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "Valor '%s' no reconocido para CONF.running_deleted_instance_action" -#: nova/compute/manager.py:5786 +#: nova/compute/manager.py:5805 #, python-format msgid "Setting instance back to %(state)s after: %(error)s" msgstr "Estableciendo la instancia de vuelta a %(state)s tras: %(error)s" -#: nova/compute/manager.py:5796 +#: nova/compute/manager.py:5815 #, python-format msgid "Setting instance back to ACTIVE after: %s" msgstr "Marcando la instancia de nuevo como ACTIVA después de: %s" @@ -5986,7 +6008,7 @@ msgstr "Excepción al crear la tabla." 
msgid "Exception while seeding instance_types table" msgstr "Excepción al iniciar la tabla instance_types" -#: nova/image/glance.py:236 +#: nova/image/glance.py:235 #, python-format msgid "" "Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " @@ -5995,7 +6017,7 @@ msgstr "" "Error al contactar con el servidor de glance '%(host)s:%(port)s' para " "'%(method)s', %(extra)s." -#: nova/image/glance.py:268 +#: nova/image/glance.py:267 #, python-format msgid "" "When loading the module %(module_str)s the following error occurred: " @@ -6004,12 +6026,12 @@ msgstr "" "Al cargar el módulo %(module_str)s se ha presentado el siguiente error: " "%(ex)s" -#: nova/image/glance.py:327 +#: nova/image/glance.py:326 #, python-format msgid "Failed to instantiate the download handler for %(scheme)s" msgstr "Fallo al instanciar el manejador de descargas para %(scheme)s" -#: nova/image/glance.py:343 +#: nova/image/glance.py:342 #, python-format msgid "Successfully transferred using %s" msgstr "Exitosamente transferido utilizando %s" @@ -6170,7 +6192,7 @@ msgstr "" msgid "Not deleting key %s" msgstr "Sin eliminar la clave %s" -#: nova/network/api.py:196 nova/network/neutronv2/api.py:812 +#: nova/network/api.py:196 nova/network/neutronv2/api.py:845 #, python-format msgid "re-assign floating IP %(address)s from instance %(instance_id)s" msgstr "volver a asignar IP flotante %(address)s desde instancia %(instance_id)s" @@ -6504,21 +6526,18 @@ msgstr "No se puede suprimir el dominio |%s|" msgid "Invalid IP format %s" msgstr "Formato IP inválido %s" -#: nova/network/neutronv2/api.py:230 -#, python-format -msgid "Neutron error creating port on network %s" -msgstr "Error de Neutron al crear puerto en la red: %s" - -#: nova/network/neutronv2/api.py:263 +#: nova/network/neutronv2/api.py:269 #, python-format msgid "empty project id for instance %s" msgstr "ID de proyecto vacío para la instancia %s" -#: nova/network/neutronv2/api.py:298 -msgid "No network configured!" 
-msgstr "No hay red configurada!" +#: nova/network/neutronv2/api.py:313 nova/network/neutronv2/api.py:678 +msgid "Multiple possible networks found, use a Network ID to be more specific." +msgstr "" +"Se han encontrado múltiples redes posibles, usa un identificador de red " +"para ser más específico." -#: nova/network/neutronv2/api.py:318 +#: nova/network/neutronv2/api.py:335 #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more " @@ -6527,75 +6546,18 @@ msgstr "" "Se han encontrado varios grupos de seguridad que coinciden con '%s'. " "Utilice un ID para ser más específico." -#: nova/network/neutronv2/api.py:388 -#, python-format -msgid "Failed to update port %s" -msgstr "Falló al actualizar el puerto %s" - -#: nova/network/neutronv2/api.py:395 -#, python-format -msgid "Failed to delete port %s" -msgstr "Ha ocurrido un fallo al eliminar el puerto %s" - -#: nova/network/neutronv2/api.py:458 +#: nova/network/neutronv2/api.py:489 #, python-format msgid "Unable to reset device ID for port %s" msgstr "" -#: nova/network/neutronv2/api.py:466 -#, python-format -msgid "Port %s does not exist" -msgstr "El puerto %s no existe" - -#: nova/network/neutronv2/api.py:469 nova/network/neutronv2/api.py:493 -#, python-format -msgid "Failed to delete neutron port %s" -msgstr "Fallo al eliminar el puerto de neutron %s" - -#: nova/network/neutronv2/api.py:647 -msgid "Multiple possible networks found, use a Network ID to be more specific." -msgstr "" -"Se han encontrado múltiples redes posibles, usa un identificador de red " -"para ser más específico." 
- -#: nova/network/neutronv2/api.py:666 -#, python-format -msgid "Failed to access port %s" -msgstr "Fallo al acceder al puerto %s" - -#: nova/network/neutronv2/api.py:898 -#, python-format -msgid "Unable to access floating IP %s" -msgstr "Incapaz de acceder a la Ip flotante %s" - -#: nova/network/neutronv2/api.py:986 +#: nova/network/neutronv2/api.py:1021 #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "" "Se han encontrado varias coincidencias de agrupaciones de IP flotante " "para el nombre '%s' " -#: nova/network/neutronv2/api.py:1030 -#, python-format -msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" -msgstr "" -"Incapaz de acceder a la IP flotante %(fixed_ip)s para el puerto " -"%(port_id)s" - -#: nova/network/neutronv2/api.py:1089 -#, python-format -msgid "Unable to update host of port %s" -msgstr "Incapaz de actualizar el anfitrión del puerto %s" - -#: nova/network/neutronv2/api.py:1125 -#, python-format -msgid "" -"Network %(id)s not matched with the tenants network! The ports tenant " -"%(tenant_id)s will be used." -msgstr "" -"La red %(id)s no coincide con las redes de los inquilinos! El puerto del " -"inquilino %(tenant_id)s será utilizado." 
- #: nova/network/security_group/neutron_driver.py:57 #, python-format msgid "Neutron Error creating security group %s" @@ -6684,6 +6646,14 @@ msgstr "" "El grupo de seguridad %(security_group_name)s no está asociado a la " "instancia %(instance)s" +#: nova/network/security_group/security_group_base.py:89 +msgid "Type and Code must be integers for ICMP protocol type" +msgstr "" + +#: nova/network/security_group/security_group_base.py:92 +msgid "To and From ports must be integers" +msgstr "" + #: nova/network/security_group/security_group_base.py:134 #, python-format msgid "This rule already exists in group %s" @@ -6694,17 +6664,17 @@ msgstr "Esta regla ya existe en el grupo %s" msgid "Error setting %(attr)s" msgstr "Error al establecer %(attr)s" -#: nova/objects/base.py:256 +#: nova/objects/base.py:262 #, python-format msgid "Unable to instantiate unregistered object type %(objtype)s" msgstr "Incapaz de instanciar tipo de objeto no registrado %(objtype)s" -#: nova/objects/base.py:375 +#: nova/objects/base.py:381 #, python-format msgid "Cannot load '%s' in the base class" msgstr "No se puede cargar '%s' en la clase base" -#: nova/objects/base.py:421 +#: nova/objects/base.py:427 #, python-format msgid "%(objname)s object has no attribute '%(attrname)s'" msgstr "El objeto %(objname)s no tiene atributo '%(attrname)s'" @@ -6800,22 +6770,22 @@ msgstr "" msgid "Unable to acquire lock on `%(filename)s` due to %(exception)s" msgstr "" -#: nova/openstack/common/log.py:276 +#: nova/openstack/common/log.py:289 #, python-format msgid "Deprecated: %s" msgstr "En desuso: %s" -#: nova/openstack/common/log.py:385 +#: nova/openstack/common/log.py:397 #, python-format msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "Error al cargar la configuración de registro %(log_config)s: %(err_msg)s" -#: nova/openstack/common/log.py:446 +#: nova/openstack/common/log.py:458 #, python-format msgid "syslog facility must be one of: %s" msgstr "El recurso syslog debe ser uno 
de: %s" -#: nova/openstack/common/log.py:689 +#: nova/openstack/common/log.py:709 #, python-format msgid "Fatal call to deprecated config: %(msg)s" msgstr "Llamada muy grave a configuración en desuso: %(msg)s" @@ -7254,16 +7224,6 @@ msgstr "" msgid "ZooKeeperDriver.leave: %(id)s has not joined to the %(gr)s group" msgstr "ZooKeeperDriver.leave: %(id)s no se ha unido al grupo %(gr)s" -#: nova/storage/linuxscsi.py:100 -#, python-format -msgid "Multipath call failed exit (%(code)s)" -msgstr "La llamada a multivía de acceso no ha podido salir (%(code)s)" - -#: nova/storage/linuxscsi.py:121 -#, python-format -msgid "Couldn't find multipath device %s" -msgstr "No se puede encontrar el dispositivo multiruta %s" - #: nova/tests/fake_ldap.py:33 msgid "Attempted to instantiate singleton" msgstr "Intento de instanciar sigleton" @@ -7284,7 +7244,7 @@ msgstr "La instancia y el volumen no están en la misma availability_zone" msgid "already detached" msgstr "ya está desconectado" -#: nova/tests/api/test_auth.py:98 +#: nova/tests/api/test_auth.py:97 msgid "unexpected role header" msgstr "cabecera de rol inesperada" @@ -7315,32 +7275,32 @@ msgstr "" "Se ha superado la cuota para núcleos: Solicitados 2, pero ya utilizados 9" " de 10 núcleos" -#: nova/tests/compute/test_compute.py:1696 -#: nova/tests/compute/test_compute.py:1723 -#: nova/tests/compute/test_compute.py:1801 -#: nova/tests/compute/test_compute.py:1841 -#: nova/tests/compute/test_compute.py:5644 +#: nova/tests/compute/test_compute.py:1770 +#: nova/tests/compute/test_compute.py:1797 +#: nova/tests/compute/test_compute.py:1875 +#: nova/tests/compute/test_compute.py:1915 +#: nova/tests/compute/test_compute.py:5718 #, python-format msgid "Running instances: %s" msgstr "Ejecutando instancias: %s" -#: nova/tests/compute/test_compute.py:1703 -#: nova/tests/compute/test_compute.py:1771 -#: nova/tests/compute/test_compute.py:1809 +#: nova/tests/compute/test_compute.py:1777 +#: nova/tests/compute/test_compute.py:1845 +#: 
nova/tests/compute/test_compute.py:1883 #, python-format msgid "After terminating instances: %s" msgstr "Después de terminar las instancias: %s" -#: nova/tests/compute/test_compute.py:5655 +#: nova/tests/compute/test_compute.py:5729 #, python-format msgid "After force-killing instances: %s" msgstr "Después de finalizar de forma forzada las instancias: %s" -#: nova/tests/compute/test_compute.py:6271 +#: nova/tests/compute/test_compute.py:6345 msgid "wrong host/node" msgstr "host/nodo incorrecto" -#: nova/tests/compute/test_compute.py:10867 +#: nova/tests/compute/test_compute.py:10999 msgid "spawn error" msgstr "error de generación" @@ -7357,7 +7317,7 @@ msgstr "" msgid "Free CPUs 2.00 VCPUs < requested 5 VCPUs" msgstr "" -#: nova/tests/db/test_migrations.py:923 +#: nova/tests/db/test_migrations.py:931 #, python-format msgid "" "The following migrations are missing a downgrade:\n" @@ -7449,27 +7409,27 @@ msgstr "Cuerpo: %s" msgid "Unexpected status code" msgstr "Código de estado inesperado" -#: nova/tests/virt/hyperv/test_hypervapi.py:513 +#: nova/tests/virt/hyperv/test_hypervapi.py:515 msgid "fake vswitch not found" msgstr "vswitch falso no encontrado" -#: nova/tests/virt/hyperv/test_hypervapi.py:966 +#: nova/tests/virt/hyperv/test_hypervapi.py:968 msgid "Simulated failure" msgstr "Falla simulada" -#: nova/tests/virt/libvirt/fakelibvirt.py:1048 +#: nova/tests/virt/libvirt/fakelibvirt.py:1051 msgid "Expected a list for 'auth' parameter" msgstr "Se esperaba una lista para el parámetro 'auth'" -#: nova/tests/virt/libvirt/fakelibvirt.py:1052 +#: nova/tests/virt/libvirt/fakelibvirt.py:1055 msgid "Expected a function in 'auth[0]' parameter" msgstr "Se esperaba una función en el parámetro 'auth[0]' " -#: nova/tests/virt/libvirt/fakelibvirt.py:1056 +#: nova/tests/virt/libvirt/fakelibvirt.py:1059 msgid "Expected a function in 'auth[1]' parameter" msgstr "Se esperaba una función en el parámetro 'auth[1]' " -#: nova/tests/virt/libvirt/fakelibvirt.py:1067 +#: 
nova/tests/virt/libvirt/fakelibvirt.py:1070 msgid "" "virEventRegisterDefaultImpl() must be called before " "connection is used." @@ -7488,21 +7448,21 @@ msgstr "" msgid "There is no VM registered" msgstr "No hay ninguna VM registrada" -#: nova/tests/virt/vmwareapi/fake.py:987 nova/tests/virt/vmwareapi/fake.py:1323 +#: nova/tests/virt/vmwareapi/fake.py:987 nova/tests/virt/vmwareapi/fake.py:1338 #, python-format msgid "Virtual Machine with ref %s is not there" msgstr "La máquina virtual con la referencia %s no está allí" -#: nova/tests/virt/vmwareapi/fake.py:1112 +#: nova/tests/virt/vmwareapi/fake.py:1127 msgid "Session Invalid" msgstr "Sesión no válida" -#: nova/tests/virt/vmwareapi/fake.py:1320 +#: nova/tests/virt/vmwareapi/fake.py:1335 msgid "No Virtual Machine has been registered yet" msgstr "No se ha registrado aún ninguna máquina virtual " -#: nova/tests/virt/vmwareapi/test_ds_util.py:221 -#: nova/virt/vmwareapi/ds_util.py:267 +#: nova/tests/virt/vmwareapi/test_ds_util.py:215 +#: nova/virt/vmwareapi/ds_util.py:261 #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" @@ -7532,14 +7492,14 @@ msgstr "" msgid "Multiple torrent URL fetcher extensions found. Failing." msgstr "Se han encontrado múltiples URL de buscadores torrent. Fallando." 
-#: nova/virt/block_device.py:241 +#: nova/virt/block_device.py:255 #, python-format msgid "Driver failed to attach volume %(volume_id)s at %(mountpoint)s" msgstr "" "El controlador ha fallado al asignar el volumen %(volume_id)s en " "%(mountpoint)s" -#: nova/virt/block_device.py:363 +#: nova/virt/block_device.py:401 #, python-format msgid "Booting with volume %(volume_id)s at %(mountpoint)s" msgstr "Arrancando con el volumen %(volume_id)s en %(mountpoint)s" @@ -7554,31 +7514,31 @@ msgstr "" msgid "Invalid type for %s entry" msgstr "" -#: nova/virt/driver.py:705 +#: nova/virt/driver.py:708 msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "" -#: nova/virt/driver.py:1261 +#: nova/virt/driver.py:1264 msgid "Event must be an instance of nova.virt.event.Event" msgstr "El suceso debe ser una instancia de un nova.virt.event.Event" -#: nova/virt/driver.py:1267 +#: nova/virt/driver.py:1270 #, python-format msgid "Exception dispatching event %(event)s: %(ex)s" msgstr "Excepción al asignar el suceso %(event)s: %(ex)s" -#: nova/virt/driver.py:1361 +#: nova/virt/driver.py:1364 msgid "Compute driver option required, but not specified" msgstr "" "La opción de controlador de cálculo es necesaria, pero no se ha " "especificado" -#: nova/virt/driver.py:1364 +#: nova/virt/driver.py:1367 #, python-format msgid "Loading compute driver '%s'" msgstr "Cargando controlador de cálculo '%s' " -#: nova/virt/driver.py:1371 +#: nova/virt/driver.py:1374 msgid "Unable to load the virtualization driver" msgstr "Incapaz de cargar el controlador de virtualización" @@ -7611,22 +7571,22 @@ msgstr "La clave '%(key)s' no está en las instancias '%(inst)s'" msgid "Attempted to unfilter instance which is not filtered" msgstr "Se ha intentado eliminar filtro de instancia que no está filtrada" -#: nova/virt/hardware.py:45 +#: nova/virt/hardware.py:46 #, python-format msgid "No CPUs available after parsing %r" msgstr "CPU's no disponibles después de analizar %r" -#: 
nova/virt/hardware.py:77 nova/virt/hardware.py:81 +#: nova/virt/hardware.py:78 nova/virt/hardware.py:82 #, python-format msgid "Invalid range expression %r" msgstr "Expresión de intérvalo inválida %" -#: nova/virt/hardware.py:90 +#: nova/virt/hardware.py:91 #, python-format msgid "Invalid exclusion expression %r" msgstr "Expresión de exclusión inválida %r" -#: nova/virt/hardware.py:97 +#: nova/virt/hardware.py:98 #, python-format msgid "Invalid inclusion expression %r" msgstr "Expresión de inclusión inválida %" @@ -8154,22 +8114,22 @@ msgstr "" "Error montaod %(device)s en %(dir)s en imagen %(imgfile)s con libguestfs " "(%(e)s)" -#: nova/virt/disk/vfs/guestfs.py:154 +#: nova/virt/disk/vfs/guestfs.py:156 #, python-format msgid "Error mounting %(imgfile)s with libguestfs (%(e)s)" msgstr "Error al montar %(imgfile)s con libguestfs (%(e)s)" -#: nova/virt/disk/vfs/guestfs.py:170 +#: nova/virt/disk/vfs/guestfs.py:172 #, python-format msgid "Failed to close augeas %s" msgstr "No se ha podido cerrar augeas %s" -#: nova/virt/disk/vfs/guestfs.py:178 +#: nova/virt/disk/vfs/guestfs.py:180 #, python-format msgid "Failed to shutdown appliance %s" msgstr "No se ha podido concluir el dispositivo %s" -#: nova/virt/disk/vfs/guestfs.py:186 +#: nova/virt/disk/vfs/guestfs.py:188 #, python-format msgid "Failed to close guest handle %s" msgstr "No se ha podido cerrar manejador de invitado %s" @@ -8293,6 +8253,13 @@ msgstr "Se ha encontrado un error en la copia del archivo de %(src)s a %(dest)s" msgid "Failed to remove snapshot for VM %s" msgstr "No se ha podido eliminar la instantánea para VM %s" +#: nova/virt/hyperv/utilsfactory.py:68 +msgid "" +"The \"force_hyperv_utils_v1\" option cannot be set to \"True\" on Windows" +" Server / Hyper-V Server 2012 R2 or above as the WMI " +"\"root/virtualization\" namespace is no longer supported." 
+msgstr "" + #: nova/virt/hyperv/vhdutils.py:66 nova/virt/hyperv/vhdutilsv2.py:64 #, python-format msgid "Unsupported disk format: %s" @@ -8334,12 +8301,12 @@ msgstr "" msgid "Spawning new instance" msgstr "Generando nueva instancia" -#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:576 +#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:574 #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "config_drive_format \"%s\" no válido" -#: nova/virt/hyperv/vmops.py:307 nova/virt/vmwareapi/vmops.py:580 +#: nova/virt/hyperv/vmops.py:307 nova/virt/vmwareapi/vmops.py:578 msgid "Using config drive for instance" msgstr "Utilizando dispositivo de configuración para instancia" @@ -8348,7 +8315,7 @@ msgstr "Utilizando dispositivo de configuración para instancia" msgid "Creating config drive at %(path)s" msgstr "Creando unidad de configuración en %(path)s" -#: nova/virt/hyperv/vmops.py:328 nova/virt/vmwareapi/vmops.py:605 +#: nova/virt/hyperv/vmops.py:328 nova/virt/vmwareapi/vmops.py:603 #, python-format msgid "Creating config drive failed with error: %s" msgstr "La creación de unidad de configuración ha fallado con el error: %s" @@ -8446,21 +8413,21 @@ msgstr "No hay nombres de dispositivo de disco libres para el prefijo '%s'" msgid "Unable to determine disk bus for '%s'" msgstr "No se puede determinar el bus de disco para '%s'" -#: nova/virt/libvirt/driver.py:552 +#: nova/virt/libvirt/driver.py:550 #, python-format msgid "Connection to libvirt lost: %s" msgstr "Conexión hacia libvirt perdida: %s" -#: nova/virt/libvirt/driver.py:741 +#: nova/virt/libvirt/driver.py:739 #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "No se puede manejar la solicitud de autenticación para las credenciales %d" -#: nova/virt/libvirt/driver.py:924 +#: nova/virt/libvirt/driver.py:922 msgid "operation time out" msgstr "Tiempo de espera agotado para la operación" -#: nova/virt/libvirt/driver.py:1248 +#: 
nova/virt/libvirt/driver.py:1246 #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " @@ -8469,66 +8436,82 @@ msgstr "" "El volúmen establece el tamaño de bloque, pero el hipervisor libvirt " "actual '%s' no soporta tamaño de bloque personalizado." -#: nova/virt/libvirt/driver.py:1255 +#: nova/virt/libvirt/driver.py:1253 #, python-format msgid "Volume sets block size, but libvirt '%s' or later is required." msgstr "" "El volúmen establece el tamaño de bloque, pero se requiere libvirt '%s' o" " mayor." -#: nova/virt/libvirt/driver.py:1345 +#: nova/virt/libvirt/driver.py:1351 msgid "Swap only supports host devices" msgstr "El espacio de intercambio solamente soporta dispositivos de anfitrión " -#: nova/virt/libvirt/driver.py:1631 +#: nova/virt/libvirt/driver.py:1638 msgid "libvirt error while requesting blockjob info." msgstr "error de libvirt al solicitar información de blockjob." -#: nova/virt/libvirt/driver.py:1774 +#: nova/virt/libvirt/driver.py:1783 msgid "Found no disk to snapshot." msgstr "No se ha encontrado disco relacionado a instantánea." -#: nova/virt/libvirt/driver.py:1866 +#: nova/virt/libvirt/driver.py:1875 #, python-format msgid "Unknown type: %s" msgstr "Tipo desconocido: %s" -#: nova/virt/libvirt/driver.py:1871 +#: nova/virt/libvirt/driver.py:1880 msgid "snapshot_id required in create_info" msgstr "snapshot_id es requerido en create_info" -#: nova/virt/libvirt/driver.py:1929 +#: nova/virt/libvirt/driver.py:1938 #, python-format msgid "Libvirt '%s' or later is required for online deletion of volume snapshots." msgstr "" "Libvirt '%s' o mayor se requiere para remoción en línea de instantáneas " "de volumen." 
-#: nova/virt/libvirt/driver.py:1936 +#: nova/virt/libvirt/driver.py:1945 #, python-format msgid "Unknown delete_info type %s" msgstr "Tipo delete_info %s desconocido" -#: nova/virt/libvirt/driver.py:1964 +#: nova/virt/libvirt/driver.py:1981 #, python-format msgid "Disk with id: %s not found attached to instance." msgstr "" -#: nova/virt/libvirt/driver.py:2406 nova/virt/xenapi/vmops.py:1561 +#: nova/virt/libvirt/driver.py:1990 +msgid "filename cannot be None" +msgstr "" + +#: nova/virt/libvirt/driver.py:2019 +#, python-format +msgid "no match found for %s" +msgstr "" + +#: nova/virt/libvirt/driver.py:2076 +#, python-format +msgid "" +"Relative blockcommit support was not detected. Libvirt '%s' or later is " +"required for online deletion of network storage-backed volume snapshots." +msgstr "" + +#: nova/virt/libvirt/driver.py:2491 nova/virt/xenapi/vmops.py:1561 msgid "Guest does not have a console available" msgstr "El invitado no tiene una consola disponible" -#: nova/virt/libvirt/driver.py:2735 +#: nova/virt/libvirt/driver.py:2820 #, python-format msgid "%s format is not supported" msgstr "" -#: nova/virt/libvirt/driver.py:2841 +#: nova/virt/libvirt/driver.py:2926 #, python-format msgid "Detaching PCI devices with libvirt < %(ver)s is not permitted" msgstr "La remoción de dispositivos PCI con libvirt < %(ver)s no está permitida" -#: nova/virt/libvirt/driver.py:2984 +#: nova/virt/libvirt/driver.py:3069 #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt " @@ -8537,19 +8520,19 @@ msgstr "" "La configuración ha solicitado un modelo CPU explícito, pero el " "hipervisor libvirt actual '%s' no soporta la selección de modelos de CPU" -#: nova/virt/libvirt/driver.py:2990 +#: nova/virt/libvirt/driver.py:3075 msgid "Config requested a custom CPU model, but no model name was provided" msgstr "" "La configuración ha solicitado un modelo de CPU personalizado, pero no se" " ha proporcionado ningún nombre de modelo" -#: 
nova/virt/libvirt/driver.py:2994 +#: nova/virt/libvirt/driver.py:3079 msgid "A CPU model name should not be set when a host CPU model is requested" msgstr "" "No se debe establecer un nombre de modelo de CPU cuando se solicita un " "modelo de CPU de host" -#: nova/virt/libvirt/driver.py:3586 +#: nova/virt/libvirt/driver.py:3689 #, python-format msgid "" "Error from libvirt while looking up %(instance_id)s: [Error Code " @@ -8558,7 +8541,7 @@ msgstr "" "Error de libvirt durante la búsqueda de %(instance_id)s: [Código de Error" " %(error_code)s] %(ex)s" -#: nova/virt/libvirt/driver.py:3607 +#: nova/virt/libvirt/driver.py:3710 #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " @@ -8567,27 +8550,27 @@ msgstr "" "Error de libvirt al buscar %(instance_name)s: [Código de error " "%(error_code)s] %(ex)s" -#: nova/virt/libvirt/driver.py:3873 +#: nova/virt/libvirt/driver.py:3976 msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range." msgstr "Configuración vcpu_pin_set inválida, fuera de rango de cpu de hipervisor." -#: nova/virt/libvirt/driver.py:3998 +#: nova/virt/libvirt/driver.py:4101 msgid "libvirt version is too old (does not support getVersion)" msgstr "La versión libvirt es demasiado antigua (no soporta getVersion)" -#: nova/virt/libvirt/driver.py:4359 +#: nova/virt/libvirt/driver.py:4462 msgid "Block migration can not be used with shared storage." msgstr "" "No se puede utilizar la migración de bloque con almacenamiento " "compartido. " -#: nova/virt/libvirt/driver.py:4368 +#: nova/virt/libvirt/driver.py:4471 msgid "Live migration can not be used without shared storage." msgstr "" "No se puede utilizar la migración en directo con almacenamiento " "compartido." 
-#: nova/virt/libvirt/driver.py:4438 +#: nova/virt/libvirt/driver.py:4541 #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too " @@ -8597,7 +8580,7 @@ msgstr "" "demasiado grande (disponible en host de destino: %(available)s < " "necesario: %(necessary)s)" -#: nova/virt/libvirt/driver.py:4477 +#: nova/virt/libvirt/driver.py:4580 #, python-format msgid "" "CPU doesn't have compatibility.\n" @@ -8612,12 +8595,12 @@ msgstr "" "\n" "Consulte %(u)s" -#: nova/virt/libvirt/driver.py:4540 +#: nova/virt/libvirt/driver.py:4643 #, python-format msgid "The firewall filter for %s does not exist" msgstr "El filtro de cortafuegos para %s no existe " -#: nova/virt/libvirt/driver.py:4603 +#: nova/virt/libvirt/driver.py:4706 msgid "" "Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag " "or your destination node does not support retrieving listen addresses. " @@ -8626,7 +8609,7 @@ msgid "" "address (0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)." msgstr "" -#: nova/virt/libvirt/driver.py:4620 +#: nova/virt/libvirt/driver.py:4723 msgid "" "Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag," " and the graphics (VNC and/or SPICE) listen addresses on the destination" @@ -8636,7 +8619,7 @@ msgid "" "succeed, but the VM will continue to listen on the current addresses." msgstr "" -#: nova/virt/libvirt/driver.py:4997 +#: nova/virt/libvirt/driver.py:5100 #, python-format msgid "" "Error from libvirt while getting description of %(instance_name)s: [Error" @@ -8645,7 +8628,7 @@ msgstr "" "Error de libvirt al obtener la descripción de %(instance_name)s: [Código " "de error %(error_code)s] %(ex)s" -#: nova/virt/libvirt/driver.py:5123 +#: nova/virt/libvirt/driver.py:5226 msgid "Unable to resize disk down." msgstr "Incapaz de reducir el tamaño del disco." 
@@ -8713,19 +8696,19 @@ msgstr "La vía de acceso %s debe ser el volumen lógico LVM" msgid "volume_clear='%s' is not handled" msgstr "volume_clear='%s' no está manejado" -#: nova/virt/libvirt/rbd.py:104 +#: nova/virt/libvirt/rbd_utils.py:104 msgid "rbd python libraries not found" msgstr "Las librerías rbd python no han sido encontradas" -#: nova/virt/libvirt/rbd.py:159 +#: nova/virt/libvirt/rbd_utils.py:159 msgid "Not stored in rbd" msgstr "No está almacenado en rbd" -#: nova/virt/libvirt/rbd.py:163 +#: nova/virt/libvirt/rbd_utils.py:163 msgid "Blank components" msgstr "Componentes en blanco" -#: nova/virt/libvirt/rbd.py:166 +#: nova/virt/libvirt/rbd_utils.py:166 msgid "Not an rbd snapshot" msgstr "No es una instantánea rbd" @@ -8739,15 +8722,15 @@ msgstr "" "No se puede recuperar la vía de acceso ed dispositivo raíz de la " "configuración de libvirt de instancia" -#: nova/virt/libvirt/vif.py:338 nova/virt/libvirt/vif.py:545 -#: nova/virt/libvirt/vif.py:709 +#: nova/virt/libvirt/vif.py:322 nova/virt/libvirt/vif.py:508 +#: nova/virt/libvirt/vif.py:652 msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" "El parámetro vif_type debe estar presente para esta implementación de " "vif_driver" -#: nova/virt/libvirt/vif.py:344 nova/virt/libvirt/vif.py:551 -#: nova/virt/libvirt/vif.py:715 +#: nova/virt/libvirt/vif.py:328 nova/virt/libvirt/vif.py:514 +#: nova/virt/libvirt/vif.py:658 #, python-format msgid "Unexpected vif_type=%s" msgstr "vif_type=%s inesperado" @@ -8770,27 +8753,33 @@ msgstr "No se puede localizar ningún dispositivo de canal de fibra" msgid "Fibre Channel device not found." msgstr "No se ha encontrado el dispositivo de canal de fibra." 
-#: nova/virt/vmwareapi/driver.py:127 +#: nova/virt/vmwareapi/driver.py:125 +msgid "" +"Must specify host_ip, host_username and host_password to use " +"vmwareapi.VMwareVCDriver" +msgstr "" + +#: nova/virt/vmwareapi/driver.py:134 #, python-format msgid "Invalid Regular Expression %s" msgstr "La expresión regular %s es inválida" -#: nova/virt/vmwareapi/driver.py:141 +#: nova/virt/vmwareapi/driver.py:148 #, python-format msgid "All clusters specified %s were not found in the vCenter" msgstr "Todos los clusters especificados %s no fueron encontrados en vCenter" -#: nova/virt/vmwareapi/driver.py:319 +#: nova/virt/vmwareapi/driver.py:342 #, python-format msgid "The resource %s does not exist" msgstr "El recurso %s no existe" -#: nova/virt/vmwareapi/driver.py:381 +#: nova/virt/vmwareapi/driver.py:404 #, python-format msgid "Invalid cluster or resource pool name : %s" msgstr "Cluster o nombre de pool de recursos inválido: %s" -#: nova/virt/vmwareapi/driver.py:555 +#: nova/virt/vmwareapi/driver.py:582 msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we " "do not return uptime for just one host." @@ -8799,22 +8788,22 @@ msgstr "" "vCenter de VMware; por lo tanto no se puede regresar tiempo de ejecución " "solamente para un huésped." -#: nova/virt/vmwareapi/driver.py:678 +#: nova/virt/vmwareapi/driver.py:705 #, python-format msgid "Unable to validate session %s!" msgstr "Incapaz de validar sesión %s!" -#: nova/virt/vmwareapi/driver.py:720 +#: nova/virt/vmwareapi/driver.py:747 #, python-format msgid "Session %s is inactive!" msgstr "La sesión %s se encuentra inactiva!" 
-#: nova/virt/vmwareapi/driver.py:811 +#: nova/virt/vmwareapi/driver.py:838 #, python-format msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" msgstr "Tarea [%(task_name)s] %(task_ref)s estado: error %(error_info)s" -#: nova/virt/vmwareapi/driver.py:821 +#: nova/virt/vmwareapi/driver.py:848 #, python-format msgid "In vmwareapi:_poll_task, Got this error %s" msgstr "En vmwareapi:_poll_task, se ha obtenido este error %s" @@ -9037,19 +9026,19 @@ msgstr "" msgid "Extending virtual disk failed with error: %s" msgstr "La extensión del disco virtual ha fallado con el error: %s" -#: nova/virt/vmwareapi/vmops.py:253 +#: nova/virt/vmwareapi/vmops.py:252 msgid "Image disk size greater than requested disk size" msgstr "La imagen de disco es más grande que el tamaño del disco solicitado" -#: nova/virt/vmwareapi/vmops.py:861 +#: nova/virt/vmwareapi/vmops.py:859 msgid "instance is not powered on" msgstr "instancia no activada" -#: nova/virt/vmwareapi/vmops.py:889 +#: nova/virt/vmwareapi/vmops.py:887 msgid "Instance does not exist on backend" msgstr "" -#: nova/virt/vmwareapi/vmops.py:916 +#: nova/virt/vmwareapi/vmops.py:914 #, python-format msgid "" "In vmwareapi:vmops:_destroy_instance, got this exception while un-" @@ -9058,33 +9047,33 @@ msgstr "" "En vmwareapi:vmops:_destroy_instance, se obtuvo esta excepción mientras " "se removía el registro de VM: %s" -#: nova/virt/vmwareapi/vmops.py:939 +#: nova/virt/vmwareapi/vmops.py:937 msgid "" "In vmwareapi:vmops:_destroy_instance, exception while deleting the VM " "contents from the disk" msgstr "" -#: nova/virt/vmwareapi/vmops.py:971 +#: nova/virt/vmwareapi/vmops.py:969 msgid "pause not supported for vmwareapi" msgstr "pausa no soportada para vmwareapi" -#: nova/virt/vmwareapi/vmops.py:975 +#: nova/virt/vmwareapi/vmops.py:973 msgid "unpause not supported for vmwareapi" msgstr "cancelación de pausa no soportada para vmwareapi" -#: nova/virt/vmwareapi/vmops.py:993 +#: nova/virt/vmwareapi/vmops.py:991 msgid 
"instance is powered off and cannot be suspended." msgstr "instancia está desactivada y no se puede suspender. " -#: nova/virt/vmwareapi/vmops.py:1013 +#: nova/virt/vmwareapi/vmops.py:1011 msgid "instance is not in a suspended state" msgstr "la instancia no está en un estado suspendido" -#: nova/virt/vmwareapi/vmops.py:1113 +#: nova/virt/vmwareapi/vmops.py:1111 msgid "Unable to shrink disk." msgstr "" -#: nova/virt/vmwareapi/vmops.py:1172 +#: nova/virt/vmwareapi/vmops.py:1170 #, python-format msgid "" "In vmwareapi:vmops:confirm_migration, got this exception while destroying" @@ -9093,23 +9082,23 @@ msgstr "" "En vmwareapi:vmops:confirm_migration, se ha obtenido esta excepción al " "destruir la máquina virtual: %s" -#: nova/virt/vmwareapi/vmops.py:1248 nova/virt/xenapi/vmops.py:1500 +#: nova/virt/vmwareapi/vmops.py:1246 nova/virt/xenapi/vmops.py:1500 #, python-format msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" msgstr "" "Se han encontrado %(instance_count)d rearranques colgados de más de " "%(timeout)d segundos" -#: nova/virt/vmwareapi/vmops.py:1252 nova/virt/xenapi/vmops.py:1504 +#: nova/virt/vmwareapi/vmops.py:1250 nova/virt/xenapi/vmops.py:1504 msgid "Automatically hard rebooting" msgstr "Rearrancando automáticamente de forma permanente" -#: nova/virt/vmwareapi/vmops.py:1570 +#: nova/virt/vmwareapi/vmops.py:1568 #, python-format msgid "No device with interface-id %s exists on VM" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1580 +#: nova/virt/vmwareapi/vmops.py:1578 #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "" diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-error.po b/nova/locale/fr/LC_MESSAGES/nova-log-error.po index e4ad002caf..55a80387db 100644 --- a/nova/locale/fr/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/fr/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" 
+"POT-Creation-Date: 2014-08-18 06:04+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: French (http://www.transifex.com/projects/p/nova/language/" @@ -196,7 +196,7 @@ msgstr "" msgid "Failed to dealloc network for failed instance" msgstr "" -#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +#: nova/compute/manager.py:1458 nova/compute/manager.py:3527 msgid "Error trying to reschedule" msgstr "" @@ -205,99 +205,139 @@ msgstr "" msgid "Instance failed network setup after %(attempts)d attempt(s)" msgstr "" -#: nova/compute/manager.py:1755 +#: nova/compute/manager.py:1761 msgid "Instance failed block device setup" msgstr "" -#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 -#: nova/compute/manager.py:4058 +#: nova/compute/manager.py:1781 nova/compute/manager.py:2123 +#: nova/compute/manager.py:4071 msgid "Instance failed to spawn" msgstr "" -#: nova/compute/manager.py:1957 +#: nova/compute/manager.py:1964 msgid "Unexpected build failure, not rescheduling build." 
msgstr "" -#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +#: nova/compute/manager.py:2033 nova/compute/manager.py:2085 msgid "Failed to allocate network(s)" msgstr "" -#: nova/compute/manager.py:2104 +#: nova/compute/manager.py:2111 msgid "Failure prepping block device" msgstr "" -#: nova/compute/manager.py:2137 +#: nova/compute/manager.py:2144 msgid "Failed to deallocate networks" msgstr "" -#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 -#: nova/compute/manager.py:5803 +#: nova/compute/manager.py:2374 nova/compute/manager.py:3718 +#: nova/compute/manager.py:5822 msgid "Setting instance vm_state to ERROR" msgstr "" -#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#: nova/compute/manager.py:2586 nova/compute/manager.py:4933 #, python-format msgid "Failed to get compute_info for %s" msgstr "" -#: nova/compute/manager.py:3005 +#: nova/compute/manager.py:3013 #, python-format msgid "set_admin_password failed: %s" msgstr "" -#: nova/compute/manager.py:3090 +#: nova/compute/manager.py:3098 msgid "Error trying to Rescue Instance" msgstr "" -#: nova/compute/manager.py:3711 +#: nova/compute/manager.py:3724 #, python-format msgid "Failed to rollback quota for failed finish_resize: %s" msgstr "" -#: nova/compute/manager.py:4310 +#: nova/compute/manager.py:4323 #, python-format msgid "Failed to attach %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4349 +#: nova/compute/manager.py:4362 #, python-format msgid "Failed to detach volume %(volume_id)s from %(mp)s" msgstr "" -#: nova/compute/manager.py:4422 +#: nova/compute/manager.py:4441 #, python-format msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" msgstr "" -#: nova/compute/manager.py:4429 +#: nova/compute/manager.py:4448 #, python-format msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4716 +#: nova/compute/manager.py:4735 #, python-format msgid "Pre live migration 
failed at %s" msgstr "" -#: nova/compute/manager.py:5216 +#: nova/compute/manager.py:5235 msgid "Periodic task failed to offload instance." msgstr "" -#: nova/compute/manager.py:5256 +#: nova/compute/manager.py:5275 #, python-format msgid "Failed to generate usage audit for instance on host %s" msgstr "" -#: nova/compute/manager.py:5446 +#: nova/compute/manager.py:5465 msgid "" "Periodic sync_power_state task had an error while processing an instance." msgstr "" -#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 -#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +#: nova/compute/manager.py:5568 nova/compute/manager.py:5577 +#: nova/compute/manager.py:5608 nova/compute/manager.py:5619 msgid "error during stop() in sync_power_state." msgstr "" +#: nova/network/neutronv2/api.py:234 +#, python-format +msgid "Neutron error creating port on network %s" +msgstr "" + +#: nova/network/neutronv2/api.py:418 +#, python-format +msgid "Failed to update port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:425 +#, python-format +msgid "Failed to delete port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524 +#, python-format +msgid "Failed to delete neutron port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:697 +#, python-format +msgid "Failed to access port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:931 +#, python-format +msgid "Unable to access floating IP %s" +msgstr "" + +#: nova/network/neutronv2/api.py:1065 +#, python-format +msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" +msgstr "" + +#: nova/network/neutronv2/api.py:1124 +#, python-format +msgid "Unable to update host of port %s" +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "Impossible d'avertir les cellules de l'erreur d'instance" @@ -389,116 +429,116 @@ msgid "" "Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s" msgstr "" -#: 
nova/virt/libvirt/driver.py:641 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:766 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:929 +#: nova/virt/libvirt/driver.py:927 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1007 +#: nova/virt/libvirt/driver.py:1005 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1035 +#: nova/virt/libvirt/driver.py:1033 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1438 +#: nova/virt/libvirt/driver.py:1444 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1465 +#: nova/virt/libvirt/driver.py:1471 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1717 +#: nova/virt/libvirt/driver.py:1726 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1825 +#: nova/virt/libvirt/driver.py:1834 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1831 +#: nova/virt/libvirt/driver.py:1840 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1880 +#: nova/virt/libvirt/driver.py:1889 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2026 +#: nova/virt/libvirt/driver.py:2111 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 +#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2620 +#: nova/virt/libvirt/driver.py:2705 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2788 +#: nova/virt/libvirt/driver.py:2873 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2881 +#: nova/virt/libvirt/driver.py:2966 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3680 +#: nova/virt/libvirt/driver.py:3783 #, python-format msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3684 +#: nova/virt/libvirt/driver.py:3787 #, python-format msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3689 +#: nova/virt/libvirt/driver.py:3792 #, python-format msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3703 +#: nova/virt/libvirt/driver.py:3806 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:4012 +#: nova/virt/libvirt/driver.py:4115 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4691 +#: nova/virt/libvirt/driver.py:4794 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5487 +#: nova/virt/libvirt/driver.py:5596 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" @@ -515,17 +555,17 @@ msgid "" "%(size)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:130 +#: nova/virt/libvirt/imagecache.py:129 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:391 +#: nova/virt/libvirt/imagecache.py:390 #, python-format msgid "image %(id)s at (%(base_file)s): image verification failed" msgstr "" -#: nova/virt/libvirt/imagecache.py:448 +#: nova/virt/libvirt/imagecache.py:447 #, python-format msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "" @@ -535,19 +575,19 @@ msgstr "" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/rbd.py:62 +#: nova/virt/libvirt/rbd_utils.py:62 #, python-format msgid "error opening rbd image %s" msgstr "" -#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 -#: nova/virt/libvirt/vif.py:533 +#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474 +#: nova/virt/libvirt/vif.py:496 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 -#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 -#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 +#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560 +#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598 +#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639 msgid "Failed while unplugging vif" msgstr "" @@ -566,18 +606,18 @@ msgstr "" msgid "Couldn't unmount the GlusterFS share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:509 +#: nova/virt/vmwareapi/vmops.py:508 #, python-format msgid "" "Failed to copy cached image %(source)s to %(dest)s for resize: 
%(error)s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1553 +#: nova/virt/vmwareapi/vmops.py:1551 #, python-format msgid "Attaching network adapter failed. Exception: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1593 +#: nova/virt/vmwareapi/vmops.py:1591 #, python-format msgid "Detaching network adapter failed. Exception: %s" msgstr "" diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-info.po b/nova/locale/fr/LC_MESSAGES/nova-log-info.po index 6d7f745448..c7475440ae 100644 --- a/nova/locale/fr/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/fr/LC_MESSAGES/nova-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" -"PO-Revision-Date: 2014-08-07 07:51+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" +"PO-Revision-Date: 2014-08-15 05:00+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: French (http://www.transifex.com/projects/p/nova/language/" "fr/)\n" @@ -44,7 +44,12 @@ msgstr "Exception HTTP générée : %s" msgid "Deleting network with id %s" msgstr "Suppression du réseau avec l'ID %s" -#: nova/compute/manager.py:5452 +#: nova/compute/manager.py:2663 +#, python-format +msgid "bringing vm to original state: '%s'" +msgstr "Restauration de l'état original de la machine virtuelle : '%s'" + +#: nova/compute/manager.py:5471 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." @@ -146,97 +151,102 @@ msgstr "Suppression ligne en double avec l'ID : %(id)s de la table : %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." 
msgstr "" +#: nova/virt/block_device.py:221 +#, python-format +msgid "preserve multipath_id %s" +msgstr "" + #: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:137 +#: nova/virt/disk/vfs/guestfs.py:139 msgid "Unable to force TCG mode, libguestfs too old?" msgstr "" -#: nova/virt/libvirt/driver.py:837 +#: nova/virt/libvirt/driver.py:835 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:950 +#: nova/virt/libvirt/driver.py:948 msgid "Instance destroyed successfully." msgstr "Instance détruite avec succès." -#: nova/virt/libvirt/driver.py:960 +#: nova/virt/libvirt/driver.py:958 msgid "Instance may be started again." msgstr "L'instance peut être redémarrée." -#: nova/virt/libvirt/driver.py:970 +#: nova/virt/libvirt/driver.py:968 msgid "Going to destroy instance again." msgstr "Tentative de redestruction de l'instance." -#: nova/virt/libvirt/driver.py:1570 +#: nova/virt/libvirt/driver.py:1576 msgid "Beginning live snapshot process" msgstr "Démarrage du processus d'instantané en temps réel" -#: nova/virt/libvirt/driver.py:1573 +#: nova/virt/libvirt/driver.py:1579 msgid "Beginning cold snapshot process" msgstr "Démarrage du processus d'instantané à froid" -#: nova/virt/libvirt/driver.py:1602 +#: nova/virt/libvirt/driver.py:1608 msgid "Snapshot extracted, beginning image upload" msgstr "Instantané extrait, démarrage du téléchargement d'image" -#: nova/virt/libvirt/driver.py:1614 +#: nova/virt/libvirt/driver.py:1620 msgid "Snapshot image upload complete" msgstr "Téléchargement d'image instantanée terminé" -#: nova/virt/libvirt/driver.py:2047 +#: nova/virt/libvirt/driver.py:2132 msgid "Instance soft rebooted successfully." msgstr "Instance redémarrée par logiciel avec succès." 
-#: nova/virt/libvirt/driver.py:2090 +#: nova/virt/libvirt/driver.py:2175 msgid "Instance shutdown successfully." msgstr "L'instance s'est arrêtée avec succès." -#: nova/virt/libvirt/driver.py:2098 +#: nova/virt/libvirt/driver.py:2183 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "L'instance a sans doute été redémarrée par logiciel ; retour en cours." -#: nova/virt/libvirt/driver.py:2167 +#: nova/virt/libvirt/driver.py:2252 msgid "Instance rebooted successfully." msgstr "L'instance a redémarré avec succès." -#: nova/virt/libvirt/driver.py:2335 +#: nova/virt/libvirt/driver.py:2420 msgid "Instance spawned successfully." msgstr "Instance générée avec succès." -#: nova/virt/libvirt/driver.py:2351 +#: nova/virt/libvirt/driver.py:2436 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "data: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 +#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Journal de console tronqué retourné, %d octets ignorés" -#: nova/virt/libvirt/driver.py:2646 +#: nova/virt/libvirt/driver.py:2731 msgid "Creating image" msgstr "Création de l'image" -#: nova/virt/libvirt/driver.py:2772 +#: nova/virt/libvirt/driver.py:2857 msgid "Using config drive" msgstr "Utilisation de l'unité de config" -#: nova/virt/libvirt/driver.py:2781 +#: nova/virt/libvirt/driver.py:2866 #, python-format msgid "Creating config drive at %(path)s" msgstr "Création de l'unité de config à %(path)s" -#: nova/virt/libvirt/driver.py:3334 +#: nova/virt/libvirt/driver.py:3437 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4217 +#: nova/virt/libvirt/driver.py:4320 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" @@ -246,7 +256,7 @@ msgstr "" "être détaché. 
Instance=%(instance_name)s Disk=%(disk)s Code=%(errcode)s " "Erreur=%(e)s" -#: nova/virt/libvirt/driver.py:4223 +#: nova/virt/libvirt/driver.py:4326 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -255,26 +265,26 @@ msgstr "" "Domaine introuvable dans libvirt pour l'instance %s. Impossible d'obtenir " "les stats de bloc pour l'unité" -#: nova/virt/libvirt/driver.py:4465 +#: nova/virt/libvirt/driver.py:4568 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5207 +#: nova/virt/libvirt/driver.py:5316 msgid "Instance running successfully." msgstr "L'instance s'exécute avec succès." -#: nova/virt/libvirt/driver.py:5481 +#: nova/virt/libvirt/driver.py:5590 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5494 +#: nova/virt/libvirt/driver.py:5603 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5498 +#: nova/virt/libvirt/driver.py:5607 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -292,12 +302,12 @@ msgid "Attempted to unfilter instance which is not filtered" msgstr "" "Vous avez essayé d'annuler le filtre d'une instance qui n'est pas filtrée" -#: nova/virt/libvirt/imagecache.py:191 +#: nova/virt/libvirt/imagecache.py:190 #, python-format msgid "Writing stored info to %s" msgstr "Ecriture d'informations stockées dans %s" -#: nova/virt/libvirt/imagecache.py:401 +#: nova/virt/libvirt/imagecache.py:400 #, python-format msgid "" "image %(id)s at (%(base_file)s): image verification skipped, no hash stored" @@ -305,27 +315,27 @@ msgstr "" "image %(id)s à (%(base_file)s) : vérification d'image ignorée, aucun hachage " "stocké" -#: nova/virt/libvirt/imagecache.py:410 +#: nova/virt/libvirt/imagecache.py:409 #, python-format msgid "%(id)s (%(base_file)s): generating checksum" msgstr "%(id)s (%(base_file)s) : génération d'un total de contrôle" -#: 
nova/virt/libvirt/imagecache.py:438 +#: nova/virt/libvirt/imagecache.py:437 #, python-format msgid "Base file too young to remove: %s" msgstr "Fichier de base trop jeune pour un retrait : %s" -#: nova/virt/libvirt/imagecache.py:441 +#: nova/virt/libvirt/imagecache.py:440 #, python-format msgid "Removing base file: %s" msgstr "Retrait du fichier de base : %s" -#: nova/virt/libvirt/imagecache.py:459 +#: nova/virt/libvirt/imagecache.py:458 #, python-format msgid "image %(id)s at (%(base_file)s): checking" msgstr "image %(id)s à (%(base_file)s) : vérification" -#: nova/virt/libvirt/imagecache.py:483 +#: nova/virt/libvirt/imagecache.py:482 #, python-format msgid "" "image %(id)s at (%(base_file)s): in use: on this node %(local)d local, " @@ -335,17 +345,17 @@ msgstr "" "%(local)d local, %(remote)d sur d'autres noeuds partageant ce stockage " "d'instance" -#: nova/virt/libvirt/imagecache.py:550 +#: nova/virt/libvirt/imagecache.py:549 #, python-format msgid "Active base files: %s" msgstr "Fichiers de base actifs : %s" -#: nova/virt/libvirt/imagecache.py:553 +#: nova/virt/libvirt/imagecache.py:552 #, python-format msgid "Corrupt base files: %s" msgstr "Fichiers de base endommagés : %s" -#: nova/virt/libvirt/imagecache.py:557 +#: nova/virt/libvirt/imagecache.py:556 #, python-format msgid "Removable base files: %s" msgstr "Fichiers de base pouvant être retirés : %s" diff --git a/nova/locale/it/LC_MESSAGES/nova-log-info.po b/nova/locale/it/LC_MESSAGES/nova-log-info.po index 8449d18757..23c0c4f2e5 100644 --- a/nova/locale/it/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/it/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" "PO-Revision-Date: 2014-08-07 07:51+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Italian (http://www.transifex.com/projects/p/nova/language/" @@ -44,7 +44,12 @@ msgstr 
"Generata eccezione HTTP: %s" msgid "Deleting network with id %s" msgstr "Eliminazione della rete con id %s" -#: nova/compute/manager.py:5452 +#: nova/compute/manager.py:2663 +#, python-format +msgid "bringing vm to original state: '%s'" +msgstr "" + +#: nova/compute/manager.py:5471 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." @@ -147,106 +152,111 @@ msgstr "Cancellata riga duplicata con id: %(id)s dalla tablella: %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" +#: nova/virt/block_device.py:221 +#, python-format +msgid "preserve multipath_id %s" +msgstr "" + #: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:137 +#: nova/virt/disk/vfs/guestfs.py:139 msgid "Unable to force TCG mode, libguestfs too old?" msgstr "" -#: nova/virt/libvirt/driver.py:837 +#: nova/virt/libvirt/driver.py:835 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:950 +#: nova/virt/libvirt/driver.py:948 msgid "Instance destroyed successfully." msgstr "Istanza distrutta correttamente." -#: nova/virt/libvirt/driver.py:960 +#: nova/virt/libvirt/driver.py:958 msgid "Instance may be started again." msgstr "L'istanza può essere avviata di nuovo." -#: nova/virt/libvirt/driver.py:970 +#: nova/virt/libvirt/driver.py:968 msgid "Going to destroy instance again." msgstr "L'istanza verrà nuovamente distrutta." 
-#: nova/virt/libvirt/driver.py:1570 +#: nova/virt/libvirt/driver.py:1576 msgid "Beginning live snapshot process" msgstr "Inizio processo attivo istantanea" -#: nova/virt/libvirt/driver.py:1573 +#: nova/virt/libvirt/driver.py:1579 msgid "Beginning cold snapshot process" msgstr "Inizio processo di istantanea a freddo" -#: nova/virt/libvirt/driver.py:1602 +#: nova/virt/libvirt/driver.py:1608 msgid "Snapshot extracted, beginning image upload" msgstr "Istantanea estratta, inizio caricamento immagine" -#: nova/virt/libvirt/driver.py:1614 +#: nova/virt/libvirt/driver.py:1620 msgid "Snapshot image upload complete" msgstr "Caricamento immagine istantanea completato" -#: nova/virt/libvirt/driver.py:2047 +#: nova/virt/libvirt/driver.py:2132 msgid "Instance soft rebooted successfully." msgstr "Avvio a caldo dell'istanza eseguito correttamente." -#: nova/virt/libvirt/driver.py:2090 +#: nova/virt/libvirt/driver.py:2175 msgid "Instance shutdown successfully." msgstr "Chiusura dell'istanza eseguita correttamente." -#: nova/virt/libvirt/driver.py:2098 +#: nova/virt/libvirt/driver.py:2183 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "L'istanza potrebbe essere stat riavviata durante l'avvio a caldo, quindi " "ritornare adesso." -#: nova/virt/libvirt/driver.py:2167 +#: nova/virt/libvirt/driver.py:2252 msgid "Instance rebooted successfully." msgstr "Istanza riavviata correttamente." -#: nova/virt/libvirt/driver.py:2335 +#: nova/virt/libvirt/driver.py:2420 msgid "Instance spawned successfully." msgstr "Istanza generata correttamente." 
-#: nova/virt/libvirt/driver.py:2351 +#: nova/virt/libvirt/driver.py:2436 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "dati: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 +#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Restituito log della console troncato, %d byte ignorati" -#: nova/virt/libvirt/driver.py:2646 +#: nova/virt/libvirt/driver.py:2731 msgid "Creating image" msgstr "Creazione immagine" -#: nova/virt/libvirt/driver.py:2772 +#: nova/virt/libvirt/driver.py:2857 msgid "Using config drive" msgstr "Utilizzo unità di config" -#: nova/virt/libvirt/driver.py:2781 +#: nova/virt/libvirt/driver.py:2866 #, python-format msgid "Creating config drive at %(path)s" msgstr "Creazione unità config in %(path)s" -#: nova/virt/libvirt/driver.py:3334 +#: nova/virt/libvirt/driver.py:3437 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4217 +#: nova/virt/libvirt/driver.py:4320 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4223 +#: nova/virt/libvirt/driver.py:4326 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -255,26 +265,26 @@ msgstr "" "Impossibile trovare il dominio in libvirt per l'istanza %s. Impossibile " "ottenere le statistiche del blocco per l'unità" -#: nova/virt/libvirt/driver.py:4465 +#: nova/virt/libvirt/driver.py:4568 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5207 +#: nova/virt/libvirt/driver.py:5316 msgid "Instance running successfully." msgstr "Istanza in esecuzione correttamente." 
-#: nova/virt/libvirt/driver.py:5481 +#: nova/virt/libvirt/driver.py:5590 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5494 +#: nova/virt/libvirt/driver.py:5603 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5498 +#: nova/virt/libvirt/driver.py:5607 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -291,12 +301,12 @@ msgstr "Controllo dei filtri statici" msgid "Attempted to unfilter instance which is not filtered" msgstr "Si è tentato di rimuovere il filtro da un'istanza senza filtro" -#: nova/virt/libvirt/imagecache.py:191 +#: nova/virt/libvirt/imagecache.py:190 #, python-format msgid "Writing stored info to %s" msgstr "Scrittura informazioni memorizzate in %s" -#: nova/virt/libvirt/imagecache.py:401 +#: nova/virt/libvirt/imagecache.py:400 #, python-format msgid "" "image %(id)s at (%(base_file)s): image verification skipped, no hash stored" @@ -304,27 +314,27 @@ msgstr "" "immagine %(id)s in (%(base_file)s): verifica dell'immagine ignorata, nessun " "hash memorizzato" -#: nova/virt/libvirt/imagecache.py:410 +#: nova/virt/libvirt/imagecache.py:409 #, python-format msgid "%(id)s (%(base_file)s): generating checksum" msgstr "%(id)s (%(base_file)s): generazione checksum" -#: nova/virt/libvirt/imagecache.py:438 +#: nova/virt/libvirt/imagecache.py:437 #, python-format msgid "Base file too young to remove: %s" msgstr "File di base troppo recente per essere rimosso: %s" -#: nova/virt/libvirt/imagecache.py:441 +#: nova/virt/libvirt/imagecache.py:440 #, python-format msgid "Removing base file: %s" msgstr "Rimozione del file di base: %s" -#: nova/virt/libvirt/imagecache.py:459 +#: nova/virt/libvirt/imagecache.py:458 #, python-format msgid "image %(id)s at (%(base_file)s): checking" msgstr "immagine %(id)s in (%(base_file)s): verifica" -#: nova/virt/libvirt/imagecache.py:483 +#: nova/virt/libvirt/imagecache.py:482 #, python-format msgid "" "image %(id)s at (%(base_file)s): 
in use: on this node %(local)d local, " @@ -333,17 +343,17 @@ msgstr "" "immagine %(id)s in (%(base_file)s): in uso: in questo nodo %(local)d locale, " "%(remote)d in altri nodi che condividono questa archiviazione dell'istanza" -#: nova/virt/libvirt/imagecache.py:550 +#: nova/virt/libvirt/imagecache.py:549 #, python-format msgid "Active base files: %s" msgstr "File di base attivi: %s" -#: nova/virt/libvirt/imagecache.py:553 +#: nova/virt/libvirt/imagecache.py:552 #, python-format msgid "Corrupt base files: %s" msgstr "File di base danneggiato: %s" -#: nova/virt/libvirt/imagecache.py:557 +#: nova/virt/libvirt/imagecache.py:556 #, python-format msgid "Removable base files: %s" msgstr "File di base rimovibili: %s" diff --git a/nova/locale/ja/LC_MESSAGES/nova-log-error.po b/nova/locale/ja/LC_MESSAGES/nova-log-error.po index 6dbab52058..a30dee0f20 100644 --- a/nova/locale/ja/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/ja/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:04+0000\n" "PO-Revision-Date: 2014-06-20 16:41+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Japanese (http://www.transifex.com/projects/p/nova/language/" @@ -196,7 +196,7 @@ msgstr "" msgid "Failed to dealloc network for failed instance" msgstr "" -#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +#: nova/compute/manager.py:1458 nova/compute/manager.py:3527 msgid "Error trying to reschedule" msgstr "" @@ -205,99 +205,139 @@ msgstr "" msgid "Instance failed network setup after %(attempts)d attempt(s)" msgstr "" -#: nova/compute/manager.py:1755 +#: nova/compute/manager.py:1761 msgid "Instance failed block device setup" msgstr "" -#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 -#: nova/compute/manager.py:4058 +#: nova/compute/manager.py:1781 nova/compute/manager.py:2123 +#: 
nova/compute/manager.py:4071 msgid "Instance failed to spawn" msgstr "" -#: nova/compute/manager.py:1957 +#: nova/compute/manager.py:1964 msgid "Unexpected build failure, not rescheduling build." msgstr "" -#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +#: nova/compute/manager.py:2033 nova/compute/manager.py:2085 msgid "Failed to allocate network(s)" msgstr "" -#: nova/compute/manager.py:2104 +#: nova/compute/manager.py:2111 msgid "Failure prepping block device" msgstr "" -#: nova/compute/manager.py:2137 +#: nova/compute/manager.py:2144 msgid "Failed to deallocate networks" msgstr "" -#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 -#: nova/compute/manager.py:5803 +#: nova/compute/manager.py:2374 nova/compute/manager.py:3718 +#: nova/compute/manager.py:5822 msgid "Setting instance vm_state to ERROR" msgstr "" -#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#: nova/compute/manager.py:2586 nova/compute/manager.py:4933 #, python-format msgid "Failed to get compute_info for %s" msgstr "" -#: nova/compute/manager.py:3005 +#: nova/compute/manager.py:3013 #, python-format msgid "set_admin_password failed: %s" msgstr "" -#: nova/compute/manager.py:3090 +#: nova/compute/manager.py:3098 msgid "Error trying to Rescue Instance" msgstr "" -#: nova/compute/manager.py:3711 +#: nova/compute/manager.py:3724 #, python-format msgid "Failed to rollback quota for failed finish_resize: %s" msgstr "" -#: nova/compute/manager.py:4310 +#: nova/compute/manager.py:4323 #, python-format msgid "Failed to attach %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4349 +#: nova/compute/manager.py:4362 #, python-format msgid "Failed to detach volume %(volume_id)s from %(mp)s" msgstr "" -#: nova/compute/manager.py:4422 +#: nova/compute/manager.py:4441 #, python-format msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" msgstr "" -#: nova/compute/manager.py:4429 +#: nova/compute/manager.py:4448 #, python-format msgid 
"Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4716 +#: nova/compute/manager.py:4735 #, python-format msgid "Pre live migration failed at %s" msgstr "" -#: nova/compute/manager.py:5216 +#: nova/compute/manager.py:5235 msgid "Periodic task failed to offload instance." msgstr "" -#: nova/compute/manager.py:5256 +#: nova/compute/manager.py:5275 #, python-format msgid "Failed to generate usage audit for instance on host %s" msgstr "" -#: nova/compute/manager.py:5446 +#: nova/compute/manager.py:5465 msgid "" "Periodic sync_power_state task had an error while processing an instance." msgstr "" -#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 -#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +#: nova/compute/manager.py:5568 nova/compute/manager.py:5577 +#: nova/compute/manager.py:5608 nova/compute/manager.py:5619 msgid "error during stop() in sync_power_state." msgstr "" +#: nova/network/neutronv2/api.py:234 +#, python-format +msgid "Neutron error creating port on network %s" +msgstr "" + +#: nova/network/neutronv2/api.py:418 +#, python-format +msgid "Failed to update port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:425 +#, python-format +msgid "Failed to delete port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524 +#, python-format +msgid "Failed to delete neutron port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:697 +#, python-format +msgid "Failed to access port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:931 +#, python-format +msgid "Unable to access floating IP %s" +msgstr "" + +#: nova/network/neutronv2/api.py:1065 +#, python-format +msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" +msgstr "" + +#: nova/network/neutronv2/api.py:1124 +#, python-format +msgid "Unable to update host of port %s" +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr 
"インスタンスの障害をセルに通知できませんでした" @@ -389,116 +429,116 @@ msgid "" "Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s" msgstr "" -#: nova/virt/libvirt/driver.py:641 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:766 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:929 +#: nova/virt/libvirt/driver.py:927 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1007 +#: nova/virt/libvirt/driver.py:1005 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1035 +#: nova/virt/libvirt/driver.py:1033 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1438 +#: nova/virt/libvirt/driver.py:1444 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1465 +#: nova/virt/libvirt/driver.py:1471 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1717 +#: nova/virt/libvirt/driver.py:1726 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1825 +#: nova/virt/libvirt/driver.py:1834 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1831 +#: nova/virt/libvirt/driver.py:1840 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1880 +#: nova/virt/libvirt/driver.py:1889 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2026 +#: nova/virt/libvirt/driver.py:2111 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 +#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2620 +#: nova/virt/libvirt/driver.py:2705 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2788 +#: nova/virt/libvirt/driver.py:2873 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2881 +#: nova/virt/libvirt/driver.py:2966 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3680 +#: nova/virt/libvirt/driver.py:3783 #, python-format msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3684 +#: nova/virt/libvirt/driver.py:3787 #, python-format msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3689 +#: nova/virt/libvirt/driver.py:3792 #, python-format msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3703 +#: nova/virt/libvirt/driver.py:3806 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:4012 +#: nova/virt/libvirt/driver.py:4115 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4691 +#: nova/virt/libvirt/driver.py:4794 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5487 +#: nova/virt/libvirt/driver.py:5596 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" @@ -515,19 +555,19 @@ msgid "" "%(size)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:130 +#: nova/virt/libvirt/imagecache.py:129 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" msgstr "" "イメージ情報ファイル %(filename)s の読み取り中にエラーが発生しました: " "%(error)s" -#: nova/virt/libvirt/imagecache.py:391 +#: nova/virt/libvirt/imagecache.py:390 #, python-format msgid "image %(id)s at (%(base_file)s): image verification failed" msgstr "(%(base_file)s) にあるイメージ %(id)s: イメージの検査が失敗しました" -#: nova/virt/libvirt/imagecache.py:448 +#: nova/virt/libvirt/imagecache.py:447 #, python-format msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "%(base_file)s の削除に失敗しました。エラーは %(error)s" @@ -537,19 +577,19 @@ msgstr "%(base_file)s の削除に失敗しました。エラーは %(error)s" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/rbd.py:62 +#: nova/virt/libvirt/rbd_utils.py:62 #, python-format msgid "error opening rbd image %s" msgstr "" -#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 -#: nova/virt/libvirt/vif.py:533 +#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474 +#: nova/virt/libvirt/vif.py:496 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 -#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 -#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 +#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560 +#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598 +#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639 msgid "Failed while unplugging vif" msgstr "" @@ -568,18 +608,18 @@ msgstr "" msgid "Couldn't unmount the GlusterFS 
share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:509 +#: nova/virt/vmwareapi/vmops.py:508 #, python-format msgid "" "Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1553 +#: nova/virt/vmwareapi/vmops.py:1551 #, python-format msgid "Attaching network adapter failed. Exception: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1593 +#: nova/virt/vmwareapi/vmops.py:1591 #, python-format msgid "Detaching network adapter failed. Exception: %s" msgstr "" diff --git a/nova/locale/ja/LC_MESSAGES/nova-log-info.po b/nova/locale/ja/LC_MESSAGES/nova-log-info.po index 4e059ace67..7af3d0293b 100644 --- a/nova/locale/ja/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/ja/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" "PO-Revision-Date: 2014-06-30 04:40+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Japanese (http://www.transifex.com/projects/p/nova/language/" @@ -44,7 +44,12 @@ msgstr "" msgid "Deleting network with id %s" msgstr "" -#: nova/compute/manager.py:5452 +#: nova/compute/manager.py:2663 +#, python-format +msgid "bringing vm to original state: '%s'" +msgstr "" + +#: nova/compute/manager.py:5471 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." @@ -146,108 +151,113 @@ msgstr "" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" +#: nova/virt/block_device.py:221 +#, python-format +msgid "preserve multipath_id %s" +msgstr "" + #: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:137 +#: nova/virt/disk/vfs/guestfs.py:139 msgid "Unable to force TCG mode, libguestfs too old?" 
msgstr "" -#: nova/virt/libvirt/driver.py:837 +#: nova/virt/libvirt/driver.py:835 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:950 +#: nova/virt/libvirt/driver.py:948 msgid "Instance destroyed successfully." msgstr "インスタンスが正常に破棄されました。" -#: nova/virt/libvirt/driver.py:960 +#: nova/virt/libvirt/driver.py:958 msgid "Instance may be started again." msgstr "インスタンスを再び開始できます。" -#: nova/virt/libvirt/driver.py:970 +#: nova/virt/libvirt/driver.py:968 msgid "Going to destroy instance again." msgstr "インスタンスの破棄を再び行います。" -#: nova/virt/libvirt/driver.py:1570 +#: nova/virt/libvirt/driver.py:1576 msgid "Beginning live snapshot process" msgstr "ライブ・スナップショット・プロセスを開始しています" -#: nova/virt/libvirt/driver.py:1573 +#: nova/virt/libvirt/driver.py:1579 msgid "Beginning cold snapshot process" msgstr "コールド・スナップショット・プロセスを開始しています" -#: nova/virt/libvirt/driver.py:1602 +#: nova/virt/libvirt/driver.py:1608 msgid "Snapshot extracted, beginning image upload" msgstr "" "スナップショットが抽出されました。イメージのアップロードを開始しています" -#: nova/virt/libvirt/driver.py:1614 +#: nova/virt/libvirt/driver.py:1620 msgid "Snapshot image upload complete" msgstr "スナップショット・イメージのアップロードが完了しました" -#: nova/virt/libvirt/driver.py:2047 +#: nova/virt/libvirt/driver.py:2132 msgid "Instance soft rebooted successfully." msgstr "インスタンスが正常にソフト・リブートされました。" -#: nova/virt/libvirt/driver.py:2090 +#: nova/virt/libvirt/driver.py:2175 msgid "Instance shutdown successfully." msgstr "インスタンスが正常にシャットダウンされました。" -#: nova/virt/libvirt/driver.py:2098 +#: nova/virt/libvirt/driver.py:2183 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "インスタンスはソフト・リブート時にリブートされた可能性があるため、ここで返し" "ます。" -#: nova/virt/libvirt/driver.py:2167 +#: nova/virt/libvirt/driver.py:2252 msgid "Instance rebooted successfully." msgstr "インスタンスが正常にリブートされました。" -#: nova/virt/libvirt/driver.py:2335 +#: nova/virt/libvirt/driver.py:2420 msgid "Instance spawned successfully." 
msgstr "インスタンスが正常に作成されました。" -#: nova/virt/libvirt/driver.py:2351 +#: nova/virt/libvirt/driver.py:2436 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "データ: %(data)r, ファイルパス: %(fpath)r" -#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 +#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" "切り捨てられたコンソール・ログが返されました。%d バイトが無視されました" -#: nova/virt/libvirt/driver.py:2646 +#: nova/virt/libvirt/driver.py:2731 msgid "Creating image" msgstr "イメージの作成中" -#: nova/virt/libvirt/driver.py:2772 +#: nova/virt/libvirt/driver.py:2857 msgid "Using config drive" msgstr "構成ドライブを使用中" -#: nova/virt/libvirt/driver.py:2781 +#: nova/virt/libvirt/driver.py:2866 #, python-format msgid "Creating config drive at %(path)s" msgstr "構成ドライブを %(path)s に作成しています" -#: nova/virt/libvirt/driver.py:3334 +#: nova/virt/libvirt/driver.py:3437 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4217 +#: nova/virt/libvirt/driver.py:4320 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4223 +#: nova/virt/libvirt/driver.py:4326 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -256,26 +266,26 @@ msgstr "" "インスタンス %s 用のドメインが Libvirt 内で見つかりませんでした。デバイスのブ" "ロックの統計を取得できません" -#: nova/virt/libvirt/driver.py:4465 +#: nova/virt/libvirt/driver.py:4568 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5207 +#: nova/virt/libvirt/driver.py:5316 msgid "Instance running successfully." 
msgstr "インスタンスが正常に実行されています。" -#: nova/virt/libvirt/driver.py:5481 +#: nova/virt/libvirt/driver.py:5590 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5494 +#: nova/virt/libvirt/driver.py:5603 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5498 +#: nova/virt/libvirt/driver.py:5607 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -294,12 +304,12 @@ msgstr "" "フィルター処理されていないインスタンスに対してフィルター処理の取り消しが試み" "られました" -#: nova/virt/libvirt/imagecache.py:191 +#: nova/virt/libvirt/imagecache.py:190 #, python-format msgid "Writing stored info to %s" msgstr "保管された情報を %s に書き込んでいます" -#: nova/virt/libvirt/imagecache.py:401 +#: nova/virt/libvirt/imagecache.py:400 #, python-format msgid "" "image %(id)s at (%(base_file)s): image verification skipped, no hash stored" @@ -307,27 +317,27 @@ msgstr "" "(%(base_file)s) にあるイメージ %(id)s: イメージの検査がスキップされました。" "ハッシュは保管されていません" -#: nova/virt/libvirt/imagecache.py:410 +#: nova/virt/libvirt/imagecache.py:409 #, python-format msgid "%(id)s (%(base_file)s): generating checksum" msgstr "%(id)s (%(base_file)s): チェックサムの生成中" -#: nova/virt/libvirt/imagecache.py:438 +#: nova/virt/libvirt/imagecache.py:437 #, python-format msgid "Base file too young to remove: %s" msgstr "基本ファイルは新しいため削除できません: %s" -#: nova/virt/libvirt/imagecache.py:441 +#: nova/virt/libvirt/imagecache.py:440 #, python-format msgid "Removing base file: %s" msgstr "基本ファイルを削除しています: %s" -#: nova/virt/libvirt/imagecache.py:459 +#: nova/virt/libvirt/imagecache.py:458 #, python-format msgid "image %(id)s at (%(base_file)s): checking" msgstr "(%(base_file)s) にあるイメージ %(id)s: 検査中" -#: nova/virt/libvirt/imagecache.py:483 +#: nova/virt/libvirt/imagecache.py:482 #, python-format msgid "" "image %(id)s at (%(base_file)s): in use: on this node %(local)d local, " @@ -336,17 +346,17 @@ msgstr "" "(%(base_file)s) にあるイメージ %(id)s: 使用中: このノード上では %(local)d " "ローカル、このインスタンスのストレージを共有する他のノード上では %(remote)d" -#: 
nova/virt/libvirt/imagecache.py:550 +#: nova/virt/libvirt/imagecache.py:549 #, python-format msgid "Active base files: %s" msgstr "アクティブな基本ファイル: %s" -#: nova/virt/libvirt/imagecache.py:553 +#: nova/virt/libvirt/imagecache.py:552 #, python-format msgid "Corrupt base files: %s" msgstr "破損した基本ファイル: %s" -#: nova/virt/libvirt/imagecache.py:557 +#: nova/virt/libvirt/imagecache.py:556 #, python-format msgid "Removable base files: %s" msgstr "削除可能な基本ファイル: %s" diff --git a/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po b/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po index 8e2b069f4e..f4e330f39e 100644 --- a/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:04+0000\n" "PO-Revision-Date: 2014-06-16 04:10+0000\n" "Last-Translator: jaekwon.park \n" "Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/nova/" @@ -197,7 +197,7 @@ msgstr "" msgid "Failed to dealloc network for failed instance" msgstr "" -#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +#: nova/compute/manager.py:1458 nova/compute/manager.py:3527 msgid "Error trying to reschedule" msgstr "" @@ -206,99 +206,139 @@ msgstr "" msgid "Instance failed network setup after %(attempts)d attempt(s)" msgstr "" -#: nova/compute/manager.py:1755 +#: nova/compute/manager.py:1761 msgid "Instance failed block device setup" msgstr "" -#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 -#: nova/compute/manager.py:4058 +#: nova/compute/manager.py:1781 nova/compute/manager.py:2123 +#: nova/compute/manager.py:4071 msgid "Instance failed to spawn" msgstr "" -#: nova/compute/manager.py:1957 +#: nova/compute/manager.py:1964 msgid "Unexpected build failure, not rescheduling build." 
msgstr "" -#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +#: nova/compute/manager.py:2033 nova/compute/manager.py:2085 msgid "Failed to allocate network(s)" msgstr "" -#: nova/compute/manager.py:2104 +#: nova/compute/manager.py:2111 msgid "Failure prepping block device" msgstr "" -#: nova/compute/manager.py:2137 +#: nova/compute/manager.py:2144 msgid "Failed to deallocate networks" msgstr "" -#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 -#: nova/compute/manager.py:5803 +#: nova/compute/manager.py:2374 nova/compute/manager.py:3718 +#: nova/compute/manager.py:5822 msgid "Setting instance vm_state to ERROR" msgstr "" -#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#: nova/compute/manager.py:2586 nova/compute/manager.py:4933 #, python-format msgid "Failed to get compute_info for %s" msgstr "" -#: nova/compute/manager.py:3005 +#: nova/compute/manager.py:3013 #, python-format msgid "set_admin_password failed: %s" msgstr "" -#: nova/compute/manager.py:3090 +#: nova/compute/manager.py:3098 msgid "Error trying to Rescue Instance" msgstr "" -#: nova/compute/manager.py:3711 +#: nova/compute/manager.py:3724 #, python-format msgid "Failed to rollback quota for failed finish_resize: %s" msgstr "" -#: nova/compute/manager.py:4310 +#: nova/compute/manager.py:4323 #, python-format msgid "Failed to attach %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4349 +#: nova/compute/manager.py:4362 #, python-format msgid "Failed to detach volume %(volume_id)s from %(mp)s" msgstr "" -#: nova/compute/manager.py:4422 +#: nova/compute/manager.py:4441 #, python-format msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" msgstr "" -#: nova/compute/manager.py:4429 +#: nova/compute/manager.py:4448 #, python-format msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4716 +#: nova/compute/manager.py:4735 #, python-format msgid "Pre live migration 
failed at %s" msgstr "" -#: nova/compute/manager.py:5216 +#: nova/compute/manager.py:5235 msgid "Periodic task failed to offload instance." msgstr "" -#: nova/compute/manager.py:5256 +#: nova/compute/manager.py:5275 #, python-format msgid "Failed to generate usage audit for instance on host %s" msgstr "" -#: nova/compute/manager.py:5446 +#: nova/compute/manager.py:5465 msgid "" "Periodic sync_power_state task had an error while processing an instance." msgstr "" -#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 -#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +#: nova/compute/manager.py:5568 nova/compute/manager.py:5577 +#: nova/compute/manager.py:5608 nova/compute/manager.py:5619 msgid "error during stop() in sync_power_state." msgstr "" +#: nova/network/neutronv2/api.py:234 +#, python-format +msgid "Neutron error creating port on network %s" +msgstr "" + +#: nova/network/neutronv2/api.py:418 +#, python-format +msgid "Failed to update port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:425 +#, python-format +msgid "Failed to delete port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524 +#, python-format +msgid "Failed to delete neutron port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:697 +#, python-format +msgid "Failed to access port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:931 +#, python-format +msgid "Unable to access floating IP %s" +msgstr "" + +#: nova/network/neutronv2/api.py:1065 +#, python-format +msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" +msgstr "" + +#: nova/network/neutronv2/api.py:1124 +#, python-format +msgid "Unable to update host of port %s" +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "셀에 인스턴스 결함을 알리지 못했음" @@ -390,116 +430,116 @@ msgid "" "Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s" msgstr "" -#: nova/virt/libvirt/driver.py:641 +#: 
nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:766 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:929 +#: nova/virt/libvirt/driver.py:927 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1007 +#: nova/virt/libvirt/driver.py:1005 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1035 +#: nova/virt/libvirt/driver.py:1033 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1438 +#: nova/virt/libvirt/driver.py:1444 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1465 +#: nova/virt/libvirt/driver.py:1471 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1717 +#: nova/virt/libvirt/driver.py:1726 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1825 +#: nova/virt/libvirt/driver.py:1834 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1831 +#: nova/virt/libvirt/driver.py:1840 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1880 +#: nova/virt/libvirt/driver.py:1889 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2026 +#: nova/virt/libvirt/driver.py:2111 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 +#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2620 +#: nova/virt/libvirt/driver.py:2705 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2788 +#: nova/virt/libvirt/driver.py:2873 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2881 +#: nova/virt/libvirt/driver.py:2966 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3680 +#: nova/virt/libvirt/driver.py:3783 #, python-format msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3684 +#: nova/virt/libvirt/driver.py:3787 #, python-format msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3689 +#: nova/virt/libvirt/driver.py:3792 #, python-format msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3703 +#: nova/virt/libvirt/driver.py:3806 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:4012 +#: nova/virt/libvirt/driver.py:4115 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4691 +#: nova/virt/libvirt/driver.py:4794 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5487 +#: nova/virt/libvirt/driver.py:5596 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" @@ -516,17 +556,17 @@ msgid "" "%(size)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:130 +#: nova/virt/libvirt/imagecache.py:129 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:391 +#: nova/virt/libvirt/imagecache.py:390 #, python-format msgid "image %(id)s at (%(base_file)s): image verification failed" msgstr "" -#: nova/virt/libvirt/imagecache.py:448 +#: nova/virt/libvirt/imagecache.py:447 #, python-format msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "" @@ -536,19 +576,19 @@ msgstr "" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/rbd.py:62 +#: nova/virt/libvirt/rbd_utils.py:62 #, python-format msgid "error opening rbd image %s" msgstr "" -#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 -#: nova/virt/libvirt/vif.py:533 +#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474 +#: nova/virt/libvirt/vif.py:496 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 -#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 -#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 +#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560 +#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598 +#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639 msgid "Failed while unplugging vif" msgstr "" @@ -567,18 +607,18 @@ msgstr "" msgid "Couldn't unmount the GlusterFS share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:509 +#: nova/virt/vmwareapi/vmops.py:508 #, python-format msgid "" "Failed to copy cached image %(source)s to %(dest)s for resize: 
%(error)s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1553 +#: nova/virt/vmwareapi/vmops.py:1551 #, python-format msgid "Attaching network adapter failed. Exception: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1593 +#: nova/virt/vmwareapi/vmops.py:1591 #, python-format msgid "Detaching network adapter failed. Exception: %s" msgstr "" diff --git a/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po b/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po index 20c80f17cd..f206474a16 100644 --- a/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" "PO-Revision-Date: 2014-06-30 04:40+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/nova/" @@ -44,7 +44,12 @@ msgstr "" msgid "Deleting network with id %s" msgstr "" -#: nova/compute/manager.py:5452 +#: nova/compute/manager.py:2663 +#, python-format +msgid "bringing vm to original state: '%s'" +msgstr "" + +#: nova/compute/manager.py:5471 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." @@ -146,105 +151,110 @@ msgstr "" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" +#: nova/virt/block_device.py:221 +#, python-format +msgid "preserve multipath_id %s" +msgstr "" + #: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:137 +#: nova/virt/disk/vfs/guestfs.py:139 msgid "Unable to force TCG mode, libguestfs too old?" 
msgstr "" -#: nova/virt/libvirt/driver.py:837 +#: nova/virt/libvirt/driver.py:835 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:950 +#: nova/virt/libvirt/driver.py:948 msgid "Instance destroyed successfully." msgstr "인스턴스가 영구 삭제되었습니다. " -#: nova/virt/libvirt/driver.py:960 +#: nova/virt/libvirt/driver.py:958 msgid "Instance may be started again." msgstr "인스턴스가 다시 시작됩니다." -#: nova/virt/libvirt/driver.py:970 +#: nova/virt/libvirt/driver.py:968 msgid "Going to destroy instance again." msgstr "인스턴스를 다시 영구 삭제하려 합니다." -#: nova/virt/libvirt/driver.py:1570 +#: nova/virt/libvirt/driver.py:1576 msgid "Beginning live snapshot process" msgstr "라이브 스냅샷 프로세스 시작 중" -#: nova/virt/libvirt/driver.py:1573 +#: nova/virt/libvirt/driver.py:1579 msgid "Beginning cold snapshot process" msgstr "콜드 스냅샷 프로세스 시작 중" -#: nova/virt/libvirt/driver.py:1602 +#: nova/virt/libvirt/driver.py:1608 msgid "Snapshot extracted, beginning image upload" msgstr "스냅샷 추출, 이미지 업로드 시작 중" -#: nova/virt/libvirt/driver.py:1614 +#: nova/virt/libvirt/driver.py:1620 msgid "Snapshot image upload complete" msgstr "스냅샷 이미지 업로드 완료" -#: nova/virt/libvirt/driver.py:2047 +#: nova/virt/libvirt/driver.py:2132 msgid "Instance soft rebooted successfully." msgstr "인스턴스가 소프트 리부트되었습니다. " -#: nova/virt/libvirt/driver.py:2090 +#: nova/virt/libvirt/driver.py:2175 msgid "Instance shutdown successfully." msgstr "인스턴스가 시스템 종료되었습니다. " -#: nova/virt/libvirt/driver.py:2098 +#: nova/virt/libvirt/driver.py:2183 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "인스턴스가 소프트 리부트 중에 다시 부팅되었을 수 있으므로, 지금 리턴합니다. " -#: nova/virt/libvirt/driver.py:2167 +#: nova/virt/libvirt/driver.py:2252 msgid "Instance rebooted successfully." msgstr "인스턴스가 다시 부트되었습니다. " -#: nova/virt/libvirt/driver.py:2335 +#: nova/virt/libvirt/driver.py:2420 msgid "Instance spawned successfully." msgstr "인스턴스가 파생되었습니다. 
" -#: nova/virt/libvirt/driver.py:2351 +#: nova/virt/libvirt/driver.py:2436 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "데이터: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 +#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "잘린 콘솔 로그가 리턴되었으며, %d 바이트는 무시됨" -#: nova/virt/libvirt/driver.py:2646 +#: nova/virt/libvirt/driver.py:2731 msgid "Creating image" msgstr "이미지 작성 중" -#: nova/virt/libvirt/driver.py:2772 +#: nova/virt/libvirt/driver.py:2857 msgid "Using config drive" msgstr "구성 드라이브 사용 중" -#: nova/virt/libvirt/driver.py:2781 +#: nova/virt/libvirt/driver.py:2866 #, python-format msgid "Creating config drive at %(path)s" msgstr "%(path)s에 구성 드라이브 작성 중" -#: nova/virt/libvirt/driver.py:3334 +#: nova/virt/libvirt/driver.py:3437 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4217 +#: nova/virt/libvirt/driver.py:4320 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4223 +#: nova/virt/libvirt/driver.py:4326 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -253,26 +263,26 @@ msgstr "" "%s 인스턴스에 대한 libvirt에서 도메인을 찾을 수 없습니다. 디바이스의 블록 통" "계를 가져올 수 없음" -#: nova/virt/libvirt/driver.py:4465 +#: nova/virt/libvirt/driver.py:4568 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5207 +#: nova/virt/libvirt/driver.py:5316 msgid "Instance running successfully." msgstr "인스턴스가 정상적으로 실행 중입니다. 
" -#: nova/virt/libvirt/driver.py:5481 +#: nova/virt/libvirt/driver.py:5590 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5494 +#: nova/virt/libvirt/driver.py:5603 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5498 +#: nova/virt/libvirt/driver.py:5607 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -289,39 +299,39 @@ msgstr "정적 필터 확인 중" msgid "Attempted to unfilter instance which is not filtered" msgstr "필터링되지 않는 인스턴스를 필터링 해제하려고 했음" -#: nova/virt/libvirt/imagecache.py:191 +#: nova/virt/libvirt/imagecache.py:190 #, python-format msgid "Writing stored info to %s" msgstr "%s에 저장된 정보 기록 중" -#: nova/virt/libvirt/imagecache.py:401 +#: nova/virt/libvirt/imagecache.py:400 #, python-format msgid "" "image %(id)s at (%(base_file)s): image verification skipped, no hash stored" msgstr "" "(%(base_file)s)의 이미지 %(id)s: 이미지 검증 건너뜀. 해시가 저장되지 않음" -#: nova/virt/libvirt/imagecache.py:410 +#: nova/virt/libvirt/imagecache.py:409 #, python-format msgid "%(id)s (%(base_file)s): generating checksum" msgstr "%(id)s (%(base_file)s): 체크섬 생성 중" -#: nova/virt/libvirt/imagecache.py:438 +#: nova/virt/libvirt/imagecache.py:437 #, python-format msgid "Base file too young to remove: %s" msgstr "기본 파일이 제거하기엔 너무 신생임: %s" -#: nova/virt/libvirt/imagecache.py:441 +#: nova/virt/libvirt/imagecache.py:440 #, python-format msgid "Removing base file: %s" msgstr "기본 파일 제거 중: %s" -#: nova/virt/libvirt/imagecache.py:459 +#: nova/virt/libvirt/imagecache.py:458 #, python-format msgid "image %(id)s at (%(base_file)s): checking" msgstr "(%(base_file)s)의 이미지 %(id)s: 검사 중" -#: nova/virt/libvirt/imagecache.py:483 +#: nova/virt/libvirt/imagecache.py:482 #, python-format msgid "" "image %(id)s at (%(base_file)s): in use: on this node %(local)d local, " @@ -330,17 +340,17 @@ msgstr "" "(%(base_file)s)의 이미지 %(id)s: 사용 중. 
이 노드의 %(local)d 로컬과 다른 노" "드의 %(remote)d이(가) 이 인스턴스 스노리지를 공유함" -#: nova/virt/libvirt/imagecache.py:550 +#: nova/virt/libvirt/imagecache.py:549 #, python-format msgid "Active base files: %s" msgstr "활성 기본 파일: %s" -#: nova/virt/libvirt/imagecache.py:553 +#: nova/virt/libvirt/imagecache.py:552 #, python-format msgid "Corrupt base files: %s" msgstr "손상된 기본 파일: %s" -#: nova/virt/libvirt/imagecache.py:557 +#: nova/virt/libvirt/imagecache.py:556 #, python-format msgid "Removable base files: %s" msgstr "제거 가능한 기본 파일: %s" diff --git a/nova/locale/nova-log-error.pot b/nova/locale/nova-log-error.pot index 7801bdb8cf..862a79a349 100644 --- a/nova/locale/nova-log-error.pot +++ b/nova/locale/nova-log-error.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev425.g05dbf0d\n" +"Project-Id-Version: nova 2014.2.dev566.gd156d7f\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:04+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -194,7 +194,7 @@ msgstr "" msgid "Failed to dealloc network for failed instance" msgstr "" -#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +#: nova/compute/manager.py:1458 nova/compute/manager.py:3527 msgid "Error trying to reschedule" msgstr "" @@ -203,98 +203,138 @@ msgstr "" msgid "Instance failed network setup after %(attempts)d attempt(s)" msgstr "" -#: nova/compute/manager.py:1755 +#: nova/compute/manager.py:1761 msgid "Instance failed block device setup" msgstr "" -#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 -#: nova/compute/manager.py:4058 +#: nova/compute/manager.py:1781 nova/compute/manager.py:2123 +#: nova/compute/manager.py:4071 msgid "Instance failed to spawn" msgstr "" -#: nova/compute/manager.py:1957 +#: nova/compute/manager.py:1964 msgid "Unexpected build failure, not rescheduling build." 
msgstr "" -#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +#: nova/compute/manager.py:2033 nova/compute/manager.py:2085 msgid "Failed to allocate network(s)" msgstr "" -#: nova/compute/manager.py:2104 +#: nova/compute/manager.py:2111 msgid "Failure prepping block device" msgstr "" -#: nova/compute/manager.py:2137 +#: nova/compute/manager.py:2144 msgid "Failed to deallocate networks" msgstr "" -#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 -#: nova/compute/manager.py:5803 +#: nova/compute/manager.py:2374 nova/compute/manager.py:3718 +#: nova/compute/manager.py:5822 msgid "Setting instance vm_state to ERROR" msgstr "" -#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#: nova/compute/manager.py:2586 nova/compute/manager.py:4933 #, python-format msgid "Failed to get compute_info for %s" msgstr "" -#: nova/compute/manager.py:3005 +#: nova/compute/manager.py:3013 #, python-format msgid "set_admin_password failed: %s" msgstr "" -#: nova/compute/manager.py:3090 +#: nova/compute/manager.py:3098 msgid "Error trying to Rescue Instance" msgstr "" -#: nova/compute/manager.py:3711 +#: nova/compute/manager.py:3724 #, python-format msgid "Failed to rollback quota for failed finish_resize: %s" msgstr "" -#: nova/compute/manager.py:4310 +#: nova/compute/manager.py:4323 #, python-format msgid "Failed to attach %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4349 +#: nova/compute/manager.py:4362 #, python-format msgid "Failed to detach volume %(volume_id)s from %(mp)s" msgstr "" -#: nova/compute/manager.py:4422 +#: nova/compute/manager.py:4441 #, python-format msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" msgstr "" -#: nova/compute/manager.py:4429 +#: nova/compute/manager.py:4448 #, python-format msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4716 +#: nova/compute/manager.py:4735 #, python-format msgid "Pre live migration 
failed at %s" msgstr "" -#: nova/compute/manager.py:5216 +#: nova/compute/manager.py:5235 msgid "Periodic task failed to offload instance." msgstr "" -#: nova/compute/manager.py:5256 +#: nova/compute/manager.py:5275 #, python-format msgid "Failed to generate usage audit for instance on host %s" msgstr "" -#: nova/compute/manager.py:5446 +#: nova/compute/manager.py:5465 msgid "Periodic sync_power_state task had an error while processing an instance." msgstr "" -#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 -#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +#: nova/compute/manager.py:5568 nova/compute/manager.py:5577 +#: nova/compute/manager.py:5608 nova/compute/manager.py:5619 msgid "error during stop() in sync_power_state." msgstr "" +#: nova/network/neutronv2/api.py:234 +#, python-format +msgid "Neutron error creating port on network %s" +msgstr "" + +#: nova/network/neutronv2/api.py:418 +#, python-format +msgid "Failed to update port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:425 +#, python-format +msgid "Failed to delete port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524 +#, python-format +msgid "Failed to delete neutron port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:697 +#, python-format +msgid "Failed to access port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:931 +#, python-format +msgid "Unable to access floating IP %s" +msgstr "" + +#: nova/network/neutronv2/api.py:1065 +#, python-format +msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" +msgstr "" + +#: nova/network/neutronv2/api.py:1124 +#, python-format +msgid "Unable to update host of port %s" +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "" @@ -387,118 +427,118 @@ msgid "" "%(errors)s" msgstr "" -#: nova/virt/libvirt/driver.py:641 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version 
%(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:766 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:929 +#: nova/virt/libvirt/driver.py:927 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1007 +#: nova/virt/libvirt/driver.py:1005 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1035 +#: nova/virt/libvirt/driver.py:1033 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1438 +#: nova/virt/libvirt/driver.py:1444 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1465 +#: nova/virt/libvirt/driver.py:1471 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1717 +#: nova/virt/libvirt/driver.py:1726 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1825 +#: nova/virt/libvirt/driver.py:1834 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1831 +#: nova/virt/libvirt/driver.py:1840 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1880 +#: nova/virt/libvirt/driver.py:1889 msgid "" "Error occurred during volume_snapshot_create, sending error status to " "Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2026 +#: nova/virt/libvirt/driver.py:2111 msgid "" "Error occurred during volume_snapshot_delete, sending error status to " "Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 +#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2620 +#: nova/virt/libvirt/driver.py:2705 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2788 +#: nova/virt/libvirt/driver.py:2873 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2881 +#: nova/virt/libvirt/driver.py:2966 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3680 +#: nova/virt/libvirt/driver.py:3783 #, python-format msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3684 +#: nova/virt/libvirt/driver.py:3787 #, python-format msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3689 +#: nova/virt/libvirt/driver.py:3792 #, python-format msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3703 +#: nova/virt/libvirt/driver.py:3806 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:4012 +#: nova/virt/libvirt/driver.py:4115 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to " "take effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4691 +#: nova/virt/libvirt/driver.py:4794 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5487 +#: nova/virt/libvirt/driver.py:5596 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" @@ -515,17 +555,17 @@ msgid "" "%(size)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:130 +#: nova/virt/libvirt/imagecache.py:129 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:391 +#: nova/virt/libvirt/imagecache.py:390 #, python-format msgid "image %(id)s at (%(base_file)s): image verification failed" msgstr "" -#: nova/virt/libvirt/imagecache.py:448 +#: nova/virt/libvirt/imagecache.py:447 #, python-format msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "" @@ -535,19 +575,19 @@ msgstr "" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/rbd.py:62 +#: nova/virt/libvirt/rbd_utils.py:62 #, python-format msgid "error opening rbd image %s" msgstr "" -#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 -#: nova/virt/libvirt/vif.py:533 +#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474 +#: nova/virt/libvirt/vif.py:496 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 -#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 -#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 +#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560 +#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598 +#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639 msgid "Failed while unplugging vif" msgstr "" @@ -566,17 +606,17 @@ msgstr "" msgid "Couldn't unmount the GlusterFS share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:509 +#: nova/virt/vmwareapi/vmops.py:508 #, python-format msgid "Failed to copy cached image %(source)s to %(dest)s for resize: 
%(error)s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1553 +#: nova/virt/vmwareapi/vmops.py:1551 #, python-format msgid "Attaching network adapter failed. Exception: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1593 +#: nova/virt/vmwareapi/vmops.py:1591 #, python-format msgid "Detaching network adapter failed. Exception: %s" msgstr "" diff --git a/nova/locale/nova-log-info.pot b/nova/locale/nova-log-info.pot index 4f11257f2b..adeb552085 100644 --- a/nova/locale/nova-log-info.pot +++ b/nova/locale/nova-log-info.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev425.g05dbf0d\n" +"Project-Id-Version: nova 2014.2.dev566.gd156d7f\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -42,7 +42,12 @@ msgstr "" msgid "Deleting network with id %s" msgstr "" -#: nova/compute/manager.py:5452 +#: nova/compute/manager.py:2663 +#, python-format +msgid "bringing vm to original state: '%s'" +msgstr "" + +#: nova/compute/manager.py:5471 #, python-format msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." msgstr "" @@ -143,131 +148,136 @@ msgstr "" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" +#: nova/virt/block_device.py:221 +#, python-format +msgid "preserve multipath_id %s" +msgstr "" + #: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:137 +#: nova/virt/disk/vfs/guestfs.py:139 msgid "Unable to force TCG mode, libguestfs too old?" 
msgstr "" -#: nova/virt/libvirt/driver.py:837 +#: nova/virt/libvirt/driver.py:835 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: " "%(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:950 +#: nova/virt/libvirt/driver.py:948 msgid "Instance destroyed successfully." msgstr "" -#: nova/virt/libvirt/driver.py:960 +#: nova/virt/libvirt/driver.py:958 msgid "Instance may be started again." msgstr "" -#: nova/virt/libvirt/driver.py:970 +#: nova/virt/libvirt/driver.py:968 msgid "Going to destroy instance again." msgstr "" -#: nova/virt/libvirt/driver.py:1570 +#: nova/virt/libvirt/driver.py:1576 msgid "Beginning live snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1573 +#: nova/virt/libvirt/driver.py:1579 msgid "Beginning cold snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1602 +#: nova/virt/libvirt/driver.py:1608 msgid "Snapshot extracted, beginning image upload" msgstr "" -#: nova/virt/libvirt/driver.py:1614 +#: nova/virt/libvirt/driver.py:1620 msgid "Snapshot image upload complete" msgstr "" -#: nova/virt/libvirt/driver.py:2047 +#: nova/virt/libvirt/driver.py:2132 msgid "Instance soft rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2090 +#: nova/virt/libvirt/driver.py:2175 msgid "Instance shutdown successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2098 +#: nova/virt/libvirt/driver.py:2183 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" -#: nova/virt/libvirt/driver.py:2167 +#: nova/virt/libvirt/driver.py:2252 msgid "Instance rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2335 +#: nova/virt/libvirt/driver.py:2420 msgid "Instance spawned successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:2351 +#: nova/virt/libvirt/driver.py:2436 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 +#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" -#: nova/virt/libvirt/driver.py:2646 +#: nova/virt/libvirt/driver.py:2731 msgid "Creating image" msgstr "" -#: nova/virt/libvirt/driver.py:2772 +#: nova/virt/libvirt/driver.py:2857 msgid "Using config drive" msgstr "" -#: nova/virt/libvirt/driver.py:2781 +#: nova/virt/libvirt/driver.py:2866 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/libvirt/driver.py:3334 +#: nova/virt/libvirt/driver.py:3437 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4217 +#: nova/virt/libvirt/driver.py:4320 #, python-format msgid "" "Getting block stats failed, device might have been detached. " "Instance=%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4223 +#: nova/virt/libvirt/driver.py:4326 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats " "for device" msgstr "" -#: nova/virt/libvirt/driver.py:4465 +#: nova/virt/libvirt/driver.py:4568 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5207 +#: nova/virt/libvirt/driver.py:5316 msgid "Instance running successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:5481 +#: nova/virt/libvirt/driver.py:5590 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5494 +#: nova/virt/libvirt/driver.py:5603 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5498 +#: nova/virt/libvirt/driver.py:5607 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -284,56 +294,56 @@ msgstr "" msgid "Attempted to unfilter instance which is not filtered" msgstr "" -#: nova/virt/libvirt/imagecache.py:191 +#: nova/virt/libvirt/imagecache.py:190 #, python-format msgid "Writing stored info to %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:401 +#: nova/virt/libvirt/imagecache.py:400 #, python-format msgid "" "image %(id)s at (%(base_file)s): image verification skipped, no hash " "stored" msgstr "" -#: nova/virt/libvirt/imagecache.py:410 +#: nova/virt/libvirt/imagecache.py:409 #, python-format msgid "%(id)s (%(base_file)s): generating checksum" msgstr "" -#: nova/virt/libvirt/imagecache.py:438 +#: nova/virt/libvirt/imagecache.py:437 #, python-format msgid "Base file too young to remove: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:441 +#: nova/virt/libvirt/imagecache.py:440 #, python-format msgid "Removing base file: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:459 +#: nova/virt/libvirt/imagecache.py:458 #, python-format msgid "image %(id)s at (%(base_file)s): checking" msgstr "" -#: nova/virt/libvirt/imagecache.py:483 +#: nova/virt/libvirt/imagecache.py:482 #, python-format msgid "" "image %(id)s at (%(base_file)s): in use: on this node %(local)d local, " "%(remote)d on other nodes sharing this instance storage" msgstr "" -#: nova/virt/libvirt/imagecache.py:550 +#: nova/virt/libvirt/imagecache.py:549 #, python-format msgid "Active base files: %s" msgstr "" -#: nova/virt/libvirt/imagecache.py:553 +#: nova/virt/libvirt/imagecache.py:552 #, python-format msgid "Corrupt base files: %s" msgstr "" -#: 
nova/virt/libvirt/imagecache.py:557 +#: nova/virt/libvirt/imagecache.py:556 #, python-format msgid "Removable base files: %s" msgstr "" diff --git a/nova/locale/nova-log-warning.pot b/nova/locale/nova-log-warning.pot index 0e497fc11b..95d18da37a 100644 --- a/nova/locale/nova-log-warning.pot +++ b/nova/locale/nova-log-warning.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev425.g05dbf0d\n" +"Project-Id-Version: nova 2014.2.dev566.gd156d7f\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -32,7 +32,7 @@ msgid "" " will be locked out for %(lock_mins)d minutes." msgstr "" -#: nova/api/ec2/cloud.py:1289 +#: nova/api/ec2/cloud.py:1290 #: nova/api/openstack/compute/contrib/floating_ips.py:254 #, python-format msgid "multiple fixed_ips exist, using the first: %s" @@ -114,16 +114,16 @@ msgstr "" msgid "Instance has had its instance_type removed from the DB" msgstr "" -#: nova/compute/manager.py:2016 +#: nova/compute/manager.py:2023 msgid "No more network or fixed IP to be allocated" msgstr "" -#: nova/compute/manager.py:2256 +#: nova/compute/manager.py:2263 #, python-format msgid "Ignoring EndpointNotFound: %s" msgstr "" -#: nova/compute/manager.py:2274 +#: nova/compute/manager.py:2281 #, python-format msgid "Failed to delete volume: %(volume_id)s due to %(exc)s" msgstr "" @@ -163,23 +163,39 @@ msgstr "" msgid "Instance: %(instance_uuid)s failed to save into memcached" msgstr "" -#: nova/network/neutronv2/api.py:214 +#: nova/network/neutronv2/api.py:218 #, python-format msgid "Neutron error: Port quota exceeded in tenant: %s" msgstr "" -#: nova/network/neutronv2/api.py:219 +#: nova/network/neutronv2/api.py:223 #, python-format msgid "Neutron error: No more fixed IPs in network: %s" msgstr "" -#: nova/network/neutronv2/api.py:223 +#: 
nova/network/neutronv2/api.py:227 #, python-format msgid "" "Neutron error: MAC address %(mac)s is already in use on network " "%(network)s." msgstr "" +#: nova/network/neutronv2/api.py:302 +msgid "No network configured!" +msgstr "" + +#: nova/network/neutronv2/api.py:497 +#, python-format +msgid "Port %s does not exist" +msgstr "" + +#: nova/network/neutronv2/api.py:1160 +#, python-format +msgid "" +"Network %(id)s not matched with the tenants network! The ports tenant " +"%(tenant_id)s will be used." +msgstr "" + #: nova/openstack/common/loopingcall.py:87 #, python-format msgid "task %(func_name)s run outlasted interval by %(delay).2f sec" @@ -239,6 +255,21 @@ msgstr "" msgid "Could not decode ram_allocation_ratio: '%s'" msgstr "" +#: nova/storage/linuxscsi.py:100 +#, python-format +msgid "Multipath call failed exit (%(code)s)" +msgstr "" + +#: nova/storage/linuxscsi.py:121 +#, python-format +msgid "Couldn't find multipath device %s" +msgstr "" + +#: nova/storage/linuxscsi.py:130 +#, python-format +msgid "Skip faulty line \"%(dev_line)s\" of multipath device %(mdev)s" +msgstr "" + #: nova/virt/disk/api.py:366 #, python-format msgid "Ignoring error injecting data into image %(image)s (%(e)s)" @@ -253,12 +284,12 @@ msgstr "" msgid "Unable to import guestfs, falling back to VFSLocalFS" msgstr "" -#: nova/virt/libvirt/driver.py:370 +#: nova/virt/libvirt/driver.py:376 #, python-format msgid "Invalid cachemode %(cache_mode)s specified for disk type %(disk_type)s." 
msgstr "" -#: nova/virt/libvirt/driver.py:616 +#: nova/virt/libvirt/driver.py:614 #, python-format msgid "" "The libvirt driver is not tested on %(type)s/%(arch)s by the OpenStack " @@ -266,122 +297,122 @@ msgid "" "see: https://wiki.openstack.org/wiki/HypervisorSupportMatrix" msgstr "" -#: nova/virt/libvirt/driver.py:673 +#: nova/virt/libvirt/driver.py:671 #, python-format msgid "URI %(uri)s does not support events: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:689 +#: nova/virt/libvirt/driver.py:687 #, python-format msgid "URI %(uri)s does not support connection events: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:921 +#: nova/virt/libvirt/driver.py:919 msgid "Cannot destroy instance, operation time out" msgstr "" -#: nova/virt/libvirt/driver.py:945 +#: nova/virt/libvirt/driver.py:943 msgid "During wait destroy, instance disappeared." msgstr "" -#: nova/virt/libvirt/driver.py:1029 +#: nova/virt/libvirt/driver.py:1027 msgid "Instance may be still running, destroy it again." msgstr "" -#: nova/virt/libvirt/driver.py:1082 +#: nova/virt/libvirt/driver.py:1080 #, python-format msgid "Ignoring Volume Error on vol %(vol_id)s during delete %(exc)s" msgstr "" -#: nova/virt/libvirt/driver.py:1132 +#: nova/virt/libvirt/driver.py:1130 #, python-format msgid "Volume %(disk)s possibly unsafe to remove, please clean up manually" msgstr "" -#: nova/virt/libvirt/driver.py:1408 nova/virt/libvirt/driver.py:1416 +#: nova/virt/libvirt/driver.py:1414 nova/virt/libvirt/driver.py:1422 msgid "During detach_volume, instance disappeared." msgstr "" -#: nova/virt/libvirt/driver.py:1461 +#: nova/virt/libvirt/driver.py:1467 msgid "During detach_interface, instance disappeared." msgstr "" -#: nova/virt/libvirt/driver.py:2051 +#: nova/virt/libvirt/driver.py:2136 msgid "Failed to soft reboot instance. Trying hard reboot." msgstr "" -#: nova/virt/libvirt/driver.py:2608 +#: nova/virt/libvirt/driver.py:2693 #, python-format msgid "Image %s not found on disk storage. 
Continue without injecting data" msgstr "" -#: nova/virt/libvirt/driver.py:2795 +#: nova/virt/libvirt/driver.py:2880 msgid "File injection into a boot from volume instance is not supported" msgstr "" -#: nova/virt/libvirt/driver.py:2870 +#: nova/virt/libvirt/driver.py:2955 msgid "Instance disappeared while detaching a PCI device from it." msgstr "" -#: nova/virt/libvirt/driver.py:2925 +#: nova/virt/libvirt/driver.py:3010 #, python-format msgid "Cannot update service status on host: %s,since it is not registered." msgstr "" -#: nova/virt/libvirt/driver.py:2928 +#: nova/virt/libvirt/driver.py:3013 #, python-format msgid "Cannot update service status on host: %s,due to an unexpected exception." msgstr "" -#: nova/virt/libvirt/driver.py:2956 +#: nova/virt/libvirt/driver.py:3041 #, python-format msgid "URI %(uri)s does not support full set of host capabilities: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:3785 +#: nova/virt/libvirt/driver.py:3888 #, python-format msgid "Timeout waiting for vif plugging callback for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:3806 +#: nova/virt/libvirt/driver.py:3909 #, python-format msgid "couldn't obtain the XML from domain: %(uuid)s, exception: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3863 +#: nova/virt/libvirt/driver.py:3966 msgid "" "Cannot get the number of cpu, because this function is not implemented " "for this platform. 
" msgstr "" -#: nova/virt/libvirt/driver.py:3925 +#: nova/virt/libvirt/driver.py:4028 #, python-format msgid "couldn't obtain the vpu count from domain id: %(uuid)s, exception: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3956 +#: nova/virt/libvirt/driver.py:4059 #, python-format msgid "couldn't obtain the memory from domain: %(uuid)s, exception: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:4158 +#: nova/virt/libvirt/driver.py:4261 #, python-format msgid "URI %(uri)s does not support listDevices: %(error)s" msgstr "" -#: nova/virt/libvirt/driver.py:4813 +#: nova/virt/libvirt/driver.py:4916 #, python-format msgid "plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d." msgstr "" -#: nova/virt/libvirt/driver.py:5023 +#: nova/virt/libvirt/driver.py:5126 #, python-format msgid "" "Error from libvirt while getting description of %(instance_name)s: [Error" " Code %(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:5031 +#: nova/virt/libvirt/driver.py:5134 #, python-format msgid "" "Periodic task is updating the host stat, it is trying to get disk " @@ -389,7 +420,7 @@ msgid "" "resize." msgstr "" -#: nova/virt/libvirt/driver.py:5037 +#: nova/virt/libvirt/driver.py:5140 #, python-format msgid "" "Periodic task is updating the host stat, it is trying to get disk " @@ -403,21 +434,21 @@ msgid "" "correctly." msgstr "" -#: nova/virt/libvirt/imagecache.py:318 +#: nova/virt/libvirt/imagecache.py:317 #, python-format msgid "" "Instance %(instance)s is using a backing file %(backing)s which does not " "appear in the image service" msgstr "" -#: nova/virt/libvirt/imagecache.py:495 +#: nova/virt/libvirt/imagecache.py:494 #, python-format msgid "" "image %(id)s at (%(base_file)s): warning -- an absent base file is in " "use! instances: %(instance_list)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:545 +#: nova/virt/libvirt/imagecache.py:544 #, python-format msgid "Unknown base file: %s" msgstr "" @@ -430,7 +461,7 @@ msgid "" "%(free_space)db." 
msgstr "" -#: nova/virt/libvirt/rbd.py:268 +#: nova/virt/libvirt/rbd_utils.py:268 #, python-format msgid "rbd remove %(volume)s in pool %(pool)s failed" msgstr "" @@ -480,35 +511,40 @@ msgid "" "Try number: %(tries)s" msgstr "" -#: nova/virt/libvirt/volume.py:1036 +#: nova/virt/libvirt/volume.py:995 +#, python-format +msgid "multipath-tools probably work improperly. devices to remove = %s." +msgstr "" + +#: nova/virt/libvirt/volume.py:1040 msgid "Value required for 'scality_sofs_config'" msgstr "" -#: nova/virt/libvirt/volume.py:1047 +#: nova/virt/libvirt/volume.py:1051 #, python-format msgid "Cannot access 'scality_sofs_config': %s" msgstr "" -#: nova/virt/libvirt/volume.py:1053 +#: nova/virt/libvirt/volume.py:1057 msgid "Cannot execute /sbin/mount.sofs" msgstr "" -#: nova/virt/libvirt/volume.py:1068 +#: nova/virt/libvirt/volume.py:1072 msgid "Cannot mount Scality SOFS, check syslog for errors" msgstr "" -#: nova/virt/vmwareapi/driver.py:95 +#: nova/virt/vmwareapi/driver.py:96 msgid "" "The VMware ESX driver is now deprecated and has been removed in the Juno " "release. The VC driver will remain and continue to be supported." msgstr "" -#: nova/virt/vmwareapi/driver.py:150 +#: nova/virt/vmwareapi/driver.py:157 #, python-format msgid "The following clusters could not be found in the vCenter %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:179 +#: nova/virt/vmwareapi/driver.py:202 msgid "Instance cannot be found in host, or in an unknownstate." 
msgstr "" diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot index b431f5ef8d..085cecbd31 100644 --- a/nova/locale/nova.pot +++ b/nova/locale/nova.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: nova 2014.2.dev425.g05dbf0d\n" +"Project-Id-Version: nova 2014.2.dev566.gd156d7f\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -37,19 +37,23 @@ msgstr "" msgid "Invalid source_type field." msgstr "" -#: nova/block_device.py:192 +#: nova/block_device.py:191 +msgid "Invalid device UUID." +msgstr "" + +#: nova/block_device.py:195 msgid "Missing device UUID." msgstr "" -#: nova/block_device.py:371 +#: nova/block_device.py:374 msgid "Device name empty or too long." msgstr "" -#: nova/block_device.py:375 +#: nova/block_device.py:378 msgid "Device name contains spaces." msgstr "" -#: nova/block_device.py:385 +#: nova/block_device.py:388 msgid "Invalid volume_size." 
msgstr "" @@ -1828,6 +1832,43 @@ msgstr "" msgid "Architecture name '%(arch)s' is not recognised" msgstr "" +#: nova/exception.py:1645 +msgid "CPU and memory allocation must be provided for all NUMA nodes" +msgstr "" + +#: nova/exception.py:1650 +#, python-format +msgid "" +"Image property '%(name)s' is not permitted to override NUMA configuration" +" set against the flavor" +msgstr "" + +#: nova/exception.py:1655 +msgid "" +"Asymmetric NUMA topologies require explicit assignment of CPUs and memory" +" to nodes in image or flavor" +msgstr "" + +#: nova/exception.py:1660 +#, python-format +msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" +msgstr "" + +#: nova/exception.py:1664 +#, python-format +msgid "CPU number %(cpunum)d is assigned to two nodes" +msgstr "" + +#: nova/exception.py:1668 +#, python-format +msgid "CPU number %(cpuset)s is not assigned to any node" +msgstr "" + +#: nova/exception.py:1672 +#, python-format +msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" +msgstr "" + #: nova/filters.py:84 #, python-format msgid "Filter %s returned 0 hosts" @@ -2063,208 +2104,208 @@ msgstr "" msgid "Unknown error occurred." 
msgstr "" -#: nova/api/ec2/cloud.py:392 +#: nova/api/ec2/cloud.py:391 #, python-format msgid "Create snapshot of volume %s" msgstr "" -#: nova/api/ec2/cloud.py:417 +#: nova/api/ec2/cloud.py:418 #, python-format msgid "Could not find key pair(s): %s" msgstr "" -#: nova/api/ec2/cloud.py:433 +#: nova/api/ec2/cloud.py:434 #, python-format msgid "Create key pair %s" msgstr "" -#: nova/api/ec2/cloud.py:445 +#: nova/api/ec2/cloud.py:446 #, python-format msgid "Import key %s" msgstr "" -#: nova/api/ec2/cloud.py:458 +#: nova/api/ec2/cloud.py:459 #, python-format msgid "Delete key pair %s" msgstr "" -#: nova/api/ec2/cloud.py:600 nova/api/ec2/cloud.py:730 +#: nova/api/ec2/cloud.py:601 nova/api/ec2/cloud.py:731 msgid "need group_name or group_id" msgstr "" -#: nova/api/ec2/cloud.py:605 +#: nova/api/ec2/cloud.py:606 msgid "can't build a valid rule" msgstr "" -#: nova/api/ec2/cloud.py:613 +#: nova/api/ec2/cloud.py:614 #, python-format msgid "Invalid IP protocol %(protocol)s" msgstr "" -#: nova/api/ec2/cloud.py:647 nova/api/ec2/cloud.py:683 +#: nova/api/ec2/cloud.py:648 nova/api/ec2/cloud.py:684 msgid "No rule for the specified parameters." 
msgstr "" -#: nova/api/ec2/cloud.py:761 +#: nova/api/ec2/cloud.py:762 #, python-format msgid "Get console output for instance %s" msgstr "" -#: nova/api/ec2/cloud.py:833 +#: nova/api/ec2/cloud.py:834 #, python-format msgid "Create volume from snapshot %s" msgstr "" -#: nova/api/ec2/cloud.py:837 nova/api/openstack/compute/contrib/volumes.py:243 +#: nova/api/ec2/cloud.py:838 nova/api/openstack/compute/contrib/volumes.py:243 #, python-format msgid "Create volume of %s GB" msgstr "" -#: nova/api/ec2/cloud.py:877 +#: nova/api/ec2/cloud.py:878 #, python-format msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: nova/api/ec2/cloud.py:907 nova/api/openstack/compute/contrib/volumes.py:506 +#: nova/api/ec2/cloud.py:908 nova/api/openstack/compute/contrib/volumes.py:506 #, python-format msgid "Detach volume %s" msgstr "" -#: nova/api/ec2/cloud.py:1261 +#: nova/api/ec2/cloud.py:1262 msgid "Allocate address" msgstr "" -#: nova/api/ec2/cloud.py:1266 +#: nova/api/ec2/cloud.py:1267 #, python-format msgid "Release address %s" msgstr "" -#: nova/api/ec2/cloud.py:1271 +#: nova/api/ec2/cloud.py:1272 #, python-format msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "" -#: nova/api/ec2/cloud.py:1281 +#: nova/api/ec2/cloud.py:1282 msgid "Unable to associate IP Address, no fixed_ips." 
msgstr "" -#: nova/api/ec2/cloud.py:1302 +#: nova/api/ec2/cloud.py:1303 #, python-format msgid "Disassociate address %s" msgstr "" -#: nova/api/ec2/cloud.py:1319 nova/api/openstack/compute/servers.py:920 +#: nova/api/ec2/cloud.py:1320 nova/api/openstack/compute/servers.py:920 #: nova/api/openstack/compute/plugins/v3/multiple_create.py:64 msgid "min_count must be <= max_count" msgstr "" -#: nova/api/ec2/cloud.py:1351 +#: nova/api/ec2/cloud.py:1352 msgid "Image must be available" msgstr "" -#: nova/api/ec2/cloud.py:1451 +#: nova/api/ec2/cloud.py:1452 #, python-format msgid "Reboot instance %r" msgstr "" -#: nova/api/ec2/cloud.py:1566 +#: nova/api/ec2/cloud.py:1567 #, python-format msgid "De-registering image %s" msgstr "" -#: nova/api/ec2/cloud.py:1582 +#: nova/api/ec2/cloud.py:1583 msgid "imageLocation is required" msgstr "" -#: nova/api/ec2/cloud.py:1602 +#: nova/api/ec2/cloud.py:1603 #, python-format msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: nova/api/ec2/cloud.py:1663 +#: nova/api/ec2/cloud.py:1664 msgid "user or group not specified" msgstr "" -#: nova/api/ec2/cloud.py:1666 +#: nova/api/ec2/cloud.py:1667 msgid "only group \"all\" is supported" msgstr "" -#: nova/api/ec2/cloud.py:1669 +#: nova/api/ec2/cloud.py:1670 msgid "operation_type must be add or remove" msgstr "" -#: nova/api/ec2/cloud.py:1671 +#: nova/api/ec2/cloud.py:1672 #, python-format msgid "Updating image %s publicity" msgstr "" -#: nova/api/ec2/cloud.py:1684 +#: nova/api/ec2/cloud.py:1685 #, python-format msgid "Not allowed to modify attributes for image %s" msgstr "" -#: nova/api/ec2/cloud.py:1714 +#: nova/api/ec2/cloud.py:1715 #, python-format msgid "" "Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not " "have a volume attached at root (%(root)s)" msgstr "" -#: nova/api/ec2/cloud.py:1747 +#: nova/api/ec2/cloud.py:1748 #, python-format msgid "" "Couldn't stop instance %(instance)s within 1 hour. 
Current vm_state: " "%(vm_state)s, current task_state: %(task_state)s" msgstr "" -#: nova/api/ec2/cloud.py:1771 +#: nova/api/ec2/cloud.py:1772 #, python-format msgid "image of %(instance)s at %(now)s" msgstr "" -#: nova/api/ec2/cloud.py:1796 nova/api/ec2/cloud.py:1846 +#: nova/api/ec2/cloud.py:1797 nova/api/ec2/cloud.py:1847 msgid "resource_id and tag are required" msgstr "" -#: nova/api/ec2/cloud.py:1800 nova/api/ec2/cloud.py:1850 +#: nova/api/ec2/cloud.py:1801 nova/api/ec2/cloud.py:1851 msgid "Expecting a list of resources" msgstr "" -#: nova/api/ec2/cloud.py:1805 nova/api/ec2/cloud.py:1855 -#: nova/api/ec2/cloud.py:1913 +#: nova/api/ec2/cloud.py:1806 nova/api/ec2/cloud.py:1856 +#: nova/api/ec2/cloud.py:1914 msgid "Only instances implemented" msgstr "" -#: nova/api/ec2/cloud.py:1809 nova/api/ec2/cloud.py:1859 +#: nova/api/ec2/cloud.py:1810 nova/api/ec2/cloud.py:1860 msgid "Expecting a list of tagSets" msgstr "" -#: nova/api/ec2/cloud.py:1815 nova/api/ec2/cloud.py:1868 +#: nova/api/ec2/cloud.py:1816 nova/api/ec2/cloud.py:1869 msgid "Expecting tagSet to be key/value pairs" msgstr "" -#: nova/api/ec2/cloud.py:1822 +#: nova/api/ec2/cloud.py:1823 msgid "Expecting both key and value to be set" msgstr "" -#: nova/api/ec2/cloud.py:1873 +#: nova/api/ec2/cloud.py:1874 msgid "Expecting key to be set" msgstr "" -#: nova/api/ec2/cloud.py:1947 +#: nova/api/ec2/cloud.py:1948 msgid "Invalid CIDR" msgstr "" -#: nova/api/ec2/ec2utils.py:254 +#: nova/api/ec2/ec2utils.py:255 #, python-format msgid "Unacceptable attach status:%s for ec2 API." msgstr "" -#: nova/api/ec2/ec2utils.py:277 +#: nova/api/ec2/ec2utils.py:278 msgid "Request must include either Timestamp or Expires, but cannot contain both" msgstr "" -#: nova/api/ec2/ec2utils.py:295 +#: nova/api/ec2/ec2utils.py:296 msgid "Timestamp is invalid." 
msgstr "" @@ -2539,8 +2580,8 @@ msgstr "" msgid "Instance does not exist" msgstr "" -#: nova/api/openstack/compute/ips.py:90 -#: nova/api/openstack/compute/plugins/v3/ips.py:62 +#: nova/api/openstack/compute/ips.py:84 +#: nova/api/openstack/compute/plugins/v3/ips.py:56 msgid "Instance is not a member of specified network" msgstr "" @@ -2926,13 +2967,13 @@ msgstr "" msgid "Delete snapshot with id: %s" msgstr "" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:105 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:103 msgid "Attach interface" msgstr "" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:120 -#: nova/api/openstack/compute/contrib/attach_interfaces.py:158 -#: nova/api/openstack/compute/contrib/attach_interfaces.py:184 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:116 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:145 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:166 #: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:174 #: nova/network/security_group/neutron_driver.py:510 #: nova/network/security_group/neutron_driver.py:514 @@ -2942,15 +2983,11 @@ msgstr "" msgid "Network driver does not support this function." msgstr "" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:124 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:120 msgid "Failed to attach interface" msgstr "" -#: nova/api/openstack/compute/contrib/attach_interfaces.py:134 -msgid "Attachments update is not supported" -msgstr "" - -#: nova/api/openstack/compute/contrib/attach_interfaces.py:146 +#: nova/api/openstack/compute/contrib/attach_interfaces.py:136 #: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:144 #, python-format msgid "Detach interface %s" @@ -3429,16 +3466,6 @@ msgid "" " %(quota_used)s" msgstr "" -#: nova/api/openstack/compute/contrib/rescue.py:78 -#: nova/api/openstack/compute/plugins/v3/rescue.py:80 -msgid "The rescue operation is not implemented by this cloud." 
-msgstr "" - -#: nova/api/openstack/compute/contrib/rescue.py:98 -#: nova/api/openstack/compute/plugins/v3/rescue.py:104 -msgid "The unrescue operation is not implemented by this cloud." -msgstr "" - #: nova/api/openstack/compute/contrib/scheduler_hints.py:37 #: nova/api/openstack/compute/plugins/v3/scheduler_hints.py:39 msgid "Malformed scheduler_hints attribute" @@ -4763,7 +4790,7 @@ msgstr "" msgid "Volume id: %s finished being created but was not set as 'available'" msgstr "" -#: nova/compute/manager.py:1235 nova/compute/manager.py:2057 +#: nova/compute/manager.py:1235 nova/compute/manager.py:2064 msgid "Success" msgstr "" @@ -4788,7 +4815,7 @@ msgstr "" msgid "Instance build timed out. Set to error state." msgstr "" -#: nova/compute/manager.py:1524 nova/compute/manager.py:1888 +#: nova/compute/manager.py:1524 nova/compute/manager.py:1894 msgid "Starting instance..." msgstr "" @@ -4804,429 +4831,424 @@ msgstr "" msgid "Instance failed network setup (attempt %(attempt)d of %(attempts)d)" msgstr "" -#: nova/compute/manager.py:2020 +#: nova/compute/manager.py:2027 #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" -#: nova/compute/manager.py:2030 nova/compute/manager.py:2080 +#: nova/compute/manager.py:2037 nova/compute/manager.py:2087 msgid "Failed to allocate the network(s), not rescheduling." msgstr "" -#: nova/compute/manager.py:2106 +#: nova/compute/manager.py:2113 msgid "Failure prepping block device." msgstr "" -#: nova/compute/manager.py:2127 +#: nova/compute/manager.py:2134 msgid "Could not clean up failed build, not rescheduling" msgstr "" -#: nova/compute/manager.py:2185 +#: nova/compute/manager.py:2192 msgid "Failed to deallocate network for instance." 
msgstr "" -#: nova/compute/manager.py:2206 +#: nova/compute/manager.py:2213 #, python-format msgid "%(action_str)s instance" msgstr "" -#: nova/compute/manager.py:2361 +#: nova/compute/manager.py:2368 msgid "Instance disappeared during terminate" msgstr "" -#: nova/compute/manager.py:2547 +#: nova/compute/manager.py:2554 msgid "Rebuilding instance" msgstr "" -#: nova/compute/manager.py:2560 +#: nova/compute/manager.py:2567 msgid "Invalid state of instance files on shared storage" msgstr "" -#: nova/compute/manager.py:2564 +#: nova/compute/manager.py:2571 msgid "disk on shared storage, recreating using existing disk" msgstr "" -#: nova/compute/manager.py:2568 +#: nova/compute/manager.py:2575 #, python-format msgid "disk not on shared storage, rebuilding from: '%s'" msgstr "" -#: nova/compute/manager.py:2655 -#, python-format -msgid "bringing vm to original state: '%s'" -msgstr "" - -#: nova/compute/manager.py:2686 +#: nova/compute/manager.py:2694 #, python-format msgid "Detaching from volume api: %s" msgstr "" -#: nova/compute/manager.py:2713 +#: nova/compute/manager.py:2721 msgid "Rebooting instance" msgstr "" -#: nova/compute/manager.py:2730 +#: nova/compute/manager.py:2738 #, python-format msgid "" "trying to reboot a non-running instance: (state: %(state)s expected: " "%(running)s)" msgstr "" -#: nova/compute/manager.py:2766 +#: nova/compute/manager.py:2774 msgid "Reboot failed but instance is running" msgstr "" -#: nova/compute/manager.py:2774 +#: nova/compute/manager.py:2782 #, python-format msgid "Cannot reboot instance: %s" msgstr "" -#: nova/compute/manager.py:2786 +#: nova/compute/manager.py:2794 msgid "Instance disappeared during reboot" msgstr "" -#: nova/compute/manager.py:2854 +#: nova/compute/manager.py:2862 msgid "instance snapshotting" msgstr "" -#: nova/compute/manager.py:2860 +#: nova/compute/manager.py:2868 #, python-format msgid "" "trying to snapshot a non-running instance: (state: %(state)s expected: " "%(running)s)" msgstr "" -#: 
nova/compute/manager.py:2893 +#: nova/compute/manager.py:2901 #, python-format msgid "Error while trying to clean up image %s" msgstr "" -#: nova/compute/manager.py:2898 +#: nova/compute/manager.py:2906 msgid "Image not found during snapshot" msgstr "" -#: nova/compute/manager.py:2980 +#: nova/compute/manager.py:2988 #, python-format msgid "Failed to set admin password. Instance %s is not running" msgstr "" -#: nova/compute/manager.py:2987 +#: nova/compute/manager.py:2995 msgid "Root password set" msgstr "" -#: nova/compute/manager.py:2992 +#: nova/compute/manager.py:3000 msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "" -#: nova/compute/manager.py:3011 +#: nova/compute/manager.py:3019 msgid "error setting admin password" msgstr "" -#: nova/compute/manager.py:3027 +#: nova/compute/manager.py:3035 #, python-format msgid "" "trying to inject a file into a non-running (state: %(current_state)s " "expected: %(expected_state)s)" msgstr "" -#: nova/compute/manager.py:3032 +#: nova/compute/manager.py:3040 #, python-format msgid "injecting file to %s" msgstr "" -#: nova/compute/manager.py:3050 +#: nova/compute/manager.py:3058 msgid "" "Unable to find a different image to use for rescue VM, using instance's " "current image" msgstr "" -#: nova/compute/manager.py:3069 +#: nova/compute/manager.py:3077 msgid "Rescuing" msgstr "" -#: nova/compute/manager.py:3094 +#: nova/compute/manager.py:3102 #, python-format msgid "Driver Error: %s" msgstr "" -#: nova/compute/manager.py:3117 +#: nova/compute/manager.py:3125 msgid "Unrescuing" msgstr "" -#: nova/compute/manager.py:3188 +#: nova/compute/manager.py:3196 #, python-format msgid "Migration %s is not found during confirmation" msgstr "" -#: nova/compute/manager.py:3193 +#: nova/compute/manager.py:3201 #, python-format msgid "Migration %s is already confirmed" msgstr "" -#: nova/compute/manager.py:3197 +#: nova/compute/manager.py:3205 #, python-format msgid "" "Unexpected confirmation status 
'%(status)s' of migration %(id)s, exit " "confirmation process" msgstr "" -#: nova/compute/manager.py:3211 +#: nova/compute/manager.py:3219 msgid "Instance is not found during confirmation" msgstr "" -#: nova/compute/manager.py:3392 +#: nova/compute/manager.py:3400 #, python-format msgid "Updating instance to original state: '%s'" msgstr "" -#: nova/compute/manager.py:3415 +#: nova/compute/manager.py:3423 msgid "Instance has no source host" msgstr "" -#: nova/compute/manager.py:3421 +#: nova/compute/manager.py:3429 msgid "destination same as source!" msgstr "" -#: nova/compute/manager.py:3439 +#: nova/compute/manager.py:3447 msgid "Migrating" msgstr "" -#: nova/compute/manager.py:3771 +#: nova/compute/manager.py:3784 msgid "Pausing" msgstr "" -#: nova/compute/manager.py:3788 +#: nova/compute/manager.py:3801 msgid "Unpausing" msgstr "" -#: nova/compute/manager.py:3829 nova/compute/manager.py:3846 +#: nova/compute/manager.py:3842 nova/compute/manager.py:3859 msgid "Retrieving diagnostics" msgstr "" -#: nova/compute/manager.py:3882 +#: nova/compute/manager.py:3895 msgid "Resuming" msgstr "" -#: nova/compute/manager.py:4102 +#: nova/compute/manager.py:4115 msgid "Get console output" msgstr "" -#: nova/compute/manager.py:4301 +#: nova/compute/manager.py:4314 #, python-format msgid "Attaching volume %(volume_id)s to %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4326 +#: nova/compute/manager.py:4339 #, python-format msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" msgstr "" -#: nova/compute/manager.py:4337 +#: nova/compute/manager.py:4350 msgid "Detaching volume from unknown instance" msgstr "" -#: nova/compute/manager.py:4525 +#: nova/compute/manager.py:4544 #, python-format msgid "allocate_port_for_instance returned %(ports)s ports" msgstr "" -#: nova/compute/manager.py:4549 +#: nova/compute/manager.py:4568 #, python-format msgid "Port %s is not attached" msgstr "" -#: nova/compute/manager.py:4561 nova/tests/compute/test_compute.py:10659 +#: 
nova/compute/manager.py:4580 nova/tests/compute/test_compute.py:10791 #, python-format msgid "Host %s not found" msgstr "" -#: nova/compute/manager.py:4779 +#: nova/compute/manager.py:4798 msgid "_post_live_migration() is started.." msgstr "" -#: nova/compute/manager.py:4855 +#: nova/compute/manager.py:4874 #, python-format msgid "Migrating instance to %s finished successfully." msgstr "" -#: nova/compute/manager.py:4857 +#: nova/compute/manager.py:4876 msgid "" "You may see the error \"libvirt: QEMU error: Domain not found: no domain " "with matching name.\" This error can be safely ignored." msgstr "" -#: nova/compute/manager.py:4882 +#: nova/compute/manager.py:4901 msgid "Post operation of migration started" msgstr "" -#: nova/compute/manager.py:5087 +#: nova/compute/manager.py:5106 msgid "An error occurred while refreshing the network cache." msgstr "" -#: nova/compute/manager.py:5140 +#: nova/compute/manager.py:5159 #, python-format msgid "" "Found %(migration_count)d unconfirmed migrations older than " "%(confirm_window)d seconds" msgstr "" -#: nova/compute/manager.py:5145 +#: nova/compute/manager.py:5164 #, python-format msgid "Setting migration %(migration_id)s to error: %(reason)s" msgstr "" -#: nova/compute/manager.py:5154 +#: nova/compute/manager.py:5173 #, python-format msgid "" "Automatically confirming migration %(migration_id)s for instance " "%(instance_uuid)s" msgstr "" -#: nova/compute/manager.py:5164 +#: nova/compute/manager.py:5183 #, python-format msgid "Instance %s not found" msgstr "" -#: nova/compute/manager.py:5169 +#: nova/compute/manager.py:5188 msgid "In ERROR state" msgstr "" -#: nova/compute/manager.py:5176 +#: nova/compute/manager.py:5195 #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "" -#: nova/compute/manager.py:5187 +#: nova/compute/manager.py:5206 #, python-format msgid "Error auto-confirming resize: %s. Will retry later." 
msgstr "" -#: nova/compute/manager.py:5236 +#: nova/compute/manager.py:5255 #, python-format msgid "" "Running instance usage audit for host %(host)s from %(begin_time)s to " "%(end_time)s. %(number_instances)s instances." msgstr "" -#: nova/compute/manager.py:5285 +#: nova/compute/manager.py:5304 msgid "Updating bandwidth usage cache" msgstr "" -#: nova/compute/manager.py:5307 +#: nova/compute/manager.py:5326 msgid "Bandwidth usage not supported by hypervisor." msgstr "" -#: nova/compute/manager.py:5430 +#: nova/compute/manager.py:5449 #, python-format msgid "" "Found %(num_db_instances)s in the database and %(num_vm_instances)s on " "the hypervisor." msgstr "" -#: nova/compute/manager.py:5496 +#: nova/compute/manager.py:5515 #, python-format msgid "" "During the sync_power process the instance has moved from host %(src)s to" " host %(dst)s" msgstr "" -#: nova/compute/manager.py:5509 +#: nova/compute/manager.py:5528 #, python-format msgid "During sync_power_state the instance has a pending task (%(task)s). Skip." msgstr "" -#: nova/compute/manager.py:5534 +#: nova/compute/manager.py:5553 msgid "Instance shutdown by itself. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5553 +#: nova/compute/manager.py:5572 msgid "Instance is suspended unexpectedly. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5569 +#: nova/compute/manager.py:5588 msgid "Instance is paused unexpectedly. Ignore." msgstr "" -#: nova/compute/manager.py:5575 +#: nova/compute/manager.py:5594 msgid "Instance is unexpectedly not found. Ignore." msgstr "" -#: nova/compute/manager.py:5581 +#: nova/compute/manager.py:5600 msgid "Instance is not stopped. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5595 +#: nova/compute/manager.py:5614 msgid "Paused instance shutdown by itself. Calling the stop API." msgstr "" -#: nova/compute/manager.py:5609 +#: nova/compute/manager.py:5628 msgid "Instance is not (soft-)deleted." 
msgstr "" -#: nova/compute/manager.py:5639 +#: nova/compute/manager.py:5658 msgid "Reclaiming deleted instance" msgstr "" -#: nova/compute/manager.py:5643 +#: nova/compute/manager.py:5662 #, python-format msgid "Periodic reclaim failed to delete instance: %s" msgstr "" -#: nova/compute/manager.py:5668 +#: nova/compute/manager.py:5687 #, python-format msgid "Deleting orphan compute node %s" msgstr "" -#: nova/compute/manager.py:5676 nova/compute/resource_tracker.py:406 +#: nova/compute/manager.py:5695 nova/compute/resource_tracker.py:406 #, python-format msgid "No service record for host %s" msgstr "" -#: nova/compute/manager.py:5716 +#: nova/compute/manager.py:5735 #, python-format msgid "" "Detected instance with name label '%s' which is marked as DELETED but " "still present on host." msgstr "" -#: nova/compute/manager.py:5722 +#: nova/compute/manager.py:5741 #, python-format msgid "" "Powering off instance with name label '%s' which is marked as DELETED but" " still present on host." msgstr "" -#: nova/compute/manager.py:5731 +#: nova/compute/manager.py:5750 msgid "set_bootable is not implemented for the current driver" msgstr "" -#: nova/compute/manager.py:5736 +#: nova/compute/manager.py:5755 msgid "Failed to power off instance" msgstr "" -#: nova/compute/manager.py:5740 +#: nova/compute/manager.py:5759 #, python-format msgid "" "Destroying instance with name label '%s' which is marked as DELETED but " "still present on host." 
msgstr "" -#: nova/compute/manager.py:5750 +#: nova/compute/manager.py:5769 #, python-format msgid "Periodic cleanup failed to delete instance: %s" msgstr "" -#: nova/compute/manager.py:5754 +#: nova/compute/manager.py:5773 #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "" -#: nova/compute/manager.py:5786 +#: nova/compute/manager.py:5805 #, python-format msgid "Setting instance back to %(state)s after: %(error)s" msgstr "" -#: nova/compute/manager.py:5796 +#: nova/compute/manager.py:5815 #, python-format msgid "Setting instance back to ACTIVE after: %s" msgstr "" @@ -5595,26 +5617,26 @@ msgstr "" msgid "Exception while seeding instance_types table" msgstr "" -#: nova/image/glance.py:236 +#: nova/image/glance.py:235 #, python-format msgid "" "Error contacting glance server '%(host)s:%(port)s' for '%(method)s', " "%(extra)s." msgstr "" -#: nova/image/glance.py:268 +#: nova/image/glance.py:267 #, python-format msgid "" "When loading the module %(module_str)s the following error occurred: " "%(ex)s" msgstr "" -#: nova/image/glance.py:327 +#: nova/image/glance.py:326 #, python-format msgid "Failed to instantiate the download handler for %(scheme)s" msgstr "" -#: nova/image/glance.py:343 +#: nova/image/glance.py:342 #, python-format msgid "Successfully transferred using %s" msgstr "" @@ -5760,7 +5782,7 @@ msgstr "" msgid "Not deleting key %s" msgstr "" -#: nova/network/api.py:196 nova/network/neutronv2/api.py:812 +#: nova/network/api.py:196 nova/network/neutronv2/api.py:845 #, python-format msgid "re-assign floating IP %(address)s from instance %(instance_id)s" msgstr "" @@ -6068,88 +6090,32 @@ msgstr "" msgid "Invalid IP format %s" msgstr "" -#: nova/network/neutronv2/api.py:230 -#, python-format -msgid "Neutron error creating port on network %s" -msgstr "" - -#: nova/network/neutronv2/api.py:263 +#: nova/network/neutronv2/api.py:269 #, python-format msgid "empty project id for instance %s" msgstr "" -#: 
nova/network/neutronv2/api.py:298 -msgid "No network configured!" +#: nova/network/neutronv2/api.py:313 nova/network/neutronv2/api.py:678 +msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "" -#: nova/network/neutronv2/api.py:318 +#: nova/network/neutronv2/api.py:335 #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more " "specific." msgstr "" -#: nova/network/neutronv2/api.py:388 -#, python-format -msgid "Failed to update port %s" -msgstr "" - -#: nova/network/neutronv2/api.py:395 -#, python-format -msgid "Failed to delete port %s" -msgstr "" - -#: nova/network/neutronv2/api.py:458 +#: nova/network/neutronv2/api.py:489 #, python-format msgid "Unable to reset device ID for port %s" msgstr "" -#: nova/network/neutronv2/api.py:466 -#, python-format -msgid "Port %s does not exist" -msgstr "" - -#: nova/network/neutronv2/api.py:469 nova/network/neutronv2/api.py:493 -#, python-format -msgid "Failed to delete neutron port %s" -msgstr "" - -#: nova/network/neutronv2/api.py:647 -msgid "Multiple possible networks found, use a Network ID to be more specific." -msgstr "" - -#: nova/network/neutronv2/api.py:666 -#, python-format -msgid "Failed to access port %s" -msgstr "" - -#: nova/network/neutronv2/api.py:898 -#, python-format -msgid "Unable to access floating IP %s" -msgstr "" - -#: nova/network/neutronv2/api.py:986 +#: nova/network/neutronv2/api.py:1021 #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "" -#: nova/network/neutronv2/api.py:1030 -#, python-format -msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" -msgstr "" - -#: nova/network/neutronv2/api.py:1089 -#, python-format -msgid "Unable to update host of port %s" -msgstr "" - -#: nova/network/neutronv2/api.py:1125 -#, python-format -msgid "" -"Network %(id)s not matched with the tenants network! The ports tenant " -"%(tenant_id)s will be used." 
-msgstr "" - #: nova/network/security_group/neutron_driver.py:57 #, python-format msgid "Neutron Error creating security group %s" @@ -6232,6 +6198,14 @@ msgid "" "%(instance)s" msgstr "" +#: nova/network/security_group/security_group_base.py:89 +msgid "Type and Code must be integers for ICMP protocol type" +msgstr "" + +#: nova/network/security_group/security_group_base.py:92 +msgid "To and From ports must be integers" +msgstr "" + #: nova/network/security_group/security_group_base.py:134 #, python-format msgid "This rule already exists in group %s" @@ -6242,17 +6216,17 @@ msgstr "" msgid "Error setting %(attr)s" msgstr "" -#: nova/objects/base.py:256 +#: nova/objects/base.py:262 #, python-format msgid "Unable to instantiate unregistered object type %(objtype)s" msgstr "" -#: nova/objects/base.py:375 +#: nova/objects/base.py:381 #, python-format msgid "Cannot load '%s' in the base class" msgstr "" -#: nova/objects/base.py:421 +#: nova/objects/base.py:427 #, python-format msgid "%(objname)s object has no attribute '%(attrname)s'" msgstr "" @@ -6341,22 +6315,22 @@ msgstr "" msgid "Unable to acquire lock on `%(filename)s` due to %(exception)s" msgstr "" -#: nova/openstack/common/log.py:276 +#: nova/openstack/common/log.py:289 #, python-format msgid "Deprecated: %s" msgstr "" -#: nova/openstack/common/log.py:385 +#: nova/openstack/common/log.py:397 #, python-format msgid "Error loading logging config %(log_config)s: %(err_msg)s" msgstr "" -#: nova/openstack/common/log.py:446 +#: nova/openstack/common/log.py:458 #, python-format msgid "syslog facility must be one of: %s" msgstr "" -#: nova/openstack/common/log.py:689 +#: nova/openstack/common/log.py:709 #, python-format msgid "Fatal call to deprecated config: %(msg)s" msgstr "" @@ -6737,16 +6711,6 @@ msgstr "" msgid "ZooKeeperDriver.leave: %(id)s has not joined to the %(gr)s group" msgstr "" -#: nova/storage/linuxscsi.py:100 -#, python-format -msgid "Multipath call failed exit (%(code)s)" -msgstr "" - -#: 
nova/storage/linuxscsi.py:121 -#, python-format -msgid "Couldn't find multipath device %s" -msgstr "" - #: nova/tests/fake_ldap.py:33 msgid "Attempted to instantiate singleton" msgstr "" @@ -6767,7 +6731,7 @@ msgstr "" msgid "already detached" msgstr "" -#: nova/tests/api/test_auth.py:98 +#: nova/tests/api/test_auth.py:97 msgid "unexpected role header" msgstr "" @@ -6792,32 +6756,32 @@ msgstr "" msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores" msgstr "" -#: nova/tests/compute/test_compute.py:1696 -#: nova/tests/compute/test_compute.py:1723 -#: nova/tests/compute/test_compute.py:1801 -#: nova/tests/compute/test_compute.py:1841 -#: nova/tests/compute/test_compute.py:5644 +#: nova/tests/compute/test_compute.py:1770 +#: nova/tests/compute/test_compute.py:1797 +#: nova/tests/compute/test_compute.py:1875 +#: nova/tests/compute/test_compute.py:1915 +#: nova/tests/compute/test_compute.py:5718 #, python-format msgid "Running instances: %s" msgstr "" -#: nova/tests/compute/test_compute.py:1703 -#: nova/tests/compute/test_compute.py:1771 -#: nova/tests/compute/test_compute.py:1809 +#: nova/tests/compute/test_compute.py:1777 +#: nova/tests/compute/test_compute.py:1845 +#: nova/tests/compute/test_compute.py:1883 #, python-format msgid "After terminating instances: %s" msgstr "" -#: nova/tests/compute/test_compute.py:5655 +#: nova/tests/compute/test_compute.py:5729 #, python-format msgid "After force-killing instances: %s" msgstr "" -#: nova/tests/compute/test_compute.py:6271 +#: nova/tests/compute/test_compute.py:6345 msgid "wrong host/node" msgstr "" -#: nova/tests/compute/test_compute.py:10867 +#: nova/tests/compute/test_compute.py:10999 msgid "spawn error" msgstr "" @@ -6834,7 +6798,7 @@ msgstr "" msgid "Free CPUs 2.00 VCPUs < requested 5 VCPUs" msgstr "" -#: nova/tests/db/test_migrations.py:923 +#: nova/tests/db/test_migrations.py:931 #, python-format msgid "" "The following migrations are missing a downgrade:\n" @@ -6913,27 +6877,27 @@ msgstr 
"" msgid "Unexpected status code" msgstr "" -#: nova/tests/virt/hyperv/test_hypervapi.py:513 +#: nova/tests/virt/hyperv/test_hypervapi.py:515 msgid "fake vswitch not found" msgstr "" -#: nova/tests/virt/hyperv/test_hypervapi.py:966 +#: nova/tests/virt/hyperv/test_hypervapi.py:968 msgid "Simulated failure" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1048 +#: nova/tests/virt/libvirt/fakelibvirt.py:1051 msgid "Expected a list for 'auth' parameter" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1052 +#: nova/tests/virt/libvirt/fakelibvirt.py:1055 msgid "Expected a function in 'auth[0]' parameter" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1056 +#: nova/tests/virt/libvirt/fakelibvirt.py:1059 msgid "Expected a function in 'auth[1]' parameter" msgstr "" -#: nova/tests/virt/libvirt/fakelibvirt.py:1067 +#: nova/tests/virt/libvirt/fakelibvirt.py:1070 msgid "" "virEventRegisterDefaultImpl() must be called before " "connection is used." @@ -6948,21 +6912,21 @@ msgstr "" msgid "There is no VM registered" msgstr "" -#: nova/tests/virt/vmwareapi/fake.py:987 nova/tests/virt/vmwareapi/fake.py:1323 +#: nova/tests/virt/vmwareapi/fake.py:987 nova/tests/virt/vmwareapi/fake.py:1338 #, python-format msgid "Virtual Machine with ref %s is not there" msgstr "" -#: nova/tests/virt/vmwareapi/fake.py:1112 +#: nova/tests/virt/vmwareapi/fake.py:1127 msgid "Session Invalid" msgstr "" -#: nova/tests/virt/vmwareapi/fake.py:1320 +#: nova/tests/virt/vmwareapi/fake.py:1335 msgid "No Virtual Machine has been registered yet" msgstr "" -#: nova/tests/virt/vmwareapi/test_ds_util.py:221 -#: nova/virt/vmwareapi/ds_util.py:267 +#: nova/tests/virt/vmwareapi/test_ds_util.py:215 +#: nova/virt/vmwareapi/ds_util.py:261 #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" @@ -6986,12 +6950,12 @@ msgstr "" msgid "Multiple torrent URL fetcher extensions found. Failing." 
msgstr "" -#: nova/virt/block_device.py:241 +#: nova/virt/block_device.py:255 #, python-format msgid "Driver failed to attach volume %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/virt/block_device.py:363 +#: nova/virt/block_device.py:401 #, python-format msgid "Booting with volume %(volume_id)s at %(mountpoint)s" msgstr "" @@ -7006,29 +6970,29 @@ msgstr "" msgid "Invalid type for %s entry" msgstr "" -#: nova/virt/driver.py:705 +#: nova/virt/driver.py:708 msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "" -#: nova/virt/driver.py:1261 +#: nova/virt/driver.py:1264 msgid "Event must be an instance of nova.virt.event.Event" msgstr "" -#: nova/virt/driver.py:1267 +#: nova/virt/driver.py:1270 #, python-format msgid "Exception dispatching event %(event)s: %(ex)s" msgstr "" -#: nova/virt/driver.py:1361 +#: nova/virt/driver.py:1364 msgid "Compute driver option required, but not specified" msgstr "" -#: nova/virt/driver.py:1364 +#: nova/virt/driver.py:1367 #, python-format msgid "Loading compute driver '%s'" msgstr "" -#: nova/virt/driver.py:1371 +#: nova/virt/driver.py:1374 msgid "Unable to load the virtualization driver" msgstr "" @@ -7061,22 +7025,22 @@ msgstr "" msgid "Attempted to unfilter instance which is not filtered" msgstr "" -#: nova/virt/hardware.py:45 +#: nova/virt/hardware.py:46 #, python-format msgid "No CPUs available after parsing %r" msgstr "" -#: nova/virt/hardware.py:77 nova/virt/hardware.py:81 +#: nova/virt/hardware.py:78 nova/virt/hardware.py:82 #, python-format msgid "Invalid range expression %r" msgstr "" -#: nova/virt/hardware.py:90 +#: nova/virt/hardware.py:91 #, python-format msgid "Invalid exclusion expression %r" msgstr "" -#: nova/virt/hardware.py:97 +#: nova/virt/hardware.py:98 #, python-format msgid "Invalid inclusion expression %r" msgstr "" @@ -7567,22 +7531,22 @@ msgid "" " (%(e)s)" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:154 +#: nova/virt/disk/vfs/guestfs.py:156 #, python-format msgid "Error 
mounting %(imgfile)s with libguestfs (%(e)s)" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:170 +#: nova/virt/disk/vfs/guestfs.py:172 #, python-format msgid "Failed to close augeas %s" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:178 +#: nova/virt/disk/vfs/guestfs.py:180 #, python-format msgid "Failed to shutdown appliance %s" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:186 +#: nova/virt/disk/vfs/guestfs.py:188 #, python-format msgid "Failed to close guest handle %s" msgstr "" @@ -7697,6 +7661,13 @@ msgstr "" msgid "Failed to remove snapshot for VM %s" msgstr "" +#: nova/virt/hyperv/utilsfactory.py:68 +msgid "" +"The \"force_hyperv_utils_v1\" option cannot be set to \"True\" on Windows" +" Server / Hyper-V Server 2012 R2 or above as the WMI " +"\"root/virtualization\" namespace is no longer supported." +msgstr "" + #: nova/virt/hyperv/vhdutils.py:66 nova/virt/hyperv/vhdutilsv2.py:64 #, python-format msgid "Unsupported disk format: %s" @@ -7736,12 +7707,12 @@ msgstr "" msgid "Spawning new instance" msgstr "" -#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:576 +#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:574 #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "" -#: nova/virt/hyperv/vmops.py:307 nova/virt/vmwareapi/vmops.py:580 +#: nova/virt/hyperv/vmops.py:307 nova/virt/vmwareapi/vmops.py:578 msgid "Using config drive for instance" msgstr "" @@ -7750,7 +7721,7 @@ msgstr "" msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/hyperv/vmops.py:328 nova/virt/vmwareapi/vmops.py:605 +#: nova/virt/hyperv/vmops.py:328 nova/virt/vmwareapi/vmops.py:603 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" @@ -7841,135 +7812,151 @@ msgstr "" msgid "Unable to determine disk bus for '%s'" msgstr "" -#: nova/virt/libvirt/driver.py:552 +#: nova/virt/libvirt/driver.py:550 #, python-format msgid "Connection to libvirt lost: %s" msgstr "" -#: nova/virt/libvirt/driver.py:741 +#: 
nova/virt/libvirt/driver.py:739 #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "" -#: nova/virt/libvirt/driver.py:924 +#: nova/virt/libvirt/driver.py:922 msgid "operation time out" msgstr "" -#: nova/virt/libvirt/driver.py:1248 +#: nova/virt/libvirt/driver.py:1246 #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" -#: nova/virt/libvirt/driver.py:1255 +#: nova/virt/libvirt/driver.py:1253 #, python-format msgid "Volume sets block size, but libvirt '%s' or later is required." msgstr "" -#: nova/virt/libvirt/driver.py:1345 +#: nova/virt/libvirt/driver.py:1351 msgid "Swap only supports host devices" msgstr "" -#: nova/virt/libvirt/driver.py:1631 +#: nova/virt/libvirt/driver.py:1638 msgid "libvirt error while requesting blockjob info." msgstr "" -#: nova/virt/libvirt/driver.py:1774 +#: nova/virt/libvirt/driver.py:1783 msgid "Found no disk to snapshot." msgstr "" -#: nova/virt/libvirt/driver.py:1866 +#: nova/virt/libvirt/driver.py:1875 #, python-format msgid "Unknown type: %s" msgstr "" -#: nova/virt/libvirt/driver.py:1871 +#: nova/virt/libvirt/driver.py:1880 msgid "snapshot_id required in create_info" msgstr "" -#: nova/virt/libvirt/driver.py:1929 +#: nova/virt/libvirt/driver.py:1938 #, python-format msgid "Libvirt '%s' or later is required for online deletion of volume snapshots." msgstr "" -#: nova/virt/libvirt/driver.py:1936 +#: nova/virt/libvirt/driver.py:1945 #, python-format msgid "Unknown delete_info type %s" msgstr "" -#: nova/virt/libvirt/driver.py:1964 +#: nova/virt/libvirt/driver.py:1981 #, python-format msgid "Disk with id: %s not found attached to instance." 
msgstr "" -#: nova/virt/libvirt/driver.py:2406 nova/virt/xenapi/vmops.py:1561 +#: nova/virt/libvirt/driver.py:1990 +msgid "filename cannot be None" +msgstr "" + +#: nova/virt/libvirt/driver.py:2019 +#, python-format +msgid "no match found for %s" +msgstr "" + +#: nova/virt/libvirt/driver.py:2076 +#, python-format +msgid "" +"Relative blockcommit support was not detected. Libvirt '%s' or later is " +"required for online deletion of network storage-backed volume snapshots." +msgstr "" + +#: nova/virt/libvirt/driver.py:2491 nova/virt/xenapi/vmops.py:1561 msgid "Guest does not have a console available" msgstr "" -#: nova/virt/libvirt/driver.py:2735 +#: nova/virt/libvirt/driver.py:2820 #, python-format msgid "%s format is not supported" msgstr "" -#: nova/virt/libvirt/driver.py:2841 +#: nova/virt/libvirt/driver.py:2926 #, python-format msgid "Detaching PCI devices with libvirt < %(ver)s is not permitted" msgstr "" -#: nova/virt/libvirt/driver.py:2984 +#: nova/virt/libvirt/driver.py:3069 #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt " "hypervisor '%s' does not support selecting CPU models" msgstr "" -#: nova/virt/libvirt/driver.py:2990 +#: nova/virt/libvirt/driver.py:3075 msgid "Config requested a custom CPU model, but no model name was provided" msgstr "" -#: nova/virt/libvirt/driver.py:2994 +#: nova/virt/libvirt/driver.py:3079 msgid "A CPU model name should not be set when a host CPU model is requested" msgstr "" -#: nova/virt/libvirt/driver.py:3586 +#: nova/virt/libvirt/driver.py:3689 #, python-format msgid "" "Error from libvirt while looking up %(instance_id)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3607 +#: nova/virt/libvirt/driver.py:3710 #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:3873 +#: nova/virt/libvirt/driver.py:3976 msgid "Invalid vcpu_pin_set config, out 
of hypervisor cpu range." msgstr "" -#: nova/virt/libvirt/driver.py:3998 +#: nova/virt/libvirt/driver.py:4101 msgid "libvirt version is too old (does not support getVersion)" msgstr "" -#: nova/virt/libvirt/driver.py:4359 +#: nova/virt/libvirt/driver.py:4462 msgid "Block migration can not be used with shared storage." msgstr "" -#: nova/virt/libvirt/driver.py:4368 +#: nova/virt/libvirt/driver.py:4471 msgid "Live migration can not be used without shared storage." msgstr "" -#: nova/virt/libvirt/driver.py:4438 +#: nova/virt/libvirt/driver.py:4541 #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too " "large(available on destination host:%(available)s < need:%(necessary)s)" msgstr "" -#: nova/virt/libvirt/driver.py:4477 +#: nova/virt/libvirt/driver.py:4580 #, python-format msgid "" "CPU doesn't have compatibility.\n" @@ -7979,12 +7966,12 @@ msgid "" "Refer to %(u)s" msgstr "" -#: nova/virt/libvirt/driver.py:4540 +#: nova/virt/libvirt/driver.py:4643 #, python-format msgid "The firewall filter for %s does not exist" msgstr "" -#: nova/virt/libvirt/driver.py:4603 +#: nova/virt/libvirt/driver.py:4706 msgid "" "Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag " "or your destination node does not support retrieving listen addresses. " @@ -7993,7 +7980,7 @@ msgid "" "address (0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)." msgstr "" -#: nova/virt/libvirt/driver.py:4620 +#: nova/virt/libvirt/driver.py:4723 msgid "" "Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag," " and the graphics (VNC and/or SPICE) listen addresses on the destination" @@ -8003,14 +7990,14 @@ msgid "" "succeed, but the VM will continue to listen on the current addresses." 
msgstr "" -#: nova/virt/libvirt/driver.py:4997 +#: nova/virt/libvirt/driver.py:5100 #, python-format msgid "" "Error from libvirt while getting description of %(instance_name)s: [Error" " Code %(error_code)s] %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:5123 +#: nova/virt/libvirt/driver.py:5226 msgid "Unable to resize disk down." msgstr "" @@ -8074,19 +8061,19 @@ msgstr "" msgid "volume_clear='%s' is not handled" msgstr "" -#: nova/virt/libvirt/rbd.py:104 +#: nova/virt/libvirt/rbd_utils.py:104 msgid "rbd python libraries not found" msgstr "" -#: nova/virt/libvirt/rbd.py:159 +#: nova/virt/libvirt/rbd_utils.py:159 msgid "Not stored in rbd" msgstr "" -#: nova/virt/libvirt/rbd.py:163 +#: nova/virt/libvirt/rbd_utils.py:163 msgid "Blank components" msgstr "" -#: nova/virt/libvirt/rbd.py:166 +#: nova/virt/libvirt/rbd_utils.py:166 msgid "Not an rbd snapshot" msgstr "" @@ -8098,13 +8085,13 @@ msgstr "" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "" -#: nova/virt/libvirt/vif.py:338 nova/virt/libvirt/vif.py:545 -#: nova/virt/libvirt/vif.py:709 +#: nova/virt/libvirt/vif.py:322 nova/virt/libvirt/vif.py:508 +#: nova/virt/libvirt/vif.py:652 msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" -#: nova/virt/libvirt/vif.py:344 nova/virt/libvirt/vif.py:551 -#: nova/virt/libvirt/vif.py:715 +#: nova/virt/libvirt/vif.py:328 nova/virt/libvirt/vif.py:514 +#: nova/virt/libvirt/vif.py:658 #, python-format msgid "Unexpected vif_type=%s" msgstr "" @@ -8127,48 +8114,54 @@ msgstr "" msgid "Fibre Channel device not found." 
msgstr "" -#: nova/virt/vmwareapi/driver.py:127 +#: nova/virt/vmwareapi/driver.py:125 +msgid "" +"Must specify host_ip, host_username and host_password to use " +"vmwareapi.VMwareVCDriver" +msgstr "" + +#: nova/virt/vmwareapi/driver.py:134 #, python-format msgid "Invalid Regular Expression %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:141 +#: nova/virt/vmwareapi/driver.py:148 #, python-format msgid "All clusters specified %s were not found in the vCenter" msgstr "" -#: nova/virt/vmwareapi/driver.py:319 +#: nova/virt/vmwareapi/driver.py:342 #, python-format msgid "The resource %s does not exist" msgstr "" -#: nova/virt/vmwareapi/driver.py:381 +#: nova/virt/vmwareapi/driver.py:404 #, python-format msgid "Invalid cluster or resource pool name : %s" msgstr "" -#: nova/virt/vmwareapi/driver.py:555 +#: nova/virt/vmwareapi/driver.py:582 msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we " "do not return uptime for just one host." msgstr "" -#: nova/virt/vmwareapi/driver.py:678 +#: nova/virt/vmwareapi/driver.py:705 #, python-format msgid "Unable to validate session %s!" msgstr "" -#: nova/virt/vmwareapi/driver.py:720 +#: nova/virt/vmwareapi/driver.py:747 #, python-format msgid "Session %s is inactive!" 
msgstr "" -#: nova/virt/vmwareapi/driver.py:811 +#: nova/virt/vmwareapi/driver.py:838 #, python-format msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" msgstr "" -#: nova/virt/vmwareapi/driver.py:821 +#: nova/virt/vmwareapi/driver.py:848 #, python-format msgid "In vmwareapi:_poll_task, Got this error %s" msgstr "" @@ -8385,73 +8378,73 @@ msgstr "" msgid "Extending virtual disk failed with error: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:253 +#: nova/virt/vmwareapi/vmops.py:252 msgid "Image disk size greater than requested disk size" msgstr "" -#: nova/virt/vmwareapi/vmops.py:861 +#: nova/virt/vmwareapi/vmops.py:859 msgid "instance is not powered on" msgstr "" -#: nova/virt/vmwareapi/vmops.py:889 +#: nova/virt/vmwareapi/vmops.py:887 msgid "Instance does not exist on backend" msgstr "" -#: nova/virt/vmwareapi/vmops.py:916 +#: nova/virt/vmwareapi/vmops.py:914 #, python-format msgid "" "In vmwareapi:vmops:_destroy_instance, got this exception while un-" "registering the VM: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:939 +#: nova/virt/vmwareapi/vmops.py:937 msgid "" "In vmwareapi:vmops:_destroy_instance, exception while deleting the VM " "contents from the disk" msgstr "" -#: nova/virt/vmwareapi/vmops.py:971 +#: nova/virt/vmwareapi/vmops.py:969 msgid "pause not supported for vmwareapi" msgstr "" -#: nova/virt/vmwareapi/vmops.py:975 +#: nova/virt/vmwareapi/vmops.py:973 msgid "unpause not supported for vmwareapi" msgstr "" -#: nova/virt/vmwareapi/vmops.py:993 +#: nova/virt/vmwareapi/vmops.py:991 msgid "instance is powered off and cannot be suspended." msgstr "" -#: nova/virt/vmwareapi/vmops.py:1013 +#: nova/virt/vmwareapi/vmops.py:1011 msgid "instance is not in a suspended state" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1113 +#: nova/virt/vmwareapi/vmops.py:1111 msgid "Unable to shrink disk." 
msgstr "" -#: nova/virt/vmwareapi/vmops.py:1172 +#: nova/virt/vmwareapi/vmops.py:1170 #, python-format msgid "" "In vmwareapi:vmops:confirm_migration, got this exception while destroying" " the VM: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1248 nova/virt/xenapi/vmops.py:1500 +#: nova/virt/vmwareapi/vmops.py:1246 nova/virt/xenapi/vmops.py:1500 #, python-format msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1252 nova/virt/xenapi/vmops.py:1504 +#: nova/virt/vmwareapi/vmops.py:1250 nova/virt/xenapi/vmops.py:1504 msgid "Automatically hard rebooting" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1570 +#: nova/virt/vmwareapi/vmops.py:1568 #, python-format msgid "No device with interface-id %s exists on VM" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1580 +#: nova/virt/vmwareapi/vmops.py:1578 #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "" diff --git a/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po b/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po index e66d4d2e70..7806193e69 100644 --- a/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:04+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/nova/" @@ -196,7 +196,7 @@ msgstr "" msgid "Failed to dealloc network for failed instance" msgstr "" -#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +#: nova/compute/manager.py:1458 nova/compute/manager.py:3527 msgid "Error trying to reschedule" msgstr "" @@ -205,99 +205,139 @@ msgstr "" msgid "Instance failed network setup after %(attempts)d attempt(s)" msgstr "" -#: nova/compute/manager.py:1755 +#: 
nova/compute/manager.py:1761 msgid "Instance failed block device setup" msgstr "" -#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 -#: nova/compute/manager.py:4058 +#: nova/compute/manager.py:1781 nova/compute/manager.py:2123 +#: nova/compute/manager.py:4071 msgid "Instance failed to spawn" msgstr "" -#: nova/compute/manager.py:1957 +#: nova/compute/manager.py:1964 msgid "Unexpected build failure, not rescheduling build." msgstr "" -#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +#: nova/compute/manager.py:2033 nova/compute/manager.py:2085 msgid "Failed to allocate network(s)" msgstr "" -#: nova/compute/manager.py:2104 +#: nova/compute/manager.py:2111 msgid "Failure prepping block device" msgstr "" -#: nova/compute/manager.py:2137 +#: nova/compute/manager.py:2144 msgid "Failed to deallocate networks" msgstr "" -#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 -#: nova/compute/manager.py:5803 +#: nova/compute/manager.py:2374 nova/compute/manager.py:3718 +#: nova/compute/manager.py:5822 msgid "Setting instance vm_state to ERROR" msgstr "" -#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#: nova/compute/manager.py:2586 nova/compute/manager.py:4933 #, python-format msgid "Failed to get compute_info for %s" msgstr "" -#: nova/compute/manager.py:3005 +#: nova/compute/manager.py:3013 #, python-format msgid "set_admin_password failed: %s" msgstr "" -#: nova/compute/manager.py:3090 +#: nova/compute/manager.py:3098 msgid "Error trying to Rescue Instance" msgstr "" -#: nova/compute/manager.py:3711 +#: nova/compute/manager.py:3724 #, python-format msgid "Failed to rollback quota for failed finish_resize: %s" msgstr "" -#: nova/compute/manager.py:4310 +#: nova/compute/manager.py:4323 #, python-format msgid "Failed to attach %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4349 +#: nova/compute/manager.py:4362 #, python-format msgid "Failed to detach volume %(volume_id)s from %(mp)s" msgstr "" -#: 
nova/compute/manager.py:4422 +#: nova/compute/manager.py:4441 #, python-format msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" msgstr "" -#: nova/compute/manager.py:4429 +#: nova/compute/manager.py:4448 #, python-format msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4716 +#: nova/compute/manager.py:4735 #, python-format msgid "Pre live migration failed at %s" msgstr "" -#: nova/compute/manager.py:5216 +#: nova/compute/manager.py:5235 msgid "Periodic task failed to offload instance." msgstr "" -#: nova/compute/manager.py:5256 +#: nova/compute/manager.py:5275 #, python-format msgid "Failed to generate usage audit for instance on host %s" msgstr "" -#: nova/compute/manager.py:5446 +#: nova/compute/manager.py:5465 msgid "" "Periodic sync_power_state task had an error while processing an instance." msgstr "" -#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 -#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +#: nova/compute/manager.py:5568 nova/compute/manager.py:5577 +#: nova/compute/manager.py:5608 nova/compute/manager.py:5619 msgid "error during stop() in sync_power_state." 
msgstr "" +#: nova/network/neutronv2/api.py:234 +#, python-format +msgid "Neutron error creating port on network %s" +msgstr "" + +#: nova/network/neutronv2/api.py:418 +#, python-format +msgid "Failed to update port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:425 +#, python-format +msgid "Failed to delete port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524 +#, python-format +msgid "Failed to delete neutron port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:697 +#, python-format +msgid "Failed to access port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:931 +#, python-format +msgid "Unable to access floating IP %s" +msgstr "" + +#: nova/network/neutronv2/api.py:1065 +#, python-format +msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" +msgstr "" + +#: nova/network/neutronv2/api.py:1124 +#, python-format +msgid "Unable to update host of port %s" +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr "Falha ao notificar células de falha da instância" @@ -389,116 +429,116 @@ msgid "" "Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s" msgstr "" -#: nova/virt/libvirt/driver.py:641 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:766 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:929 +#: nova/virt/libvirt/driver.py:927 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1007 +#: nova/virt/libvirt/driver.py:1005 #, python-format msgid "Error from libvirt during undefine. 
Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1035 +#: nova/virt/libvirt/driver.py:1033 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1438 +#: nova/virt/libvirt/driver.py:1444 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1465 +#: nova/virt/libvirt/driver.py:1471 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1717 +#: nova/virt/libvirt/driver.py:1726 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1825 +#: nova/virt/libvirt/driver.py:1834 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1831 +#: nova/virt/libvirt/driver.py:1840 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1880 +#: nova/virt/libvirt/driver.py:1889 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2026 +#: nova/virt/libvirt/driver.py:2111 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 +#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2620 +#: nova/virt/libvirt/driver.py:2705 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2788 +#: nova/virt/libvirt/driver.py:2873 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2881 +#: nova/virt/libvirt/driver.py:2966 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." 
msgstr "" -#: nova/virt/libvirt/driver.py:3680 +#: nova/virt/libvirt/driver.py:3783 #, python-format msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3684 +#: nova/virt/libvirt/driver.py:3787 #, python-format msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3689 +#: nova/virt/libvirt/driver.py:3792 #, python-format msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3703 +#: nova/virt/libvirt/driver.py:3806 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:4012 +#: nova/virt/libvirt/driver.py:4115 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." msgstr "" -#: nova/virt/libvirt/driver.py:4691 +#: nova/virt/libvirt/driver.py:4794 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5487 +#: nova/virt/libvirt/driver.py:5596 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" @@ -515,17 +555,17 @@ msgid "" "%(size)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:130 +#: nova/virt/libvirt/imagecache.py:129 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:391 +#: nova/virt/libvirt/imagecache.py:390 #, python-format msgid "image %(id)s at (%(base_file)s): image verification failed" msgstr "" -#: nova/virt/libvirt/imagecache.py:448 +#: nova/virt/libvirt/imagecache.py:447 #, python-format msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "" @@ -535,19 +575,19 @@ msgstr "" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/rbd.py:62 +#: nova/virt/libvirt/rbd_utils.py:62 #, python-format msgid "error opening rbd image %s" msgstr "" -#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 -#: 
nova/virt/libvirt/vif.py:533 +#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474 +#: nova/virt/libvirt/vif.py:496 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 -#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 -#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 +#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560 +#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598 +#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639 msgid "Failed while unplugging vif" msgstr "" @@ -566,18 +606,18 @@ msgstr "" msgid "Couldn't unmount the GlusterFS share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:509 +#: nova/virt/vmwareapi/vmops.py:508 #, python-format msgid "" "Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1553 +#: nova/virt/vmwareapi/vmops.py:1551 #, python-format msgid "Attaching network adapter failed. Exception: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1593 +#: nova/virt/vmwareapi/vmops.py:1591 #, python-format msgid "Detaching network adapter failed. 
Exception: %s" msgstr "" diff --git a/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po b/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po index 1da5c68366..ac1c2850f7 100644 --- a/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" "PO-Revision-Date: 2014-07-16 14:42+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/nova/" @@ -44,7 +44,12 @@ msgstr "" msgid "Deleting network with id %s" msgstr "" -#: nova/compute/manager.py:5452 +#: nova/compute/manager.py:2663 +#, python-format +msgid "bringing vm to original state: '%s'" +msgstr "" + +#: nova/compute/manager.py:5471 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." @@ -146,106 +151,111 @@ msgstr "Excluindo linha duplicada com ID: %(id)s da tabela: %(table)s" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" +#: nova/virt/block_device.py:221 +#, python-format +msgid "preserve multipath_id %s" +msgstr "" + #: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:137 +#: nova/virt/disk/vfs/guestfs.py:139 msgid "Unable to force TCG mode, libguestfs too old?" msgstr "" -#: nova/virt/libvirt/driver.py:837 +#: nova/virt/libvirt/driver.py:835 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:950 +#: nova/virt/libvirt/driver.py:948 msgid "Instance destroyed successfully." msgstr "Instância destruída com êxito." -#: nova/virt/libvirt/driver.py:960 +#: nova/virt/libvirt/driver.py:958 msgid "Instance may be started again." 
msgstr "A instância pode ser iniciada novamente." -#: nova/virt/libvirt/driver.py:970 +#: nova/virt/libvirt/driver.py:968 msgid "Going to destroy instance again." msgstr "Destruindo a instância novamente." -#: nova/virt/libvirt/driver.py:1570 +#: nova/virt/libvirt/driver.py:1576 msgid "Beginning live snapshot process" msgstr "Começando o processo de captura instantânea em tempo real" -#: nova/virt/libvirt/driver.py:1573 +#: nova/virt/libvirt/driver.py:1579 msgid "Beginning cold snapshot process" msgstr "Iniciando processo de captura instantânea a frio" -#: nova/virt/libvirt/driver.py:1602 +#: nova/virt/libvirt/driver.py:1608 msgid "Snapshot extracted, beginning image upload" msgstr "Captura instantânea extraída, iniciando upload da imagem" -#: nova/virt/libvirt/driver.py:1614 +#: nova/virt/libvirt/driver.py:1620 msgid "Snapshot image upload complete" msgstr "Upload da imagem de captura instantânea concluído" -#: nova/virt/libvirt/driver.py:2047 +#: nova/virt/libvirt/driver.py:2132 msgid "Instance soft rebooted successfully." msgstr "Reinicialização virtual da instância bem-sucedida." -#: nova/virt/libvirt/driver.py:2090 +#: nova/virt/libvirt/driver.py:2175 msgid "Instance shutdown successfully." msgstr "A instância foi encerrada com êxito." -#: nova/virt/libvirt/driver.py:2098 +#: nova/virt/libvirt/driver.py:2183 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" "A instância pode ter sido reinicializada durante a reinicialização virtual, " "portanto retorne agora." -#: nova/virt/libvirt/driver.py:2167 +#: nova/virt/libvirt/driver.py:2252 msgid "Instance rebooted successfully." msgstr "Instância reinicializada com êxito." -#: nova/virt/libvirt/driver.py:2335 +#: nova/virt/libvirt/driver.py:2420 msgid "Instance spawned successfully." msgstr "Feito spawn da instância com êxito." 
-#: nova/virt/libvirt/driver.py:2351 +#: nova/virt/libvirt/driver.py:2436 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "dados: %(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 +#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "Log do console truncado retornado, %d bytes ignorados" -#: nova/virt/libvirt/driver.py:2646 +#: nova/virt/libvirt/driver.py:2731 msgid "Creating image" msgstr "Criando imagem" -#: nova/virt/libvirt/driver.py:2772 +#: nova/virt/libvirt/driver.py:2857 msgid "Using config drive" msgstr "Usando unidade de configuração" -#: nova/virt/libvirt/driver.py:2781 +#: nova/virt/libvirt/driver.py:2866 #, python-format msgid "Creating config drive at %(path)s" msgstr "Criando unidade de configuração em %(path)s" -#: nova/virt/libvirt/driver.py:3334 +#: nova/virt/libvirt/driver.py:3437 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4217 +#: nova/virt/libvirt/driver.py:4320 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4223 +#: nova/virt/libvirt/driver.py:4326 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " @@ -254,26 +264,26 @@ msgstr "" "Não foi possível localizar o domínio em libvirt para a instância %s. Não é " "possível obter estatísticas do bloco para o dispositivo" -#: nova/virt/libvirt/driver.py:4465 +#: nova/virt/libvirt/driver.py:4568 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5207 +#: nova/virt/libvirt/driver.py:5316 msgid "Instance running successfully." msgstr "Instância executando com êxito." 
-#: nova/virt/libvirt/driver.py:5481 +#: nova/virt/libvirt/driver.py:5590 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5494 +#: nova/virt/libvirt/driver.py:5603 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5498 +#: nova/virt/libvirt/driver.py:5607 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -290,12 +300,12 @@ msgstr "Assegurando filtros estáticos" msgid "Attempted to unfilter instance which is not filtered" msgstr "Tentou cancelar a filtragem da instância que não foi filtrada" -#: nova/virt/libvirt/imagecache.py:191 +#: nova/virt/libvirt/imagecache.py:190 #, python-format msgid "Writing stored info to %s" msgstr "Gravando informações armazenadas em %s" -#: nova/virt/libvirt/imagecache.py:401 +#: nova/virt/libvirt/imagecache.py:400 #, python-format msgid "" "image %(id)s at (%(base_file)s): image verification skipped, no hash stored" @@ -303,27 +313,27 @@ msgstr "" "imagem %(id)s em (%(base_file)s): verificação de imagem ignorada, nenhum " "hash armazenado" -#: nova/virt/libvirt/imagecache.py:410 +#: nova/virt/libvirt/imagecache.py:409 #, python-format msgid "%(id)s (%(base_file)s): generating checksum" msgstr "%(id)s (%(base_file)s): gerando soma de verificação" -#: nova/virt/libvirt/imagecache.py:438 +#: nova/virt/libvirt/imagecache.py:437 #, python-format msgid "Base file too young to remove: %s" msgstr "O arquivo base é muito jovem para ser removido: %s" -#: nova/virt/libvirt/imagecache.py:441 +#: nova/virt/libvirt/imagecache.py:440 #, python-format msgid "Removing base file: %s" msgstr "Removendo arquivo base: %s" -#: nova/virt/libvirt/imagecache.py:459 +#: nova/virt/libvirt/imagecache.py:458 #, python-format msgid "image %(id)s at (%(base_file)s): checking" msgstr "imagem %(id)s em (%(base_file)s): verificando" -#: nova/virt/libvirt/imagecache.py:483 +#: nova/virt/libvirt/imagecache.py:482 #, python-format msgid "" "image %(id)s at (%(base_file)s): in 
use: on this node %(local)d local, " @@ -332,17 +342,17 @@ msgstr "" "imagem %(id)s em (%(base_file)s): em uso: neste nó %(local)d local, " "%(remote)d em outros nós que compartilham esse armazenamento de instância" -#: nova/virt/libvirt/imagecache.py:550 +#: nova/virt/libvirt/imagecache.py:549 #, python-format msgid "Active base files: %s" msgstr "Arquivos base ativos: %s" -#: nova/virt/libvirt/imagecache.py:553 +#: nova/virt/libvirt/imagecache.py:552 #, python-format msgid "Corrupt base files: %s" msgstr "Arquivos base corrompidos: %s" -#: nova/virt/libvirt/imagecache.py:557 +#: nova/virt/libvirt/imagecache.py:556 #, python-format msgid "Removable base files: %s" msgstr "Arquivos base removíveis: %s" diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po b/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po index 5c174f713d..cce29b7642 100644 --- a/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po +++ b/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:04+0000\n" "PO-Revision-Date: 2014-06-14 19:30+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Chinese (China) (http://www.transifex.com/projects/p/nova/" @@ -196,7 +196,7 @@ msgstr "" msgid "Failed to dealloc network for failed instance" msgstr "" -#: nova/compute/manager.py:1458 nova/compute/manager.py:3514 +#: nova/compute/manager.py:1458 nova/compute/manager.py:3527 msgid "Error trying to reschedule" msgstr "" @@ -205,99 +205,139 @@ msgstr "" msgid "Instance failed network setup after %(attempts)d attempt(s)" msgstr "" -#: nova/compute/manager.py:1755 +#: nova/compute/manager.py:1761 msgid "Instance failed block device setup" msgstr "" -#: nova/compute/manager.py:1775 nova/compute/manager.py:2116 -#: nova/compute/manager.py:4058 +#: nova/compute/manager.py:1781 nova/compute/manager.py:2123 +#: 
nova/compute/manager.py:4071 msgid "Instance failed to spawn" msgstr "" -#: nova/compute/manager.py:1957 +#: nova/compute/manager.py:1964 msgid "Unexpected build failure, not rescheduling build." msgstr "" -#: nova/compute/manager.py:2026 nova/compute/manager.py:2078 +#: nova/compute/manager.py:2033 nova/compute/manager.py:2085 msgid "Failed to allocate network(s)" msgstr "" -#: nova/compute/manager.py:2104 +#: nova/compute/manager.py:2111 msgid "Failure prepping block device" msgstr "" -#: nova/compute/manager.py:2137 +#: nova/compute/manager.py:2144 msgid "Failed to deallocate networks" msgstr "" -#: nova/compute/manager.py:2367 nova/compute/manager.py:3705 -#: nova/compute/manager.py:5803 +#: nova/compute/manager.py:2374 nova/compute/manager.py:3718 +#: nova/compute/manager.py:5822 msgid "Setting instance vm_state to ERROR" msgstr "" -#: nova/compute/manager.py:2579 nova/compute/manager.py:4914 +#: nova/compute/manager.py:2586 nova/compute/manager.py:4933 #, python-format msgid "Failed to get compute_info for %s" msgstr "" -#: nova/compute/manager.py:3005 +#: nova/compute/manager.py:3013 #, python-format msgid "set_admin_password failed: %s" msgstr "" -#: nova/compute/manager.py:3090 +#: nova/compute/manager.py:3098 msgid "Error trying to Rescue Instance" msgstr "" -#: nova/compute/manager.py:3711 +#: nova/compute/manager.py:3724 #, python-format msgid "Failed to rollback quota for failed finish_resize: %s" msgstr "" -#: nova/compute/manager.py:4310 +#: nova/compute/manager.py:4323 #, python-format msgid "Failed to attach %(volume_id)s at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4349 +#: nova/compute/manager.py:4362 #, python-format msgid "Failed to detach volume %(volume_id)s from %(mp)s" msgstr "" -#: nova/compute/manager.py:4422 +#: nova/compute/manager.py:4441 #, python-format msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s" msgstr "" -#: nova/compute/manager.py:4429 +#: nova/compute/manager.py:4448 #, python-format msgid 
"Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s" msgstr "" -#: nova/compute/manager.py:4716 +#: nova/compute/manager.py:4735 #, python-format msgid "Pre live migration failed at %s" msgstr "" -#: nova/compute/manager.py:5216 +#: nova/compute/manager.py:5235 msgid "Periodic task failed to offload instance." msgstr "" -#: nova/compute/manager.py:5256 +#: nova/compute/manager.py:5275 #, python-format msgid "Failed to generate usage audit for instance on host %s" msgstr "" -#: nova/compute/manager.py:5446 +#: nova/compute/manager.py:5465 msgid "" "Periodic sync_power_state task had an error while processing an instance." msgstr "" -#: nova/compute/manager.py:5549 nova/compute/manager.py:5558 -#: nova/compute/manager.py:5589 nova/compute/manager.py:5600 +#: nova/compute/manager.py:5568 nova/compute/manager.py:5577 +#: nova/compute/manager.py:5608 nova/compute/manager.py:5619 msgid "error during stop() in sync_power_state." msgstr "" +#: nova/network/neutronv2/api.py:234 +#, python-format +msgid "Neutron error creating port on network %s" +msgstr "" + +#: nova/network/neutronv2/api.py:418 +#, python-format +msgid "Failed to update port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:425 +#, python-format +msgid "Failed to delete port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524 +#, python-format +msgid "Failed to delete neutron port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:697 +#, python-format +msgid "Failed to access port %s" +msgstr "" + +#: nova/network/neutronv2/api.py:931 +#, python-format +msgid "Unable to access floating IP %s" +msgstr "" + +#: nova/network/neutronv2/api.py:1065 +#, python-format +msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s" +msgstr "" + +#: nova/network/neutronv2/api.py:1124 +#, python-format +msgid "Unable to update host of port %s" +msgstr "" + #: nova/objects/instance_fault.py:87 msgid "Failed to notify cells of instance fault" msgstr 
"未能通知单元有关实例故障的事项" @@ -389,116 +429,116 @@ msgid "" "Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s" msgstr "" -#: nova/virt/libvirt/driver.py:641 +#: nova/virt/libvirt/driver.py:639 #, python-format msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater." msgstr "" -#: nova/virt/libvirt/driver.py:766 +#: nova/virt/libvirt/driver.py:764 #, python-format msgid "Connection to libvirt failed: %s" msgstr "" -#: nova/virt/libvirt/driver.py:929 +#: nova/virt/libvirt/driver.py:927 #, python-format msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1007 +#: nova/virt/libvirt/driver.py:1005 #, python-format msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1035 +#: nova/virt/libvirt/driver.py:1033 #, python-format msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:1438 +#: nova/virt/libvirt/driver.py:1444 msgid "attaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1465 +#: nova/virt/libvirt/driver.py:1471 msgid "detaching network adapter failed." msgstr "" -#: nova/virt/libvirt/driver.py:1717 +#: nova/virt/libvirt/driver.py:1726 msgid "Failed to send updated snapshot status to volume service." msgstr "" -#: nova/virt/libvirt/driver.py:1825 +#: nova/virt/libvirt/driver.py:1834 msgid "" "Unable to create quiesced VM snapshot, attempting again with quiescing " "disabled." msgstr "" -#: nova/virt/libvirt/driver.py:1831 +#: nova/virt/libvirt/driver.py:1840 msgid "Unable to create VM snapshot, failing volume_snapshot operation." msgstr "" -#: nova/virt/libvirt/driver.py:1880 +#: nova/virt/libvirt/driver.py:1889 msgid "" "Error occurred during volume_snapshot_create, sending error status to Cinder." 
msgstr "" -#: nova/virt/libvirt/driver.py:2026 +#: nova/virt/libvirt/driver.py:2111 msgid "" "Error occurred during volume_snapshot_delete, sending error status to Cinder." msgstr "" -#: nova/virt/libvirt/driver.py:2492 nova/virt/libvirt/driver.py:2497 +#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582 #, python-format msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" msgstr "" -#: nova/virt/libvirt/driver.py:2620 +#: nova/virt/libvirt/driver.py:2705 #, python-format msgid "Error injecting data into image %(img_id)s (%(e)s)" msgstr "" -#: nova/virt/libvirt/driver.py:2788 +#: nova/virt/libvirt/driver.py:2873 #, python-format msgid "Creating config drive failed with error: %s" msgstr "" -#: nova/virt/libvirt/driver.py:2881 +#: nova/virt/libvirt/driver.py:2966 #, python-format msgid "Attaching PCI devices %(dev)s to %(dom)s failed." msgstr "" -#: nova/virt/libvirt/driver.py:3680 +#: nova/virt/libvirt/driver.py:3783 #, python-format msgid "Error defining a domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3684 +#: nova/virt/libvirt/driver.py:3787 #, python-format msgid "Error launching a defined domain with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3689 +#: nova/virt/libvirt/driver.py:3792 #, python-format msgid "Error enabling hairpin mode with XML: %s" msgstr "" -#: nova/virt/libvirt/driver.py:3703 +#: nova/virt/libvirt/driver.py:3806 #, python-format msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s" msgstr "" -#: nova/virt/libvirt/driver.py:4012 +#: nova/virt/libvirt/driver.py:4115 #, python-format msgid "" "Hostname has changed from %(old)s to %(new)s. A restart is required to take " "effect." 
msgstr "" -#: nova/virt/libvirt/driver.py:4691 +#: nova/virt/libvirt/driver.py:4794 #, python-format msgid "Live Migration failure: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5487 +#: nova/virt/libvirt/driver.py:5596 #, python-format msgid "Failed to cleanup directory %(target)s: %(e)s" msgstr "" @@ -515,17 +555,17 @@ msgid "" "%(size)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:130 +#: nova/virt/libvirt/imagecache.py:129 #, python-format msgid "Error reading image info file %(filename)s: %(error)s" msgstr "" -#: nova/virt/libvirt/imagecache.py:391 +#: nova/virt/libvirt/imagecache.py:390 #, python-format msgid "image %(id)s at (%(base_file)s): image verification failed" msgstr "" -#: nova/virt/libvirt/imagecache.py:448 +#: nova/virt/libvirt/imagecache.py:447 #, python-format msgid "Failed to remove %(base_file)s, error was %(error)s" msgstr "" @@ -535,19 +575,19 @@ msgstr "" msgid "ignoring unrecognized volume_clear='%s' value" msgstr "" -#: nova/virt/libvirt/rbd.py:62 +#: nova/virt/libvirt/rbd_utils.py:62 #, python-format msgid "error opening rbd image %s" msgstr "" -#: nova/virt/libvirt/vif.py:485 nova/virt/libvirt/vif.py:509 -#: nova/virt/libvirt/vif.py:533 +#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474 +#: nova/virt/libvirt/vif.py:496 msgid "Failed while plugging vif" msgstr "" -#: nova/virt/libvirt/vif.py:588 nova/virt/libvirt/vif.py:605 -#: nova/virt/libvirt/vif.py:627 nova/virt/libvirt/vif.py:649 -#: nova/virt/libvirt/vif.py:674 nova/virt/libvirt/vif.py:696 +#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560 +#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598 +#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639 msgid "Failed while unplugging vif" msgstr "" @@ -566,18 +606,18 @@ msgstr "" msgid "Couldn't unmount the GlusterFS share %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:509 +#: nova/virt/vmwareapi/vmops.py:508 #, python-format msgid "" "Failed to copy cached image %(source)s to %(dest)s for resize: 
%(error)s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1553 +#: nova/virt/vmwareapi/vmops.py:1551 #, python-format msgid "Attaching network adapter failed. Exception: %s" msgstr "" -#: nova/virt/vmwareapi/vmops.py:1593 +#: nova/virt/vmwareapi/vmops.py:1591 #, python-format msgid "Detaching network adapter failed. Exception: %s" msgstr "" diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po b/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po index eba72ed4f7..1e340e8082 100644 --- a/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" "PO-Revision-Date: 2014-07-16 14:42+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Chinese (China) (http://www.transifex.com/projects/p/nova/" @@ -44,7 +44,12 @@ msgstr "" msgid "Deleting network with id %s" msgstr "" -#: nova/compute/manager.py:5452 +#: nova/compute/manager.py:2663 +#, python-format +msgid "bringing vm to original state: '%s'" +msgstr "" + +#: nova/compute/manager.py:5471 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." @@ -146,130 +151,135 @@ msgstr "正在从表 %(table)s 中删除具有id %(id)s 的重复行" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" +#: nova/virt/block_device.py:221 +#, python-format +msgid "preserve multipath_id %s" +msgstr "" + #: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:137 +#: nova/virt/disk/vfs/guestfs.py:139 msgid "Unable to force TCG mode, libguestfs too old?" 
msgstr "" -#: nova/virt/libvirt/driver.py:837 +#: nova/virt/libvirt/driver.py:835 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:950 +#: nova/virt/libvirt/driver.py:948 msgid "Instance destroyed successfully." msgstr "实例销毁成功。" -#: nova/virt/libvirt/driver.py:960 +#: nova/virt/libvirt/driver.py:958 msgid "Instance may be started again." msgstr "可再次启动实例。" -#: nova/virt/libvirt/driver.py:970 +#: nova/virt/libvirt/driver.py:968 msgid "Going to destroy instance again." msgstr "将再次销毁实例。" -#: nova/virt/libvirt/driver.py:1570 +#: nova/virt/libvirt/driver.py:1576 msgid "Beginning live snapshot process" msgstr "正在开始实时快照流程" -#: nova/virt/libvirt/driver.py:1573 +#: nova/virt/libvirt/driver.py:1579 msgid "Beginning cold snapshot process" msgstr "正在结束冷快照流程" -#: nova/virt/libvirt/driver.py:1602 +#: nova/virt/libvirt/driver.py:1608 msgid "Snapshot extracted, beginning image upload" msgstr "已抽取快照,正在开始映像上载" -#: nova/virt/libvirt/driver.py:1614 +#: nova/virt/libvirt/driver.py:1620 msgid "Snapshot image upload complete" msgstr "快照映像上载完成" -#: nova/virt/libvirt/driver.py:2047 +#: nova/virt/libvirt/driver.py:2132 msgid "Instance soft rebooted successfully." msgstr "已成功执行实例软重新引导。" -#: nova/virt/libvirt/driver.py:2090 +#: nova/virt/libvirt/driver.py:2175 msgid "Instance shutdown successfully." msgstr "已成功关闭实例。" -#: nova/virt/libvirt/driver.py:2098 +#: nova/virt/libvirt/driver.py:2183 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "在软重新引导期间,可能已重新引导实例,因此会立即返回。" -#: nova/virt/libvirt/driver.py:2167 +#: nova/virt/libvirt/driver.py:2252 msgid "Instance rebooted successfully." msgstr "实例成功重启。" -#: nova/virt/libvirt/driver.py:2335 +#: nova/virt/libvirt/driver.py:2420 msgid "Instance spawned successfully." 
msgstr "实例成功生产。" -#: nova/virt/libvirt/driver.py:2351 +#: nova/virt/libvirt/driver.py:2436 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "data:%(data)r, fpath: %(fpath)r" -#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 +#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "已返回截断的控制台日志,忽略了 %d 个字节" -#: nova/virt/libvirt/driver.py:2646 +#: nova/virt/libvirt/driver.py:2731 msgid "Creating image" msgstr "正在创建镜像" -#: nova/virt/libvirt/driver.py:2772 +#: nova/virt/libvirt/driver.py:2857 msgid "Using config drive" msgstr "正在使用配置驱动器" -#: nova/virt/libvirt/driver.py:2781 +#: nova/virt/libvirt/driver.py:2866 #, python-format msgid "Creating config drive at %(path)s" msgstr "正在 %(path)s 处创建配置驱动器" -#: nova/virt/libvirt/driver.py:3334 +#: nova/virt/libvirt/driver.py:3437 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4217 +#: nova/virt/libvirt/driver.py:4320 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4223 +#: nova/virt/libvirt/driver.py:4326 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" msgstr "对于实例 %s,在 libvirt 中找不到域。无法获取设备的块统计信息" -#: nova/virt/libvirt/driver.py:4465 +#: nova/virt/libvirt/driver.py:4568 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5207 +#: nova/virt/libvirt/driver.py:5316 msgid "Instance running successfully." 
msgstr "实例正在成功运行。" -#: nova/virt/libvirt/driver.py:5481 +#: nova/virt/libvirt/driver.py:5590 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5494 +#: nova/virt/libvirt/driver.py:5603 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5498 +#: nova/virt/libvirt/driver.py:5607 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -286,38 +296,38 @@ msgstr "正在确保静态过滤器" msgid "Attempted to unfilter instance which is not filtered" msgstr "试图不过滤没有过滤的实例" -#: nova/virt/libvirt/imagecache.py:191 +#: nova/virt/libvirt/imagecache.py:190 #, python-format msgid "Writing stored info to %s" msgstr "正在将已存储的信息写入 %s" -#: nova/virt/libvirt/imagecache.py:401 +#: nova/virt/libvirt/imagecache.py:400 #, python-format msgid "" "image %(id)s at (%(base_file)s): image verification skipped, no hash stored" msgstr "(%(base_file)s) 处的映像 %(id)s:已跳过映像验证,未存储任何散列" -#: nova/virt/libvirt/imagecache.py:410 +#: nova/virt/libvirt/imagecache.py:409 #, python-format msgid "%(id)s (%(base_file)s): generating checksum" msgstr "%(id)s (%(base_file)s):正在生成校验和" -#: nova/virt/libvirt/imagecache.py:438 +#: nova/virt/libvirt/imagecache.py:437 #, python-format msgid "Base file too young to remove: %s" msgstr "基文件太新不需要删除:%s" -#: nova/virt/libvirt/imagecache.py:441 +#: nova/virt/libvirt/imagecache.py:440 #, python-format msgid "Removing base file: %s" msgstr "正在删除基文件:%s" -#: nova/virt/libvirt/imagecache.py:459 +#: nova/virt/libvirt/imagecache.py:458 #, python-format msgid "image %(id)s at (%(base_file)s): checking" msgstr "(%(base_file)s) 处的映像 %(id)s:正在检查" -#: nova/virt/libvirt/imagecache.py:483 +#: nova/virt/libvirt/imagecache.py:482 #, python-format msgid "" "image %(id)s at (%(base_file)s): in use: on this node %(local)d local, " @@ -326,17 +336,17 @@ msgstr "" "(%(base_file)s) 处的映像 %(id)s:在使用中:在此节点上,%(local)d 本地;在共享" "此实例存储器的其他节点上,%(remote)d" -#: nova/virt/libvirt/imagecache.py:550 +#: nova/virt/libvirt/imagecache.py:549 
#, python-format msgid "Active base files: %s" msgstr "活跃的基文件:%s" -#: nova/virt/libvirt/imagecache.py:553 +#: nova/virt/libvirt/imagecache.py:552 #, python-format msgid "Corrupt base files: %s" msgstr "损坏的基文件:%s" -#: nova/virt/libvirt/imagecache.py:557 +#: nova/virt/libvirt/imagecache.py:556 #, python-format msgid "Removable base files: %s" msgstr "可删除的基文件:%s" diff --git a/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po b/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po index 76a4587a3c..82536fcae3 100644 --- a/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po +++ b/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2014-08-12 06:05+0000\n" +"POT-Creation-Date: 2014-08-18 06:03+0000\n" "PO-Revision-Date: 2014-06-18 19:31+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Chinese (Taiwan) (http://www.transifex.com/projects/p/nova/" @@ -44,7 +44,12 @@ msgstr "" msgid "Deleting network with id %s" msgstr "" -#: nova/compute/manager.py:5452 +#: nova/compute/manager.py:2663 +#, python-format +msgid "bringing vm to original state: '%s'" +msgstr "" + +#: nova/compute/manager.py:5471 #, python-format msgid "" "During sync_power_state the instance has a pending task (%(task)s). Skip." @@ -146,130 +151,135 @@ msgstr "" msgid "%(num_values)d values found, of which the minimum value will be used." msgstr "" +#: nova/virt/block_device.py:221 +#, python-format +msgid "preserve multipath_id %s" +msgstr "" + #: nova/virt/firewall.py:444 #, python-format msgid "instance chain %s disappeared during refresh, skipping" msgstr "" -#: nova/virt/disk/vfs/guestfs.py:137 +#: nova/virt/disk/vfs/guestfs.py:139 msgid "Unable to force TCG mode, libguestfs too old?" 
msgstr "" -#: nova/virt/libvirt/driver.py:837 +#: nova/virt/libvirt/driver.py:835 #, python-format msgid "" "Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s" msgstr "" -#: nova/virt/libvirt/driver.py:950 +#: nova/virt/libvirt/driver.py:948 msgid "Instance destroyed successfully." msgstr "" -#: nova/virt/libvirt/driver.py:960 +#: nova/virt/libvirt/driver.py:958 msgid "Instance may be started again." msgstr "" -#: nova/virt/libvirt/driver.py:970 +#: nova/virt/libvirt/driver.py:968 msgid "Going to destroy instance again." msgstr "" -#: nova/virt/libvirt/driver.py:1570 +#: nova/virt/libvirt/driver.py:1576 msgid "Beginning live snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1573 +#: nova/virt/libvirt/driver.py:1579 msgid "Beginning cold snapshot process" msgstr "" -#: nova/virt/libvirt/driver.py:1602 +#: nova/virt/libvirt/driver.py:1608 msgid "Snapshot extracted, beginning image upload" msgstr "" -#: nova/virt/libvirt/driver.py:1614 +#: nova/virt/libvirt/driver.py:1620 msgid "Snapshot image upload complete" msgstr "" -#: nova/virt/libvirt/driver.py:2047 +#: nova/virt/libvirt/driver.py:2132 msgid "Instance soft rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2090 +#: nova/virt/libvirt/driver.py:2175 msgid "Instance shutdown successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2098 +#: nova/virt/libvirt/driver.py:2183 msgid "Instance may have been rebooted during soft reboot, so return now." msgstr "" -#: nova/virt/libvirt/driver.py:2167 +#: nova/virt/libvirt/driver.py:2252 msgid "Instance rebooted successfully." msgstr "" -#: nova/virt/libvirt/driver.py:2335 +#: nova/virt/libvirt/driver.py:2420 msgid "Instance spawned successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:2351 +#: nova/virt/libvirt/driver.py:2436 #, python-format msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/virt/libvirt/driver.py:2390 nova/virt/libvirt/driver.py:2417 +#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502 #, python-format msgid "Truncated console log returned, %d bytes ignored" msgstr "" -#: nova/virt/libvirt/driver.py:2646 +#: nova/virt/libvirt/driver.py:2731 msgid "Creating image" msgstr "" -#: nova/virt/libvirt/driver.py:2772 +#: nova/virt/libvirt/driver.py:2857 msgid "Using config drive" msgstr "" -#: nova/virt/libvirt/driver.py:2781 +#: nova/virt/libvirt/driver.py:2866 #, python-format msgid "Creating config drive at %(path)s" msgstr "" -#: nova/virt/libvirt/driver.py:3334 +#: nova/virt/libvirt/driver.py:3437 msgid "Configuring timezone for windows instance to localtime" msgstr "" -#: nova/virt/libvirt/driver.py:4217 +#: nova/virt/libvirt/driver.py:4320 #, python-format msgid "" "Getting block stats failed, device might have been detached. Instance=" "%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s" msgstr "" -#: nova/virt/libvirt/driver.py:4223 +#: nova/virt/libvirt/driver.py:4326 #, python-format msgid "" "Could not find domain in libvirt for instance %s. Cannot get block stats for " "device" msgstr "" -#: nova/virt/libvirt/driver.py:4465 +#: nova/virt/libvirt/driver.py:4568 #, python-format msgid "Instance launched has CPU info: %s" msgstr "" -#: nova/virt/libvirt/driver.py:5207 +#: nova/virt/libvirt/driver.py:5316 msgid "Instance running successfully." 
msgstr "" -#: nova/virt/libvirt/driver.py:5481 +#: nova/virt/libvirt/driver.py:5590 #, python-format msgid "Deleting instance files %s" msgstr "" -#: nova/virt/libvirt/driver.py:5494 +#: nova/virt/libvirt/driver.py:5603 #, python-format msgid "Deletion of %s failed" msgstr "" -#: nova/virt/libvirt/driver.py:5498 +#: nova/virt/libvirt/driver.py:5607 #, python-format msgid "Deletion of %s complete" msgstr "" @@ -286,38 +296,38 @@ msgstr "" msgid "Attempted to unfilter instance which is not filtered" msgstr "" -#: nova/virt/libvirt/imagecache.py:191 +#: nova/virt/libvirt/imagecache.py:190 #, python-format msgid "Writing stored info to %s" msgstr "正在將儲存的資訊寫入 %s" -#: nova/virt/libvirt/imagecache.py:401 +#: nova/virt/libvirt/imagecache.py:400 #, python-format msgid "" "image %(id)s at (%(base_file)s): image verification skipped, no hash stored" msgstr "映像檔 %(id)s (%(base_file)s):已跳過映像檔驗證,未儲存雜湊" -#: nova/virt/libvirt/imagecache.py:410 +#: nova/virt/libvirt/imagecache.py:409 #, python-format msgid "%(id)s (%(base_file)s): generating checksum" msgstr "%(id)s (%(base_file)s):正在產生總和檢查" -#: nova/virt/libvirt/imagecache.py:438 +#: nova/virt/libvirt/imagecache.py:437 #, python-format msgid "Base file too young to remove: %s" msgstr "基本檔案太新,無法移除:%s" -#: nova/virt/libvirt/imagecache.py:441 +#: nova/virt/libvirt/imagecache.py:440 #, python-format msgid "Removing base file: %s" msgstr "正在移除基本檔案:%s" -#: nova/virt/libvirt/imagecache.py:459 +#: nova/virt/libvirt/imagecache.py:458 #, python-format msgid "image %(id)s at (%(base_file)s): checking" msgstr "映像檔 %(id)s (%(base_file)s):正在檢查" -#: nova/virt/libvirt/imagecache.py:483 +#: nova/virt/libvirt/imagecache.py:482 #, python-format msgid "" "image %(id)s at (%(base_file)s): in use: on this node %(local)d local, " @@ -326,17 +336,17 @@ msgstr "" "映像檔 %(id)s (%(base_file)s):使用中:%(local)d 個在此節點上(本" "端),%(remote)d 個在其他共用此實例儲存體的節點上" -#: nova/virt/libvirt/imagecache.py:550 +#: nova/virt/libvirt/imagecache.py:549 #, python-format msgid "Active 
base files: %s" msgstr "作用中的基本檔案:%s" -#: nova/virt/libvirt/imagecache.py:553 +#: nova/virt/libvirt/imagecache.py:552 #, python-format msgid "Corrupt base files: %s" msgstr "已毀損的基本檔案:%s" -#: nova/virt/libvirt/imagecache.py:557 +#: nova/virt/libvirt/imagecache.py:556 #, python-format msgid "Removable base files: %s" msgstr "可移除的基本檔案:%s" From 86a4bd7d9b34057f04ebbe2d2131f033d33d082b Mon Sep 17 00:00:00 2001 From: Adelina Tuvenie Date: Mon, 4 Aug 2014 19:02:16 +0300 Subject: [PATCH 403/486] Fixes Hyper-V vm state issue The method which gets VM related information can fail if the vm is in an intermediary state such as "Shutting down". The reason is that some of the Hyper-V specific vm states are not defined as possible states. This patch assures that a valid state is always returned by this method. Change-Id: I64e95f6b537e83a30dd64474f63f140e0e8e7373 Closes-Bug: #1352428 --- nova/virt/hyperv/vmutils.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) mode change 100644 => 100755 nova/virt/hyperv/vmutils.py diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py old mode 100644 new mode 100755 index 81d2ed98f6..87c3d5258f --- a/nova/virt/hyperv/vmutils.py +++ b/nova/virt/hyperv/vmutils.py @@ -130,7 +130,13 @@ def get_vm_summary_info(self, vm_name): if si.UpTime is not None: up_time = long(si.UpTime) - enabled_state = self._enabled_states_map[si.EnabledState] + # Nova requires a valid state to be returned. Hyper-V has more + # states than Nova, typically intermediate ones and since there is + # no direct mapping for those, ENABLED is the only reasonable option + # considering that in all the non mappable states the instance + # is running. + enabled_state = self._enabled_states_map.get(si.EnabledState, + constants.HYPERV_VM_STATE_ENABLED) summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors, 'EnabledState': enabled_state, From af3ef1dc959735920d3fcfaf9ea2b22dc56f8ff5 Mon Sep 17 00:00:00 2001 From: "Daniel P. 
Berrange" Date: Thu, 24 Jul 2014 13:19:41 +0100 Subject: [PATCH 404/486] libvirt: reduce indentation in is_vif_model_valid_for_virt For some reason the is_vif_model_valid_for_virt method is using 8 space indentation instead of the normal 4 spaces. Change-Id: Ibbc86af5fbea3a115d344907a1d95fb184b61ebc --- nova/virt/libvirt/vif.py | 58 ++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 4885828e92..3abe5e8f57 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -48,35 +48,35 @@ def is_vif_model_valid_for_virt(virt_type, vif_model): - valid_models = { - 'qemu': [network_model.VIF_MODEL_VIRTIO, - network_model.VIF_MODEL_NE2K_PCI, - network_model.VIF_MODEL_PCNET, - network_model.VIF_MODEL_RTL8139, - network_model.VIF_MODEL_E1000, - network_model.VIF_MODEL_SPAPR_VLAN], - 'kvm': [network_model.VIF_MODEL_VIRTIO, - network_model.VIF_MODEL_NE2K_PCI, - network_model.VIF_MODEL_PCNET, - network_model.VIF_MODEL_RTL8139, - network_model.VIF_MODEL_E1000, - network_model.VIF_MODEL_SPAPR_VLAN], - 'xen': [network_model.VIF_MODEL_NETFRONT, - network_model.VIF_MODEL_NE2K_PCI, - network_model.VIF_MODEL_PCNET, - network_model.VIF_MODEL_RTL8139, - network_model.VIF_MODEL_E1000], - 'lxc': [], - 'uml': [], - } - - if vif_model is None: - return True - - if virt_type not in valid_models: - raise exception.UnsupportedVirtType(virt=virt_type) - - return vif_model in valid_models[virt_type] + valid_models = { + 'qemu': [network_model.VIF_MODEL_VIRTIO, + network_model.VIF_MODEL_NE2K_PCI, + network_model.VIF_MODEL_PCNET, + network_model.VIF_MODEL_RTL8139, + network_model.VIF_MODEL_E1000, + network_model.VIF_MODEL_SPAPR_VLAN], + 'kvm': [network_model.VIF_MODEL_VIRTIO, + network_model.VIF_MODEL_NE2K_PCI, + network_model.VIF_MODEL_PCNET, + network_model.VIF_MODEL_RTL8139, + network_model.VIF_MODEL_E1000, + network_model.VIF_MODEL_SPAPR_VLAN], + 'xen': 
[network_model.VIF_MODEL_NETFRONT, + network_model.VIF_MODEL_NE2K_PCI, + network_model.VIF_MODEL_PCNET, + network_model.VIF_MODEL_RTL8139, + network_model.VIF_MODEL_E1000], + 'lxc': [], + 'uml': [], + } + + if vif_model is None: + return True + + if virt_type not in valid_models: + raise exception.UnsupportedVirtType(virt=virt_type) + + return vif_model in valid_models[virt_type] class LibvirtGenericVIFDriver(object): From 26504d71ceaecf22f135d8321769db801290c405 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Wed, 19 Feb 2014 18:39:51 +1300 Subject: [PATCH 405/486] Update migration defaults In TripleO we wanted to enable live block migration. Pleasantly surprising was finding that all it required was better options to libvirt. I'm told this works with the libvirt we gate on, though as we don't gate migration yet, its hard to tell. Change-Id: I7b997666474ec324c5034e3660a7271b8423bca8 DocImpact: UpgradeImpact: --- nova/virt/libvirt/driver.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 082584f5ec..cbc3af9ca2 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -144,10 +144,12 @@ '(any included "%s" is replaced with ' 'the migration target hostname)'), cfg.StrOpt('live_migration_flag', - default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER', + default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' + 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED', help='Migration flags to be set for live migration'), cfg.StrOpt('block_migration_flag', default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' + 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, ' 'VIR_MIGRATE_NON_SHARED_INC', help='Migration flags to be set for block migration'), cfg.IntOpt('live_migration_bandwidth', From 1d17959994f865c1453e0b05f4be447aa976d603 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 18 Aug 2014 08:48:17 -0700 Subject: [PATCH 406/486] Objectify last uses of direct db 
access in network/floating_ips These were previously skipped since they're not used when all services are on master. However, they're trivial to convert and help clean up our reports of "what's left". Since we technically need to keep them in the tree until we bump the network RPC version, this converts them and adds tests. Related to blueprint compute-manager-objects-juno Change-Id: I17296a807524a76caa10e41ea2b05c7514169fa2 --- nova/network/floating_ips.py | 15 +++++---------- nova/tests/network/test_manager.py | 26 ++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/nova/network/floating_ips.py b/nova/network/floating_ips.py index 345f51dbb4..64398ce557 100644 --- a/nova/network/floating_ips.py +++ b/nova/network/floating_ips.py @@ -499,27 +499,22 @@ def get_floating_ip_by_address(self, context, address): """Returns a floating IP as a dict.""" # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi. - # NOTE(danms): Not converting to objects since it's not used - return dict(self.db.floating_ip_get_by_address(context, - address).iteritems()) + return objects.FloatingIP.get_by_address(context, address) def get_floating_ips_by_project(self, context): """Returns the floating IPs allocated to a project.""" # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi. - # NOTE(danms): Not converting to objects since it's not used - ips = self.db.floating_ip_get_all_by_project(context, + return objects.FloatingIPList.get_by_project(context, context.project_id) - return [dict(ip.iteritems()) for ip in ips] def get_floating_ips_by_fixed_address(self, context, fixed_address): """Returns the floating IPs associated with a fixed_address.""" # NOTE(vish): This is no longer used but can't be removed until # we major version the network_rpcapi. 
- # NOTE(danms): Not converting to objects since it's not used - floating_ips = self.db.floating_ip_get_by_fixed_address(context, - fixed_address) - return [floating_ip['address'] for floating_ip in floating_ips] + floating_ips = objects.FloatingIPList.get_by_fixed_address( + context, fixed_address) + return [str(floating_ip.address) for floating_ip in floating_ips] def _is_stale_floating_ip_address(self, context, floating_ip): try: diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index 694f3d6e11..994ddd41c8 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -3027,6 +3027,32 @@ def test_associate_floating_ip_failure_interface_not_found(self): self._test_associate_floating_ip_failure('Cannot find device', exception.NoFloatingIpInterface) + @mock.patch('nova.objects.FloatingIP.get_by_address') + def test_get_floating_ip_by_address(self, mock_get): + mock_get.return_value = mock.sentinel.floating + self.assertEqual(mock.sentinel.floating, + self.network.get_floating_ip_by_address( + self.context, + mock.sentinel.address)) + mock_get.assert_called_once_with(self.context, mock.sentinel.address) + + @mock.patch('nova.objects.FloatingIPList.get_by_project') + def test_get_floating_ips_by_project(self, mock_get): + mock_get.return_value = mock.sentinel.floatings + self.assertEqual(mock.sentinel.floatings, + self.network.get_floating_ips_by_project( + self.context)) + mock_get.assert_called_once_with(self.context, self.context.project_id) + + @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address') + def test_get_floating_ips_by_fixed_address(self, mock_get): + mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'), + objects.FloatingIP(address='5.6.7.8')] + self.assertEqual(['1.2.3.4', '5.6.7.8'], + self.network.get_floating_ips_by_fixed_address( + self.context, mock.sentinel.address)) + mock_get.assert_called_once_with(self.context, mock.sentinel.address) + class 
InstanceDNSTestCase(test.TestCase): """Tests nova.network.manager instance DNS.""" From 40f75298c80e21d8fb60a03ca9b3b8183a9a8e91 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 18 Aug 2014 10:57:16 -0700 Subject: [PATCH 407/486] Objectify association in neutronapi This makes the neutronapi use the Instance object for the associate path and updates the test to match. Related to blueprint compute-manager-objects-juno Change-Id: I045bfbd16e3b45107bd11e3874ce47280574930a --- nova/network/neutronv2/api.py | 3 ++- nova/tests/network/test_neutronv2.py | 12 ++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index 971b612166..17162f62b9 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -30,6 +30,7 @@ from nova.network import neutronv2 from nova.network.neutronv2 import constants from nova.network.security_group import openstack_driver +from nova import objects from nova.openstack.common import excutils from nova.openstack.common import log as logging from nova.openstack.common import uuidutils @@ -844,7 +845,7 @@ def associate_floating_ip(self, context, instance, instance_id=orig_instance_uuid) LOG.info(_('re-assign floating IP %(address)s from ' 'instance %(instance_id)s') % msg_dict) - orig_instance = self.db.instance_get_by_uuid(context, + orig_instance = objects.Instance.get_by_uuid(context, orig_instance_uuid) # purge cached nw info for the original instance diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index d3eb08f2b6..6442c407ae 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -2023,7 +2023,8 @@ def test_associate_floating_ip(self): api.associate_floating_ip(self.context, self.instance, address, fixed_address) - def test_reassociate_floating_ip(self): + @mock.patch('nova.objects.Instance.get_by_uuid') + def test_reassociate_floating_ip(self, mock_get): api = 
neutronapi.API() address = self.fip_associated['floating_ip_address'] new_fixed_address = self.port_address @@ -2040,11 +2041,10 @@ def test_reassociate_floating_ip(self): 'fixed_ip_address': new_fixed_address}}) self.moxed_client.show_port(self.fip_associated['port_id']).\ AndReturn({'port': self.port_data2[1]}) - self.mox.StubOutWithMock(api.db, 'instance_get_by_uuid') - api.db.instance_get_by_uuid(mox.IgnoreArg(), - self.instance['uuid']).\ - AndReturn(self.instance) - self._setup_mock_for_refresh_cache(api, [self.instance, + + mock_get.return_value = fake_instance.fake_instance_obj( + self.context, **self.instance) + self._setup_mock_for_refresh_cache(api, [mock_get.return_value, self.instance2]) self.mox.ReplayAll() From 446fc3af044bd92fe2b9ca620e9159fd471f7e20 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Mon, 18 Aug 2014 23:58:49 +0000 Subject: [PATCH 408/486] Move fake_quotas and fake_get_quotas into a class fake_quotas and fake_get_quotas are used in ExtendedQuotasTest only, and this patch moves them into the class for cleanup. In addition, this patch moves the mox settings into setUp() method for the same purpose. 
Change-Id: I9920d6207bb27537e6d4e738d69d59f7ed255240 --- .../openstack/compute/contrib/test_quotas.py | 54 +++++++++---------- 1 file changed, 24 insertions(+), 30 deletions(-) diff --git a/nova/tests/api/openstack/compute/contrib/test_quotas.py b/nova/tests/api/openstack/compute/contrib/test_quotas.py index 57f703ea06..063e319dbd 100644 --- a/nova/tests/api/openstack/compute/contrib/test_quotas.py +++ b/nova/tests/api/openstack/compute/contrib/test_quotas.py @@ -418,49 +418,46 @@ def test_deserializer(self): self.assertEqual(result, exemplar) -fake_quotas = {'ram': {'limit': 51200, - 'in_use': 12800, - 'reserved': 12800}, - 'cores': {'limit': 20, - 'in_use': 10, - 'reserved': 5}, - 'instances': {'limit': 100, - 'in_use': 0, - 'reserved': 0}} - - -def fake_get_quotas(self, context, id, user_id=None, usages=False): - if usages: - return fake_quotas - else: - return dict((k, v['limit']) for k, v in fake_quotas.items()) - - class ExtendedQuotasTest(test.TestCase): def setUp(self): super(ExtendedQuotasTest, self).setUp() self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager) self.controller = quotas.QuotaSetsController(self.ext_mgr) + self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) + self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) + self.mox.ReplayAll() + + fake_quotas = {'ram': {'limit': 51200, + 'in_use': 12800, + 'reserved': 12800}, + 'cores': {'limit': 20, + 'in_use': 10, + 'reserved': 5}, + 'instances': {'limit': 100, + 'in_use': 0, + 'reserved': 0}} + + def fake_get_quotas(self, context, id, user_id=None, usages=False): + if usages: + return self.fake_quotas + else: + return dict((k, v['limit']) for k, v in self.fake_quotas.items()) def test_quotas_update_exceed_in_used(self): body = {'quota_set': {'cores': 10}} self.stubs.Set(quotas.QuotaSetsController, '_get_quotas', - fake_get_quotas) + self.fake_get_quotas) req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me', use_admin_context=True) - 
self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) - self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) - self.mox.ReplayAll() - self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 'update_me', body) def test_quotas_force_update_exceed_in_used(self): self.stubs.Set(quotas.QuotaSetsController, '_get_quotas', - fake_get_quotas) + self.fake_get_quotas) req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me', use_admin_context=True) expected = {'quota_set': {'ram': 25600, 'instances': 200, 'cores': 10}} @@ -468,13 +465,10 @@ def test_quotas_force_update_exceed_in_used(self): 'instances': 200, 'cores': 10, 'force': 'True'}} - fake_quotas.get('ram')['limit'] = 25600 - fake_quotas.get('cores')['limit'] = 10 - fake_quotas.get('instances')['limit'] = 200 + self.fake_quotas.get('ram')['limit'] = 25600 + self.fake_quotas.get('cores')['limit'] = 10 + self.fake_quotas.get('instances')['limit'] = 200 - self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) - self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) - self.mox.ReplayAll() res_dict = self.controller.update(req, 'update_me', body) self.assertEqual(res_dict, expected) From a4c074710d99141abd778115ca5e983a6713bc35 Mon Sep 17 00:00:00 2001 From: tanlin Date: Fri, 13 Jun 2014 10:46:01 +0800 Subject: [PATCH 409/486] Add missing create() method on SecurityGroupRule object The nova.objects.SecurityGroupRule class was missing the create() method, so we add it here, and while doing so, ensure that the id attribute is marked as read-only. 
Partial-Blueprint: compute-manager-objects-juno Change-Id: I4584e33d9d163a7dce01b01d205f2f43824e544f --- nova/objects/security_group_rule.py | 23 ++++++++++-- nova/tests/objects/test_objects.py | 4 +-- .../tests/objects/test_security_group_rule.py | 35 ++++++++++++++++--- 3 files changed, 53 insertions(+), 9 deletions(-) diff --git a/nova/objects/security_group_rule.py b/nova/objects/security_group_rule.py index a131a7173f..ac27b48a8e 100644 --- a/nova/objects/security_group_rule.py +++ b/nova/objects/security_group_rule.py @@ -13,6 +13,7 @@ # under the License. from nova import db +from nova import exception from nova import objects from nova.objects import base from nova.objects import fields @@ -22,10 +23,11 @@ class SecurityGroupRule(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version - VERSION = '1.0' + # Version 1.1: Added create() and set id as read_only + VERSION = '1.1' fields = { - 'id': fields.IntegerField(), + 'id': fields.IntegerField(read_only=True), 'protocol': fields.StringField(nullable=True), 'from_port': fields.IntegerField(nullable=True), 'to_port': fields.IntegerField(nullable=True), @@ -54,6 +56,21 @@ def _from_db_object(context, rule, db_rule, expected_attrs=None): rule.obj_reset_changes() return rule + @base.remotable + def create(self, context): + if self.obj_attr_is_set('id'): + raise exception.ObjectActionError(action='create', + reason='already created') + updates = self.obj_get_changes() + parent_group = updates.pop('parent_group', None) + if parent_group: + updates['parent_group_id'] = parent_group.id + grantee_group = updates.pop('grantee_group', None) + if grantee_group: + updates['group_id'] = grantee_group.id + db_rule = db.security_group_rule_create(context, updates) + self._from_db_object(context, self, db_rule) + @base.remotable_classmethod def get_by_id(cls, context, rule_id): db_rule = db.security_group_rule_get(context, rule_id) @@ -64,8 +81,10 @@ class SecurityGroupRuleList(base.ObjectListBase, 
base.NovaObject): fields = { 'objects': fields.ListOfObjectsField('SecurityGroupRule'), } + VERSION = '1.1' child_versions = { '1.0': '1.0', + '1.1': '1.1', } @base.remotable_classmethod diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index a4eadcfce8..8661860bb6 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -970,8 +970,8 @@ def test_object_serialization_iterables(self): 'S3ImageMapping': '1.0-9225943a44a91ad0349b9fd8bd3f3ce2', 'SecurityGroup': '1.1-bba0e72865e0953793e796571692453b', 'SecurityGroupList': '1.0-528e6448adfeeb78921ebeda499ab72f', - 'SecurityGroupRule': '1.0-fdd020bdd7eb8bac744ad6f9a4ef8165', - 'SecurityGroupRuleList': '1.0-1052b37dc59a1957ee5b0b9268d03af3', + 'SecurityGroupRule': '1.1-a9175baf7664439af1a16c2010b55576', + 'SecurityGroupRuleList': '1.1-667fca3a9928f23d2d10e61962c55f3c', 'Service': '1.2-5a3df338c669e1148251431370b440ef', 'ServiceList': '1.0-2c960ac9bc56a12c65b9118bb3a58b44', 'TestSubclassedObject': '1.6-c63feb2f2533b7d075490c04a2cc10dd', diff --git a/nova/tests/objects/test_security_group_rule.py b/nova/tests/objects/test_security_group_rule.py index 4cd7d95758..e2c5294403 100644 --- a/nova/tests/objects/test_security_group_rule.py +++ b/nova/tests/objects/test_security_group_rule.py @@ -15,8 +15,8 @@ import mock from nova import db -from nova.objects import security_group -from nova.objects import security_group_rule +from nova import exception +from nova import objects from nova.tests.objects import test_objects from nova.tests.objects import test_security_group @@ -37,7 +37,7 @@ class _TestSecurityGroupRuleObject(object): def test_get_by_id(self): with mock.patch.object(db, 'security_group_rule_get') as sgrg: sgrg.return_value = fake_rule - rule = security_group_rule.SecurityGroupRule.get_by_id( + rule = objects.SecurityGroupRule.get_by_id( self.context, 1) for field in fake_rule: if field == 'cidr': @@ -47,18 +47,43 @@ def test_get_by_id(self): 
sgrg.assert_called_with(self.context, 1) def test_get_by_security_group(self): - secgroup = security_group.SecurityGroup() + secgroup = objects.SecurityGroup() secgroup.id = 123 rule = dict(fake_rule) rule['grantee_group'] = dict(test_security_group.fake_secgroup, id=123) stupid_method = 'security_group_rule_get_by_security_group' with mock.patch.object(db, stupid_method) as sgrgbsg: sgrgbsg.return_value = [rule] - rules = (security_group_rule.SecurityGroupRuleList. + rules = (objects.SecurityGroupRuleList. get_by_security_group(self.context, secgroup)) self.assertEqual(1, len(rules)) self.assertEqual(123, rules[0].grantee_group.id) + @mock.patch.object(db, 'security_group_rule_create', + return_value=fake_rule) + def test_create(self, db_mock): + rule = objects.SecurityGroupRule() + rule.protocol = 'tcp' + secgroup = objects.SecurityGroup() + secgroup.id = 123 + parentgroup = objects.SecurityGroup() + parentgroup.id = 223 + rule.grantee_group = secgroup + rule.parent_group = parentgroup + rule.create(self.context) + updates = db_mock.call_args[0][1] + self.assertEqual(fake_rule['id'], rule.id) + self.assertEqual(updates['group_id'], rule.grantee_group.id) + self.assertEqual(updates['parent_group_id'], rule.parent_group.id) + + @mock.patch.object(db, 'security_group_rule_create', + return_value=fake_rule) + def test_set_id_failure(self, db_mock): + rule = objects.SecurityGroupRule() + rule.create(self.context) + self.assertRaises(exception.ReadOnlyFieldError, setattr, + rule, 'id', 124) + class TestSecurityGroupRuleObject(test_objects._LocalTest, _TestSecurityGroupRuleObject): From 3ca0817d1f78e8f63610b5d7667752f7f4a05b51 Mon Sep 17 00:00:00 2001 From: Qin Zhao Date: Tue, 19 Aug 2014 14:56:12 +0800 Subject: [PATCH 410/486] Libvirt: Do not raise ENOENT exception If the disk disappears during update_status(), only need to print a warning, and do not need to raise an exception. 
Change-Id: I87800c855c7b349f69163e33031404ad25e2a9ca Closes-Bug: 1358609 --- nova/virt/libvirt/driver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 082584f5ec..3bfdde9d9e 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -5136,7 +5136,7 @@ def _get_disk_over_committed_size_total(self): 'but disk file was removed by concurrent ' 'operations such as resize.'), {'i_name': dom.name()}) - if e.errno == errno.EACCES: + elif e.errno == errno.EACCES: LOG.warn(_LW('Periodic task is updating the host stat, ' 'it is trying to get disk %(i_name)s, ' 'but access is denied. It is most likely ' From c0266b80ad442398577c7a37f250f2d429733ad4 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Thu, 7 Aug 2014 07:39:17 +0000 Subject: [PATCH 411/486] Add some v2 agents API tests On most APIs, the unit test coverage of v3 is better than the one of v2. This patch ports v3 agents API tests to v2 tests for improving v2 agent API tests. 
Change-Id: I045c8fa64eb4cd2bf5852d4656d1067ec12a8564 --- .../openstack/compute/contrib/test_agents.py | 61 +++++++++++++++++-- 1 file changed, 57 insertions(+), 4 deletions(-) diff --git a/nova/tests/api/openstack/compute/contrib/test_agents.py b/nova/tests/api/openstack/compute/contrib/test_agents.py index 7373efeb27..7d21292d5e 100644 --- a/nova/tests/api/openstack/compute/contrib/test_agents.py +++ b/nova/tests/api/openstack/compute/contrib/test_agents.py @@ -118,14 +118,45 @@ def test_agents_create(self): res_dict = self.controller.create(req, body) self.assertEqual(res_dict, response) - def test_agents_create_key_error(self): + def _test_agents_create_key_error(self, key): req = FakeRequest() - body = {'agent': {'hypervisordummy': 'kvm', + body = {'agent': {'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} + body['agent'].pop(key) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, body) + + def test_agents_create_without_hypervisor(self): + self._test_agents_create_key_error('hypervisor') + + def test_agents_create_without_os(self): + self._test_agents_create_key_error('os') + + def test_agents_create_without_architecture(self): + self._test_agents_create_key_error('architecture') + + def test_agents_create_without_version(self): + self._test_agents_create_key_error('version') + + def test_agents_create_without_url(self): + self._test_agents_create_key_error('url') + + def test_agents_create_without_md5hash(self): + self._test_agents_create_key_error('md5hash') + + def test_agents_create_with_wrong_type(self): + req = FakeRequest() + body = {'agent': None} + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, body) + + def test_agents_create_with_empty_type(self): + req = FakeRequest() + body = {} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) @@ -239,11 +270,33 @@ def 
test_agents_update(self): res_dict = self.controller.update(req, 1, body) self.assertEqual(res_dict, response) - def test_agents_update_key_error(self): + def _test_agents_update_key_error(self, key): req = FakeRequest() - body = {'para': {'versiondummy': '7.0', + body = {'para': {'version': '7.0', 'url': 'xxx://xxxx/xxx/xxx', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} + body['para'].pop(key) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, 1, body) + + def test_agents_update_without_version(self): + self._test_agents_update_key_error('version') + + def test_agents_update_without_url(self): + self._test_agents_update_key_error('url') + + def test_agents_update_without_md5hash(self): + self._test_agents_update_key_error('md5hash') + + def test_agents_update_with_wrong_type(self): + req = FakeRequest() + body = {'agent': None} + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.update, req, 1, body) + + def test_agents_update_with_empty(self): + req = FakeRequest() + body = {} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 1, body) From 920dcf9c51fad2262ed03c05ba57c369ffcd4755 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 12 Aug 2014 08:16:40 -0700 Subject: [PATCH 412/486] VMware: ensure test case for init_host in driver Commit 1deb31f85a8f5d1e261b2cf1eddc537a5da7f60b caused a regression. This patch ensures that there is a unit test validating that the driver implements the method. 
Change-Id: I6086aeb1e9a8937d13aae41d8a4b74c25bb802f6 --- nova/tests/virt/vmwareapi/test_driver_api.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index 58a7c2765b..c690e1ea0e 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -417,6 +417,18 @@ def tearDown(self): def test_get_host_ip_addr(self): self.assertEqual('test_url', self.conn.get_host_ip_addr()) + def test_init_host_with_no_session(self): + self.conn._session = mock.Mock() + self.conn._session.vim = None + self.conn.init_host('fake_host') + self.conn._session._create_session.assert_called_once_with() + + def test_init_host(self): + try: + self.conn.init_host("fake_host") + except Exception as ex: + self.fail("init_host raised: %s" % ex) + def _set_exception_vars(self): self.wait_task = self.conn._session._wait_for_task self.call_method = self.conn._session._call_method From 230471b645c256641249e1678aab343aecc10d75 Mon Sep 17 00:00:00 2001 From: Nikola Dipanov Date: Wed, 30 Jul 2014 14:48:20 +0200 Subject: [PATCH 413/486] Make usage_from_instances consider current usage VirtNUMAHostTopology.usage_from_instances will now consider the current usage of the passed host, and also accepts a 'free' keyword argument, which if set to True, will cause the instance usage to be taken out of the considered host. Also adds better handling of None and emtpy params. 
Blueprint: virt-driver-numa-placement Change-Id: I62c59b5e1dcbd4db247282f170c6f2f501bafd3d --- nova/tests/virt/test_hardware.py | 70 ++++++++++++++++++++++++++++++++ nova/virt/hardware.py | 24 +++++++---- 2 files changed, 87 insertions(+), 7 deletions(-) diff --git a/nova/tests/virt/test_hardware.py b/nova/tests/virt/test_hardware.py index 228f4ca871..754b96dd2b 100644 --- a/nova/tests/virt/test_hardware.py +++ b/nova/tests/virt/test_hardware.py @@ -920,6 +920,76 @@ def test_host_usage_sparse(self): self.assertEqual(hostusage.cells[2].cpu_usage, 1) self.assertEqual(hostusage.cells[2].memory_usage, 256) + def test_host_usage_culmulative_with_free(self): + hosttopo = hw.VirtNUMAHostTopology([ + hw.VirtNUMATopologyCellUsage( + 0, set([0, 1, 2, 3]), 1024, cpu_usage=2, memory_usage=512), + hw.VirtNUMATopologyCellUsage( + 1, set([4, 6]), 512, cpu_usage=1, memory_usage=512), + hw.VirtNUMATopologyCellUsage(2, set([5, 7]), 256), + ]) + instance1 = hw.VirtNUMAInstanceTopology([ + hw.VirtNUMATopologyCell(0, set([0, 1, 2]), 512), + hw.VirtNUMATopologyCell(1, set([3]), 256), + hw.VirtNUMATopologyCell(2, set([4]), 256)]) + + hostusage = hw.VirtNUMAHostTopology.usage_from_instances( + hosttopo, [instance1]) + self.assertIsInstance(hostusage.cells[0], + hw.VirtNUMATopologyCellUsage) + self.assertEqual(hostusage.cells[0].cpu_usage, 5) + self.assertEqual(hostusage.cells[0].memory_usage, 1024) + + self.assertIsInstance(hostusage.cells[1], + hw.VirtNUMATopologyCellUsage) + self.assertEqual(hostusage.cells[1].cpu_usage, 2) + self.assertEqual(hostusage.cells[1].memory_usage, 768) + + self.assertIsInstance(hostusage.cells[2], + hw.VirtNUMATopologyCellUsage) + self.assertEqual(hostusage.cells[2].cpu_usage, 1) + self.assertEqual(hostusage.cells[2].memory_usage, 256) + + # Test freeing of resources + hostusage = hw.VirtNUMAHostTopology.usage_from_instances( + hostusage, [instance1], free=True) + self.assertEqual(hostusage.cells[0].cpu_usage, 2) + 
self.assertEqual(hostusage.cells[0].memory_usage, 512) + + self.assertEqual(hostusage.cells[1].cpu_usage, 1) + self.assertEqual(hostusage.cells[1].memory_usage, 512) + + self.assertEqual(hostusage.cells[2].cpu_usage, 0) + self.assertEqual(hostusage.cells[2].memory_usage, 0) + + def test_topo_usage_none(self): + hosttopo = hw.VirtNUMAHostTopology([ + hw.VirtNUMATopologyCellUsage(0, set([0, 1]), 512), + hw.VirtNUMATopologyCellUsage(1, set([2, 3]), 512), + ]) + instance1 = hw.VirtNUMAInstanceTopology([ + hw.VirtNUMATopologyCell(0, set([0, 1]), 256), + hw.VirtNUMATopologyCell(2, set([2]), 256), + ]) + + hostusage = hw.VirtNUMAHostTopology.usage_from_instances( + None, [instance1]) + self.assertIsNone(hostusage) + + hostusage = hw.VirtNUMAHostTopology.usage_from_instances( + hosttopo, []) + self.assertEqual(hostusage.cells[0].cpu_usage, 0) + self.assertEqual(hostusage.cells[0].memory_usage, 0) + self.assertEqual(hostusage.cells[1].cpu_usage, 0) + self.assertEqual(hostusage.cells[1].memory_usage, 0) + + hostusage = hw.VirtNUMAHostTopology.usage_from_instances( + hosttopo, None) + self.assertEqual(hostusage.cells[0].cpu_usage, 0) + self.assertEqual(hostusage.cells[0].memory_usage, 0) + self.assertEqual(hostusage.cells[1].cpu_usage, 0) + self.assertEqual(hostusage.cells[1].memory_usage, 0) + def _test_to_dict(self, cell_or_topo, expected): got = cell_or_topo._to_dict() self.assertThat(expected, matchers.DictMatches(got)) diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py index da38cdd24b..0f378fbdd4 100644 --- a/nova/virt/hardware.py +++ b/nova/virt/hardware.py @@ -596,6 +596,9 @@ def __len__(self): """Defined so that boolean testing works the same as for lists.""" return len(self.cells) + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, str(self._to_dict())) + def _to_dict(self): return {'cells': [cell._to_dict() for cell in self.cells]} @@ -742,11 +745,12 @@ class VirtNUMAHostTopology(VirtNUMATopology): cell_class = VirtNUMATopologyCellUsage 
@classmethod - def usage_from_instances(cls, host, instances): + def usage_from_instances(cls, host, instances, free=False): """Get host topology usage - :param host: VirtNUMAHostTopology without usage information + :param host: VirtNUMAHostTopology with usage information :param instances: list of VirtNUMAInstanceTopology + :param free: If True usage of the host will be decreased Sum the usage from all @instances to report the overall host topology usage @@ -754,19 +758,25 @@ def usage_from_instances(cls, host, instances): :returns: VirtNUMAHostTopology including usage information """ + if host is None: + return + + instances = instances or [] cells = [] + sign = -1 if free else 1 for hostcell in host.cells: - memory_usage = 0 - cpu_usage = 0 + memory_usage = hostcell.memory_usage + cpu_usage = hostcell.cpu_usage for instance in instances: for instancecell in instance.cells: if instancecell.id == hostcell.id: - memory_usage = memory_usage + instancecell.memory - cpu_usage = cpu_usage + len(instancecell.cpuset) + memory_usage = ( + memory_usage + sign * instancecell.memory) + cpu_usage = cpu_usage + sign * len(instancecell.cpuset) cell = cls.cell_class( hostcell.id, hostcell.cpuset, hostcell.memory, - cpu_usage, memory_usage) + max(0, cpu_usage), max(0, memory_usage)) cells.append(cell) From 3a5919fd4af6c3b772397a5e7d90eebdf9b371af Mon Sep 17 00:00:00 2001 From: Simona Iuliana Toader Date: Fri, 8 Aug 2014 16:55:15 +0300 Subject: [PATCH 414/486] Fixes Hyper-V resize down exception The Hyper-V driver does not support resize down and is currently raising an exception if the user attempts to do that, causing the instance to go in ERROR state. The driver should use the recently introduced instance faults "exception.InstanceFaultRollback" instead, which will leave the instance in ACTIVE state as expected. 
Closes-Bug: #1354448 Change-Id: Ibaf8482562094cd2b3165dc62a907fa9e0e56e19 --- nova/tests/virt/hyperv/test_hypervapi.py | 2 +- nova/virt/hyperv/migrationops.py | 13 ++++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/nova/tests/virt/hyperv/test_hypervapi.py b/nova/tests/virt/hyperv/test_hypervapi.py index 84a1802364..edb955e299 100644 --- a/nova/tests/virt/hyperv/test_hypervapi.py +++ b/nova/tests/virt/hyperv/test_hypervapi.py @@ -1504,7 +1504,7 @@ def test_migrate_disk_and_power_off_smaller_root_vhd_size_exception(self): (instance, fake_dest_ip, network_info, flavor) = args self._mox.ReplayAll() - self.assertRaises(vmutils.VHDResizeException, + self.assertRaises(exception.InstanceFaultRollback, self._conn.migrate_disk_and_power_off, self._context, instance, fake_dest_ip, flavor, network_info) diff --git a/nova/virt/hyperv/migrationops.py b/nova/virt/hyperv/migrationops.py index a1bc934bd0..a803d4f797 100644 --- a/nova/virt/hyperv/migrationops.py +++ b/nova/virt/hyperv/migrationops.py @@ -18,6 +18,7 @@ """ import os +from nova import exception from nova.i18n import _ from nova.openstack.common import excutils from nova.openstack.common import log as logging @@ -102,11 +103,13 @@ def _check_target_flavor(self, instance, flavor): curr_root_gb = instance['root_gb'] if new_root_gb < curr_root_gb: - raise vmutils.VHDResizeException( - _("Cannot resize the root disk to a smaller size. Current " - "size: %(curr_root_gb)s GB. Requested size: " - "%(new_root_gb)s GB") % - {'curr_root_gb': curr_root_gb, 'new_root_gb': new_root_gb}) + raise exception.InstanceFaultRollback( + vmutils.VHDResizeException( + _("Cannot resize the root disk to a smaller size. " + "Current size: %(curr_root_gb)s GB. 
Requested size: " + "%(new_root_gb)s GB") % + {'curr_root_gb': curr_root_gb, + 'new_root_gb': new_root_gb})) def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, From 4431eec1c94c4a353b45e5d873854b3fb1eaa11b Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Mon, 28 Jul 2014 15:15:44 +0100 Subject: [PATCH 415/486] libvirt: make sysinfo serial number configurable The 'serial' field in guest SMBIOS tables gets populated based on the libvirt reported UUID of the host hardware. The rationale is to allow correlation of guests running on the same host. Unfortunately some hardware vendors use a subset of the host UUID as a key for retrieving hardware support contract information without requiring any authentication. So exposing the host UUID to the guest is an information leak for those vendors. It is possible to override the use of SMBIOS data by libvirt in /etc/libvirt/libvirtd.conf by setting the 'host_uuid' parameter. As a way to reduce the configuration burden though, it is preferrable to use the /etc/machine-id UUID, instead of the host hardware UUID. The former is a recent standard for Linux distros introduced by systemd to provide a UUID that is unique per operating system install. This means that even containers will see a separate /etc/machine-id value. This /etc/machine-id can be expected to be widely available in current and future distros. If missing, it is still possible to fallback to the libvirt reported host UUID. The host UUID exposed could theoretically be leveraged by a cloud user to get an approximate count of the number of unique hosts available to them in the cloud by launching many short lived VMs. Administrators concerned about this may wish to disable reporting of any sysinfo serial field at all. 
Introduce a 'sysinfo_serial' config parameter to the libvirt driver to control behaviour, accepting values: - 'auto' - try /etc/machine-id, fallback to libvirt reported host UUID (new default) - 'hardware' - always use libvirt host UUID (old default) - 'os' - always use /etc/machine-id, error if missing - 'none' - do not report any value to the guest DocImpact: new libvirt.sysinfo_serial config parameter SecurityImpact Closes-bug: #1337349 Change-Id: I7ba7dbd65e913a66efe35a1d6490a85bec8413da --- nova/tests/virt/libvirt/test_driver.py | 116 +++++++++++++++++++++++++ nova/virt/libvirt/driver.py | 54 +++++++++++- 2 files changed, 169 insertions(+), 1 deletion(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index f39f849324..6cff20312c 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -419,6 +419,11 @@ def setUp(self): self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.LibvirtDriver._get_host_uuid', lambda _: 'cef19ce0-0ca2-11df-855d-b19fbce37686')) + # Prevent test suite trying to find /etc/machine-id + # which isn't guaranteed to exist. 
Instead it will use + # the host UUID from libvirt which we mock above + self.flags(sysinfo_serial="hardware", group="libvirt") + self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) @@ -2142,6 +2147,117 @@ def test_get_guest_config_with_bogus_cpu_quota(self): conn._get_guest_config, instance_ref, [], {}, disk_info) + def _test_get_guest_config_sysinfo_serial(self, expected_serial): + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + + instance_ref = db.instance_create(self.context, self.test_instance) + cfg = drvr._get_guest_config_sysinfo(instance_ref) + + self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo) + self.assertEqual(version.vendor_string(), + cfg.system_manufacturer) + self.assertEqual(version.product_string(), + cfg.system_product) + self.assertEqual(version.version_string_with_package(), + cfg.system_version) + self.assertEqual(expected_serial, + cfg.system_serial) + self.assertEqual(instance_ref['uuid'], + cfg.system_uuid) + + def test_get_guest_config_sysinfo_serial_none(self): + self.flags(sysinfo_serial="none", group="libvirt") + self._test_get_guest_config_sysinfo_serial(None) + + @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_host_uuid") + def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid): + self.flags(sysinfo_serial="hardware", group="libvirt") + + theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" + mock_uuid.return_value = theuuid + + self._test_get_guest_config_sysinfo_serial(theuuid) + + def test_get_guest_config_sysinfo_serial_os(self): + self.flags(sysinfo_serial="os", group="libvirt") + + real_open = __builtin__.open + with contextlib.nested( + mock.patch.object(__builtin__, "open"), + ) as (mock_open, ): + theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" + + def fake_open(filename, *args, **kwargs): + if filename == "/etc/machine-id": + h = mock.MagicMock() + h.read.return_value = theuuid + h.__enter__.return_value = h + return h + 
return real_open(filename, *args, **kwargs) + + mock_open.side_effect = fake_open + + self._test_get_guest_config_sysinfo_serial(theuuid) + + def test_get_guest_config_sysinfo_serial_auto_hardware(self): + self.flags(sysinfo_serial="auto", group="libvirt") + + real_exists = os.path.exists + with contextlib.nested( + mock.patch.object(os.path, "exists"), + mock.patch.object(libvirt_driver.LibvirtDriver, + "_get_host_uuid") + ) as (mock_exists, mock_uuid): + def fake_exists(filename): + if filename == "/etc/machine-id": + return False + return real_exists(filename) + + mock_exists.side_effect = fake_exists + + theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" + mock_uuid.return_value = theuuid + + self._test_get_guest_config_sysinfo_serial(theuuid) + + def test_get_guest_config_sysinfo_serial_auto_os(self): + self.flags(sysinfo_serial="auto", group="libvirt") + + real_exists = os.path.exists + real_open = __builtin__.open + with contextlib.nested( + mock.patch.object(os.path, "exists"), + mock.patch.object(__builtin__, "open"), + ) as (mock_exists, mock_open): + def fake_exists(filename): + if filename == "/etc/machine-id": + return True + return real_exists(filename) + + mock_exists.side_effect = fake_exists + + theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" + + def fake_open(filename, *args, **kwargs): + if filename == "/etc/machine-id": + h = mock.MagicMock() + h.read.return_value = theuuid + h.__enter__.return_value = h + return h + return real_open(filename, *args, **kwargs) + + mock_open.side_effect = fake_open + + self._test_get_guest_config_sysinfo_serial(theuuid) + + def test_get_guest_config_sysinfo_serial_invalid(self): + self.flags(sysinfo_serial="invalid", group="libvirt") + + self.assertRaises(exception.NovaException, + libvirt_driver.LibvirtDriver, + fake.FakeVirtAPI(), + True) + def _create_fake_service_compute(self): service_info = { 'host': 'fake', diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 082584f5ec..2f4b927752 
100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -218,6 +218,12 @@ 'the "virsh capabilities"command. The format of the ' 'value for this config option is host-arch=machine-type. ' 'For example: x86_64=machinetype1,armv7l=machinetype2'), + cfg.StrOpt('sysinfo_serial', + default='auto', + help='The data source used to the populate the host "serial" ' + 'UUID exposed to guest in the virtual BIOS. Permitted ' + 'options are "hardware", "os", "none" or "auto" ' + '(default).'), ] CONF = cfg.CONF @@ -382,6 +388,23 @@ def __init__(self, virtapi, read_only=False): self._volume_api = volume.API() self._image_api = image.API() + sysinfo_serial_funcs = { + 'none': lambda: None, + 'hardware': self._get_host_sysinfo_serial_hardware, + 'os': self._get_host_sysinfo_serial_os, + 'auto': self._get_host_sysinfo_serial_auto, + } + + self._sysinfo_serial_func = sysinfo_serial_funcs.get( + CONF.libvirt.sysinfo_serial) + if not self._sysinfo_serial_func: + raise exception.NovaException( + _("Unexpected sysinfo_serial setting '%(actual)s'. " + "Permitted values are %(expect)s'") % + {'actual': CONF.libvirt.sysinfo_serial, + 'expect': ', '.join("'%s'" % k for k in + sysinfo_serial_funcs.keys())}) + @property def disk_cachemode(self): if self._disk_cachemode is None: @@ -3214,6 +3237,35 @@ def _get_guest_storage_config(self, instance, image_meta, return devices + def _get_host_sysinfo_serial_hardware(self): + """Get a UUID from the host hardware + + Get a UUID for the host hardware reported by libvirt. + This is typically from the SMBIOS data, unless it has + been overridden in /etc/libvirt/libvirtd.conf + """ + return self._get_host_uuid() + + def _get_host_sysinfo_serial_os(self): + """Get a UUID from the host operating system + + Get a UUID for the host operating system. Modern Linux + distros based on systemd provide a /etc/machine-id + file containing a UUID. 
This is also provided inside + systemd based containers and can be provided by other + init systems too, since it is just a plain text file. + """ + with open("/etc/machine-id") as f: + # We want to have '-' in the right place + # so we parse & reformat the value + return str(uuid.UUID(f.read().split()[0])) + + def _get_host_sysinfo_serial_auto(self): + if os.path.exists("/etc/machine-id"): + return self._get_host_sysinfo_serial_os() + else: + return self._get_host_sysinfo_serial_hardware() + def _get_guest_config_sysinfo(self, instance): sysinfo = vconfig.LibvirtConfigGuestSysinfo() @@ -3221,7 +3273,7 @@ def _get_guest_config_sysinfo(self, instance): sysinfo.system_product = version.product_string() sysinfo.system_version = version.version_string_with_package() - sysinfo.system_serial = self._get_host_uuid() + sysinfo.system_serial = self._sysinfo_serial_func() sysinfo.system_uuid = instance['uuid'] return sysinfo From cfd17a2ca65bab9ed85734b00f88b0e7236f0514 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Fri, 15 Aug 2014 13:55:15 +0000 Subject: [PATCH 416/486] objects: Make use of utils.convert_version_to_tuple() Make use of the convert_version_to_tuple() utility in objects that had inlined an implementation of the same thing. 
Change-Id: If48df452baa79eeb52218fcbad0e388edf8fd416 --- nova/objects/compute_node.py | 4 ++-- nova/objects/instance.py | 3 +-- nova/objects/network.py | 3 ++- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/objects/compute_node.py b/nova/objects/compute_node.py index 3171816e32..27b52b1294 100644 --- a/nova/objects/compute_node.py +++ b/nova/objects/compute_node.py @@ -18,6 +18,7 @@ from nova.objects import base from nova.objects import fields from nova.openstack.common import jsonutils +from nova import utils class ComputeNode(base.NovaPersistentObject, base.NovaObject): @@ -52,8 +53,7 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject): } def obj_make_compatible(self, primitive, target_version): - target_version = (int(target_version.split('.')[0]), - int(target_version.split('.')[1])) + target_version = utils.convert_version_to_tuple(target_version) if target_version < (1, 4) and 'host_ip' in primitive: del primitive['host_ip'] if target_version < (1, 3) and 'stats' in primitive: diff --git a/nova/objects/instance.py b/nova/objects/instance.py index 263b327731..b82ed23fe1 100644 --- a/nova/objects/instance.py +++ b/nova/objects/instance.py @@ -195,8 +195,7 @@ def _obj_from_primitive(cls, context, objver, primitive): return self def obj_make_compatible(self, primitive, target_version): - target_version = (int(target_version.split('.')[0]), - int(target_version.split('.')[1])) + target_version = utils.convert_version_to_tuple(target_version) unicode_attributes = ['user_id', 'project_id', 'image_ref', 'kernel_id', 'ramdisk_id', 'hostname', 'key_name', 'key_data', 'host', 'node', diff --git a/nova/objects/network.py b/nova/objects/network.py index 829ff51834..98c09d1c86 100644 --- a/nova/objects/network.py +++ b/nova/objects/network.py @@ -20,6 +20,7 @@ from nova import objects from nova.objects import base as obj_base from nova.objects import fields +from nova import utils network_opts = [ cfg.BoolOpt('share_dhcp_address', @@ -97,7 
+98,7 @@ def _convert_legacy_ipv6_netmask(netmask): 'or integral prefix' % netmask) def obj_make_compatible(self, primitive, target_version): - target_version = tuple(int(x) for x in target_version.split('.')) + target_version = utils.convert_version_to_tuple(target_version) if target_version < (1, 2): if 'mtu' in primitive: del primitive['mtu'] From c07ed15415c0ec3c5862f437f440632eff1e94df Mon Sep 17 00:00:00 2001 From: Phil Day Date: Fri, 24 Jan 2014 15:43:20 +0000 Subject: [PATCH 417/486] Power off commands should give guests a chance to shutdown Currently in libvirt operations which power off an instance such as stop, shelve, rescue, and resize simply destroy the underlying VM. Some GuestOS's do not react well to this type of power failure, and so it would be better if these operations followed the same approach as soft_reboot and give the guest as chance to shutdown gracefully. The shutdown behavior is defined by two values: - shutdown_timeout defines the overall period a Guest is allowed to complete it's shutdown. The default valus is set via nova.conf and can be overridden on a per image basis by image metadata allowing different types of guest OS to specify how long they need to shutdown cleanly. - shutdown_retry_interval defines how frequently within that period the Guest will be signaled to shutdown. This is a protection against guests that may not be ready to process the shutdown signal when it is first issued. (e.g. still booting). This is defined as a constant. This is one of a set of changes that will eventually expose the choice of whether to give the GuestOS a chance to shutdown via the API. This change implements the libvirt changes to power_off() and adds a clean shutdown to compute.manager.stop(). 
Subsequent patches will: - Add clean shutdown to Shelve - Add clean shutdown to Rescue - Convert soft_reboot to use the same approach - Expose clean shutdown via rpcapi - Expose clean shutdown via API Partially-Implements: blueprint user-defined-shutdown Closes-Bug: #1196924 DocImpact Change-Id: I432b0b0c09db82797f28deb5617f02ee45a4278c --- nova/compute/manager.py | 32 +++++++- nova/compute/utils.py | 19 +++++ nova/tests/api/ec2/test_cloud.py | 3 +- nova/tests/compute/test_compute.py | 3 +- nova/tests/compute/test_compute_utils.py | 22 ++++++ nova/tests/virt/libvirt/test_driver.py | 76 +++++++++++++++++++ nova/tests/virt/test_ironic_api_contracts.py | 2 +- nova/virt/baremetal/driver.py | 3 +- nova/virt/driver.py | 5 +- nova/virt/fake.py | 2 +- nova/virt/hyperv/driver.py | 3 +- nova/virt/libvirt/driver.py | 79 +++++++++++++++++++- nova/virt/vmwareapi/driver.py | 3 +- nova/virt/xenapi/driver.py | 3 +- 14 files changed, 243 insertions(+), 12 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 660785e1ad..83463db6a4 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -193,6 +193,10 @@ default=0, help="Automatically confirm resizes after N seconds. " "Set to 0 to disable."), + cfg.IntOpt("shutdown_timeout", + default=60, + help="Total amount of time to wait in seconds for an instance " + "to perform a clean shutdown."), ] running_deleted_opts = [ @@ -568,6 +572,11 @@ class ComputeManager(manager.Manager): target = messaging.Target(version='3.32') + # How long to wait in seconds before re-issuing a shutdown + # signal to a instance during power off. The overall + # time to wait is set by CONF.shutdown_timeout. 
+ SHUTDOWN_RETRY_INTERVAL = 10 + def __init__(self, compute_driver=None, *args, **kwargs): """Load configuration options and connect to the hypervisor.""" self.virtapi = ComputeVirtAPI(self) @@ -2186,6 +2195,25 @@ def _try_deallocate_network(self, context, instance, instance=instance) self._set_instance_error_state(context, instance) + def _get_power_off_values(self, context, instance, clean_shutdown): + """Get the timing configuration for powering down this instance.""" + if clean_shutdown: + timeout = compute_utils.get_value_from_system_metadata(instance, + key='image_os_shutdown_timeout', type=int, + default=CONF.shutdown_timeout) + retry_interval = self.SHUTDOWN_RETRY_INTERVAL + else: + timeout = 0 + retry_interval = 0 + + return timeout, retry_interval + + def _power_off_instance(self, context, instance, clean_shutdown=True): + """Power off an instance on this host.""" + timeout, retry_interval = self._get_power_off_values(context, + instance, clean_shutdown) + self.driver.power_off(instance, timeout, retry_interval) + def _shutdown_instance(self, context, instance, bdms, requested_networks=None, notify=True, try_deallocate_networks=True): @@ -2377,14 +2405,14 @@ def do_terminate_instance(instance, bdms): @reverts_task_state @wrap_instance_event @wrap_instance_fault - def stop_instance(self, context, instance): + def stop_instance(self, context, instance, clean_shutdown=True): """Stopping an instance on this host.""" @utils.synchronized(instance.uuid) def do_stop_instance(): self._notify_about_instance_usage(context, instance, "power_off.start") - self.driver.power_off(instance) + self._power_off_instance(context, instance, clean_shutdown) current_power_state = self._get_power_state(context, instance) instance.power_state = current_power_state instance.vm_state = vm_states.STOPPED diff --git a/nova/compute/utils.py b/nova/compute/utils.py index 775698c482..6a00c13897 100644 --- a/nova/compute/utils.py +++ b/nova/compute/utils.py @@ -219,6 +219,25 @@ def 
get_image_metadata(context, image_api, image_id_or_uri, instance): return utils.get_image_from_system_metadata(system_meta) +def get_value_from_system_metadata(instance, key, type, default): + """Get a value of a specified type from image metadata. + + @param instance: The instance object + @param key: The name of the property to get + @param type: The python type the value is be returned as + @param default: The value to return if key is not set or not the right type + """ + value = instance.system_metadata.get(key, default) + try: + return type(value) + except ValueError: + LOG.warning(_LW("Metadata value %(value)s for %(key)s is not of " + "type %(type)s. Using default value %(default)s."), + {'value': value, 'key': key, 'type': type, + 'default': default}, instance=instance) + return default + + def notify_usage_exists(notifier, context, instance_ref, current_period=False, ignore_missing_network_data=True, system_metadata=None, extra_usage_info=None): diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index db8a7177d8..34f16a7188 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -2539,7 +2539,8 @@ def fake_power_on(self, context, instance, network_info, self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on) - def fake_power_off(self, instance): + def fake_power_off(self, instance, + shutdown_timeout, shutdown_attempts): virt_driver['powered_off'] = True self.stubs.Set(fake_virt.FakeDriver, 'power_off', fake_power_off) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 8991a7ab60..5d24778bcf 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -2134,7 +2134,8 @@ def test_power_off(self): called = {'power_off': False} - def fake_driver_power_off(self, instance): + def fake_driver_power_off(self, instance, + shutdown_timeout, shutdown_attempts): called['power_off'] = True 
self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off', diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py index a9d13b00bb..9d841f592e 100644 --- a/nova/tests/compute/test_compute_utils.py +++ b/nova/tests/compute/test_compute_utils.py @@ -724,6 +724,28 @@ def test_get_image_meta_no_image_no_image_system_meta(self): self.assertThat(expected, matchers.DictMatches(image_meta)) +class ComputeUtilsGetValFromSysMetadata(test.TestCase): + + def test_get_value_from_system_metadata(self): + instance = fake_instance.fake_instance_obj('fake-context') + system_meta = {'int_val': 1, + 'int_string': '2', + 'not_int': 'Nope'} + instance.system_metadata = system_meta + + result = compute_utils.get_value_from_system_metadata( + instance, 'int_val', int, 0) + self.assertEqual(1, result) + + result = compute_utils.get_value_from_system_metadata( + instance, 'int_string', int, 0) + self.assertEqual(2, result) + + result = compute_utils.get_value_from_system_metadata( + instance, 'not_int', int, 0) + self.assertEqual(0, result) + + class ComputeUtilsGetNWInfo(test.TestCase): def test_instance_object_none_info_cache(self): inst = fake_instance.fake_instance_obj('fake-context', diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index b1b80437a0..f8ae1616f6 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -6193,6 +6193,82 @@ def _get_inst(with_meta=True): conn._hard_reboot(self.context, instance, network_info, block_device_info) + def _test_clean_shutdown(self, seconds_to_shutdown, + timeout, retry_interval, + shutdown_attempts, succeeds): + info_tuple = ('fake', 'fake', 'fake', 'also_fake') + shutdown_count = [] + + def count_shutdowns(): + shutdown_count.append("shutdown") + + # Mock domain + mock_domain = self.mox.CreateMock(libvirt.virDomain) + + mock_domain.info().AndReturn( + (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple) + 
mock_domain.shutdown().WithSideEffects(count_shutdowns) + + retry_countdown = retry_interval + for x in xrange(min(seconds_to_shutdown, timeout)): + mock_domain.info().AndReturn( + (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple) + if retry_countdown == 0: + mock_domain.shutdown().WithSideEffects(count_shutdowns) + retry_countdown = retry_interval + else: + retry_countdown -= 1 + + if seconds_to_shutdown < timeout: + mock_domain.info().AndReturn( + (libvirt_driver.VIR_DOMAIN_SHUTDOWN,) + info_tuple) + + self.mox.ReplayAll() + + def fake_lookup_by_name(instance_name): + return mock_domain + + def fake_create_domain(**kwargs): + self.reboot_create_called = True + + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = {"name": "instancename", "id": "instanceid", + "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"} + self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name) + self.stubs.Set(conn, '_create_domain', fake_create_domain) + result = conn._clean_shutdown(instance, timeout, retry_interval) + + self.assertEqual(succeeds, result) + self.assertEqual(shutdown_attempts, len(shutdown_count)) + + def test_clean_shutdown_first_time(self): + self._test_clean_shutdown(seconds_to_shutdown=2, + timeout=5, + retry_interval=3, + shutdown_attempts=1, + succeeds=True) + + def test_clean_shutdown_with_retry(self): + self._test_clean_shutdown(seconds_to_shutdown=4, + timeout=5, + retry_interval=3, + shutdown_attempts=2, + succeeds=True) + + def test_clean_shutdown_failure(self): + self._test_clean_shutdown(seconds_to_shutdown=6, + timeout=5, + retry_interval=3, + shutdown_attempts=2, + succeeds=False) + + def test_clean_shutdown_no_wait(self): + self._test_clean_shutdown(seconds_to_shutdown=6, + timeout=0, + retry_interval=3, + shutdown_attempts=1, + succeeds=False) + def test_resume(self): dummyxml = ("instance-0000000a" "" diff --git a/nova/tests/virt/test_ironic_api_contracts.py b/nova/tests/virt/test_ironic_api_contracts.py index 
b63a8dd632..730ba942a2 100644 --- a/nova/tests/virt/test_ironic_api_contracts.py +++ b/nova/tests/virt/test_ironic_api_contracts.py @@ -103,7 +103,7 @@ def test_ComputeDriver_signatures(self): self._check_method(driver.ComputeDriver.power_off, "ComputeDriver.power_off", - ['self', 'instance']) + ['self', 'instance', 'timeout', 'retry_interval']) self._check_method(driver.ComputeDriver.power_on, "ComputeDriver.power_on", diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py index ec99d4de45..7f94a98623 100644 --- a/nova/virt/baremetal/driver.py +++ b/nova/virt/baremetal/driver.py @@ -407,8 +407,9 @@ def cleanup(self, context, instance, network_info, block_device_info=None, """Cleanup after instance being destroyed.""" pass - def power_off(self, instance, node=None): + def power_off(self, instance, timeout=0, retry_interval=0, node=None): """Power off the specified instance.""" + # TODO(PhilDay): Add support for timeout (clean shutdown) if not node: node = _get_baremetal_node_by_instance_uuid(instance['uuid']) pm = get_power_manager(node=node, instance=instance) diff --git a/nova/virt/driver.py b/nova/virt/driver.py index b64459e2ad..ea05ba2878 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -589,10 +589,13 @@ def unrescue(self, instance, network_info): # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() - def power_off(self, instance): + def power_off(self, instance, timeout=0, retry_interval=0): """Power off the specified instance. 
:param instance: nova.objects.instance.Instance + :param timeout: time to wait for GuestOS to shutdown + :param retry_interval: How often to signal guest while + waiting for it to shutdown """ raise NotImplementedError() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 5ffcbf8f36..ce7f194cae 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -184,7 +184,7 @@ def post_live_migration_at_destination(self, context, instance, block_device_info=None): pass - def power_off(self, instance): + def power_off(self, instance, shutdown_timeout=0, shutdown_attempts=0): pass def power_on(self, context, instance, network_info, block_device_info): diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py index 5b666cfa12..61d750ee3b 100644 --- a/nova/virt/hyperv/driver.py +++ b/nova/virt/hyperv/driver.py @@ -109,7 +109,8 @@ def suspend(self, instance): def resume(self, context, instance, network_info, block_device_info=None): self._vmops.resume(instance) - def power_off(self, instance): + def power_off(self, instance, timeout=0, retry_interval=0): + # TODO(PhilDay): Add support for timeout (clean shutdown) self._vmops.power_off(instance) def power_on(self, context, instance, network_info, diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 1675a92c4a..b5ecf74132 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -2182,8 +2182,85 @@ def unpause(self, instance): dom = self._lookup_by_name(instance['name']) dom.resume() - def power_off(self, instance): + def _clean_shutdown(self, instance, timeout, retry_interval): + """Attempt to shutdown the instance gracefully. 
+ + :param instance: The instance to be shutdown + :param timeout: How long to wait in seconds for the instance to + shutdown + :param retry_interval: How often in seconds to signal the instance + to shutdown while waiting + + :returns: True if the shutdown succeeded + """ + + # List of states that represent a shutdown instance + SHUTDOWN_STATES = [power_state.SHUTDOWN, + power_state.CRASHED] + + try: + dom = self._lookup_by_name(instance["name"]) + except exception.InstanceNotFound: + # If the instance has gone then we don't need to + # wait for it to shutdown + return True + + (state, _max_mem, _mem, _cpus, _t) = dom.info() + state = LIBVIRT_POWER_STATE[state] + if state in SHUTDOWN_STATES: + LOG.info(_LI("Instance already shutdown."), + instance=instance) + return True + + LOG.debug("Shutting down instance from state %s", state, + instance=instance) + dom.shutdown() + retry_countdown = retry_interval + + for sec in six.moves.range(timeout): + + dom = self._lookup_by_name(instance["name"]) + (state, _max_mem, _mem, _cpus, _t) = dom.info() + state = LIBVIRT_POWER_STATE[state] + + if state in SHUTDOWN_STATES: + LOG.info(_LI("Instance shutdown successfully after %d " + "seconds."), sec, instance=instance) + return True + + # Note(PhilD): We can't assume that the Guest was able to process + # any previous shutdown signal (for example it may + # have still been startingup, so within the overall + # timeout we re-trigger the shutdown every + # retry_interval + if retry_countdown == 0: + retry_countdown = retry_interval + # Instance could shutdown at any time, in which case we + # will get an exception when we call shutdown + try: + LOG.debug("Instance in state %s after %d seconds - " + "resending shutdown", state, sec, + instance=instance) + dom.shutdown() + except libvirt.libvirtError: + # Assume this is because its now shutdown, so loop + # one more time to clean up. 
+ LOG.debug("Ignoring libvirt exception from shutdown " + "request.", instance=instance) + continue + else: + retry_countdown -= 1 + + time.sleep(1) + + LOG.info(_LI("Instance failed to shutdown in %d seconds."), + timeout, instance=instance) + return False + + def power_off(self, instance, timeout=0, retry_interval=0): """Power off the specified instance.""" + if timeout: + self._clean_shutdown(instance, timeout, retry_interval) self._destroy(instance) def power_on(self, context, instance, network_info, diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index 0adb52de97..2bbf8698b0 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -494,8 +494,9 @@ def unrescue(self, instance, network_info): _vmops = self._get_vmops_for_compute_node(instance.node) _vmops.unrescue(instance) - def power_off(self, instance): + def power_off(self, instance, timeout=0, retry_interval=0): """Power off the specified instance.""" + # TODO(PhilDay): Add support for timeout (clean shutdown) _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.power_off(instance) diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py index 9cd6fd4be2..5ec51ce898 100644 --- a/nova/virt/xenapi/driver.py +++ b/nova/virt/xenapi/driver.py @@ -299,8 +299,9 @@ def unrescue(self, instance, network_info): """Unrescue the specified instance.""" self._vmops.unrescue(instance) - def power_off(self, instance): + def power_off(self, instance, timeout=0, retry_interval=0): """Power off the specified instance.""" + # TODO(PhilDay): Add support for timeout (clean shutdown) self._vmops.power_off(instance) def power_on(self, context, instance, network_info, From 41f6e4afc91a2454940abff947bf07973f229ea8 Mon Sep 17 00:00:00 2001 From: Phil Day Date: Fri, 25 Jul 2014 19:13:51 +0000 Subject: [PATCH 418/486] Resize should give guests a chance to shutdown Currently in libvirt operations which power off an instance such as stop, shelve, rescue, and 
resize simply destroy the underlying VM. Some GuestOS's do not react well to this type of power failure, and so it would be better if these operations followed the same approach as soft_reboot and give the GuestOS as chance to shutdown gracefully. This is one of a set of changes that will eventually expose the choice of whether to give the GuestOS a chance to shutdown via the API. This change implements the compute manager changes to resize for libvirt. Partially-Implements: blueprint user-defined-shutdown DocImpact Change-Id: I03a52cfcc77d89e0982fcdf88be0651b3f12cbdd --- nova/compute/manager.py | 8 ++++++-- nova/tests/compute/test_compute.py | 19 +++++++++++++++---- nova/virt/driver.py | 6 +++++- nova/virt/fake.py | 3 ++- nova/virt/hyperv/driver.py | 4 +++- nova/virt/libvirt/driver.py | 7 ++++--- nova/virt/vmwareapi/driver.py | 4 +++- nova/virt/xenapi/driver.py | 4 +++- 8 files changed, 41 insertions(+), 14 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 83463db6a4..6e27a1ce2c 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -3563,7 +3563,8 @@ def _reschedule_resize_or_reraise(self, context, image, instance, exc_info, @errors_out_migration @wrap_instance_fault def resize_instance(self, context, instance, image, - reservations, migration, instance_type): + reservations, migration, instance_type, + clean_shutdown=True): """Starts the migration of a running instance to another host.""" quotas = quotas_obj.Quotas.from_reservations(context, @@ -3591,10 +3592,13 @@ def resize_instance(self, context, instance, image, block_device_info = self._get_instance_block_device_info( context, instance, bdms=bdms) + timeout, retry_interval = self._get_power_off_values(context, + instance, clean_shutdown) disk_info = self.driver.migrate_disk_and_power_off( context, instance, migration.dest_host, instance_type, network_info, - block_device_info) + block_device_info, + timeout, retry_interval) 
self._terminate_volume_connections(context, instance, bdms) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 5d24778bcf..869297ed6e 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -4810,7 +4810,7 @@ def throw_up(*args, **kwargs): self.compute.terminate_instance(self.context, self._objectify(instance), [], []) - def test_resize_instance(self): + def _test_resize_instance(self, clean_shutdown=True): # Ensure instance can be migrated/resized. instance = self._create_fake_instance_obj() instance_type = flavors.get_default_flavor() @@ -4844,20 +4844,31 @@ def test_resize_instance(self): mock.patch.object( self.compute, '_get_instance_block_device_info', return_value='fake_bdinfo'), - mock.patch.object(self.compute, '_terminate_volume_connections') + mock.patch.object(self.compute, '_terminate_volume_connections'), + mock.patch.object(self.compute, '_get_power_off_values', + return_value=(1, 2)) ) as (mock_get_by_inst_uuid, mock_get_instance_vol_bdinfo, - mock_terminate_vol_conn): + mock_terminate_vol_conn, mock_get_power_off_values): self.compute.resize_instance(self.context, instance=instance, migration=migration, image={}, reservations=[], - instance_type=jsonutils.to_primitive(instance_type)) + instance_type=jsonutils.to_primitive(instance_type), + clean_shutdown=clean_shutdown) mock_get_instance_vol_bdinfo.assert_called_once_with( self.context, instance, bdms='fake_bdms') mock_terminate_vol_conn.assert_called_once_with(self.context, instance, 'fake_bdms') + mock_get_power_off_values.assert_caleld_once_with(self.context, + instance, clean_shutdown) self.assertEqual(migration.dest_compute, instance.host) self.compute.terminate_instance(self.context, self._objectify(instance), [], []) + def test_resize_instance(self): + self._test_resize_instance() + + def test_resize_instance_forced_shutdown(self): + self._test_resize_instance(clean_shutdown=False) + def _test_confirm_resize(self, 
power_on): # Common test case method for confirm_resize def fake(*args, **kwargs): diff --git a/nova/virt/driver.py b/nova/virt/driver.py index ea05ba2878..69ff77146d 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -461,11 +461,15 @@ def detach_interface(self, instance, vif): def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, - block_device_info=None): + block_device_info=None, + timeout=0, retry_interval=0): """Transfers the disk of a running instance in multiple phases, turning off the instance before the end. :param instance: nova.objects.instance.Instance + :param timeout: time to wait for GuestOS to shutdown + :param retry_interval: How often to signal guest while + waiting for it to shutdown """ raise NotImplementedError() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index ce7f194cae..fc99b75f76 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -171,7 +171,8 @@ def poll_rebooting_instances(self, timeout, instances): def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, - block_device_info=None): + block_device_info=None, + timeout=0, retry_interval=0): pass def finish_revert_migration(self, context, instance, network_info, diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py index 61d750ee3b..4c49ecbc82 100644 --- a/nova/virt/hyperv/driver.py +++ b/nova/virt/hyperv/driver.py @@ -184,7 +184,9 @@ def unfilter_instance(self, instance, network_info): def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, - block_device_info=None): + block_device_info=None, + timeout=0, retry_interval=0): + # TODO(PhilDay): Add support for timeout (clean shutdown) return self._migrationops.migrate_disk_and_power_off(context, instance, dest, flavor, diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index b5ecf74132..baa8dd2cee 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -5192,9 +5192,10 @@ def 
_is_storage_shared_with(self, dest, inst_base): def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, - block_device_info=None): + block_device_info=None, + timeout=0, retry_interval=0): LOG.debug("Starting migrate_disk_and_power_off", - instance=instance) + instance=instance) # Checks if the migration needs a disk resize down. for kind in ('root_gb', 'ephemeral_gb'): @@ -5220,7 +5221,7 @@ def migrate_disk_and_power_off(self, context, instance, dest, if not shared_storage: utils.execute('ssh', dest, 'mkdir', '-p', inst_base) - self.power_off(instance) + self.power_off(instance, timeout, retry_interval) block_device_mapping = driver.block_device_info_get_mapping( block_device_info) diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index 2bbf8698b0..12147b5032 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -207,10 +207,12 @@ def list_instances(self): def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, - block_device_info=None): + block_device_info=None, + timeout=0, retry_interval=0): """Transfers the disk of a running instance in multiple phases, turning off the instance before the end. """ + # TODO(PhilDay): Add support for timeout (clean shutdown) _vmops = self._get_vmops_for_compute_node(instance['node']) return _vmops.migrate_disk_and_power_off(context, instance, dest, flavor) diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py index 5ec51ce898..68b730263c 100644 --- a/nova/virt/xenapi/driver.py +++ b/nova/virt/xenapi/driver.py @@ -269,11 +269,13 @@ def unpause(self, instance): def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, - block_device_info=None): + block_device_info=None, + timeout=0, retry_interval=0): """Transfers the VHD of a running instance to another host, then shuts off the instance copies over the COW disk """ # NOTE(vish): Xen currently does not use network info. 
+ # TODO(PhilDay): Add support for timeout (clean shutdown) return self._vmops.migrate_disk_and_power_off(context, instance, dest, flavor, block_device_info) From 09b0ec5cf38ef92fc018f5776420e72879d6aa37 Mon Sep 17 00:00:00 2001 From: Phil Day Date: Tue, 5 Aug 2014 16:26:38 +0000 Subject: [PATCH 419/486] Rescue should give guests a chance to shutdown Currently in libvirt operations which power off an instance such as stop, shelve, rescue, and resize simply destroy the underlying VM. Some GuestOS's do not react well to this type of power failure, and so it would be better if these operations followed the same approach as soft_reboot and give the GuestOS as chance to shutdown gracefully. This is one of a set of changes that will eventually expose the choice of whether to give the GuestOS a chance to shutdown via the API. This change implements the compute manager changes to rescue. Partially-Implements: blueprint user-defined-shutdown DocImpact Change-Id: I296397f4fe3308be3727ec127929cea60d46b882 --- nova/compute/manager.py | 4 +++- nova/tests/compute/test_compute_mgr.py | 18 ++++++++++++++---- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 83463db6a4..53a16599d7 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -3092,7 +3092,7 @@ def _get_rescue_image(self, context, instance, rescue_image_ref=None): @wrap_instance_event @wrap_instance_fault def rescue_instance(self, context, instance, rescue_password, - rescue_image_ref=None): + rescue_image_ref=None, clean_shutdown=True): context = context.elevated() LOG.audit(_('Rescuing'), context=context, instance=instance) @@ -3111,6 +3111,8 @@ def rescue_instance(self, context, instance, rescue_password, network_info=network_info) try: + self._power_off_instance(context, instance, clean_shutdown) + self.driver.rescue(context, instance, network_info, rescue_image_meta, admin_password) diff --git a/nova/tests/compute/test_compute_mgr.py 
b/nova/tests/compute/test_compute_mgr.py index b83aea34a7..c5f6708ef0 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -1392,7 +1392,7 @@ def test_remove_volume_connection(self, inst_from_db, detach, bdm_get): inst_obj) detach.assert_called_once_with(self.context, inst_obj, bdm) - def test_rescue(self): + def _test_rescue(self, clean_shutdown=True): instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.ACTIVE) fake_nw_info = network_model.NetworkInfo() @@ -1408,6 +1408,7 @@ def test_rescue(self): mock.patch.object(self.compute, '_get_rescue_image', return_value=rescue_image_meta), mock.patch.object(self.compute, '_notify_about_instance_usage'), + mock.patch.object(self.compute, '_power_off_instance'), mock.patch.object(self.compute.driver, 'rescue'), mock.patch.object(self.compute.conductor_api, 'notify_usage_exists'), @@ -1416,12 +1417,12 @@ def test_rescue(self): mock.patch.object(instance, 'save') ) as ( event_start, event_finish, elevated_context, get_nw_info, - get_rescue_image, notify_instance_usage, driver_rescue, - notify_usage_exists, get_power_state, instance_save + get_rescue_image, notify_instance_usage, power_off_instance, + driver_rescue, notify_usage_exists, get_power_state, instance_save ): self.compute.rescue_instance( self.context, instance, rescue_password='verybadpass', - rescue_image_ref=None) + rescue_image_ref=None, clean_shutdown=clean_shutdown) # assert the field values on the instance object self.assertEqual(vm_states.RESCUED, instance.vm_state) @@ -1445,6 +1446,9 @@ def test_rescue(self): ] notify_instance_usage.assert_has_calls(notify_calls) + power_off_instance.assert_called_once_with(self.context, instance, + clean_shutdown) + driver_rescue.assert_called_once_with( self.context, instance, fake_nw_info, rescue_image_meta, 'verybadpass') @@ -1455,6 +1459,12 @@ def test_rescue(self): instance_save.assert_called_once_with( expected_task_state=task_states.RESCUING) + 
def test_rescue(self): + self._test_rescue() + + def test_rescue_forced_shutdown(self): + self._test_rescue(clean_shutdown=False) + def test_unrescue(self): instance = fake_instance.fake_instance_obj( self.context, vm_state=vm_states.RESCUED) From b252d603f473430211e1d39cda6ffeb833282e31 Mon Sep 17 00:00:00 2001 From: Phil Day Date: Fri, 8 Aug 2014 17:34:07 +0000 Subject: [PATCH 420/486] Shelve should give guests a chance to shutdown Currently in libvirt operations which power off an instance such as stop, shelve, rescue, and resize simply destroy the underlying VM. Some GuestOS's do not react well to this type of power failure, and so it would be better if these operations followed the same approach as soft_reboot and give the GuestOS as chance to shutdown gracefully. This is one of a set of changes that will eventually expose the choice of whether to give the GuestOS a chance to shutdown via the API. This change implements the compute manager changes to shelve. Partially-Implements: blueprint user-defined-shutdown DocImpact Change-Id: Iec3dfd17725440958aac395ebc471e51afd6522e --- nova/compute/manager.py | 5 +++-- nova/tests/compute/test_shelve.py | 14 +++++++++++--- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 53a16599d7..f13cca2ff1 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -3928,7 +3928,8 @@ def resume_instance(self, context, instance): @reverts_task_state @wrap_instance_event @wrap_instance_fault - def shelve_instance(self, context, instance, image_id): + def shelve_instance(self, context, instance, image_id, + clean_shutdown=True): """Shelve an instance. This should be used when you want to take a snapshot of the instance. 
@@ -3956,7 +3957,7 @@ def update_task_state(task_state, expected_state=task_states.SHELVING): instance.task_state = task_state instance.save(expected_task_state=expected_state) - self.driver.power_off(instance) + self._power_off_instance(context, instance, clean_shutdown) current_power_state = self._get_power_state(context, instance) self.driver.snapshot(context, instance, image_id, update_task_state) diff --git a/nova/tests/compute/test_shelve.py b/nova/tests/compute/test_shelve.py index 8ec670096f..d77a611567 100644 --- a/nova/tests/compute/test_shelve.py +++ b/nova/tests/compute/test_shelve.py @@ -45,7 +45,7 @@ def _fake_resources(): class ShelveComputeManagerTestCase(test_compute.BaseTestCase): - def _shelve_instance(self, shelved_offload_time): + def _shelve_instance(self, shelved_offload_time, clean_shutdown=True): CONF.set_override('shelved_offload_time', shelved_offload_time) db_instance = jsonutils.to_primitive(self._create_fake_instance()) instance = objects.Instance.get_by_uuid( @@ -71,7 +71,12 @@ def _shelve_instance(self, shelved_offload_time): self.compute._notify_about_instance_usage(self.context, instance, 'shelve.start') - self.compute.driver.power_off(instance) + if clean_shutdown: + self.compute.driver.power_off(instance, + CONF.shutdown_timeout, + self.compute.SHUTDOWN_RETRY_INTERVAL) + else: + self.compute.driver.power_off(instance, 0, 0) self.compute._get_power_state(self.context, instance).AndReturn(123) self.compute.driver.snapshot(self.context, instance, 'fake_image_id', @@ -113,11 +118,14 @@ def _shelve_instance(self, shelved_offload_time): self.mox.ReplayAll() self.compute.shelve_instance(self.context, instance, - image_id=image_id) + image_id=image_id, clean_shutdown=clean_shutdown) def test_shelve(self): self._shelve_instance(-1) + def test_shelve_forced_shutdown(self): + self._shelve_instance(-1, clean_shutdown=False) + def test_shelve_offload(self): self._shelve_instance(0) From 87c8067defde854b29fd0d51a3fc1a01c2b832f9 Mon Sep 17 
00:00:00 2001 From: Ken'ichi Ohmichi Date: Tue, 19 Aug 2014 13:55:28 +0000 Subject: [PATCH 421/486] Share common test settings in test_flavor_manage In PrivateFlavorManageTest, there are duplicated test setting code in each test. This patch moves these settings into setUp() method to reduce code. In addition, this patch adds an internal method also for the same purpose. Change-Id: I31883497174141ac4034922981e0a846f1fc2deb --- .../compute/contrib/test_flavor_manage.py | 66 ++++++------------- 1 file changed, 20 insertions(+), 46 deletions(-) diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py index 925e2ab761..b9a10791ec 100644 --- a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py +++ b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py @@ -335,11 +335,6 @@ class FakeRequest(object): class PrivateFlavorManageTest(test.TestCase): def setUp(self): super(PrivateFlavorManageTest, self).setUp() - # self.stubs.Set(flavors, - # "get_flavor_by_flavor_id", - # fake_get_flavor_by_flavor_id) - # self.stubs.Set(flavors, "destroy", fake_destroy) - # self.stubs.Set(flavors, "create", fake_create) self.flags( osapi_compute_extension=[ 'nova.api.openstack.compute.contrib.select_extensions'], @@ -348,10 +343,11 @@ def setUp(self): self.controller = flavormanage.FlavorManageController() self.flavor_access_controller = flavor_access.FlavorAccessController() - self.app = fakes.wsgi_app(init_only=('flavors',)) - - def test_create_private_flavor_should_not_grant_flavor_access(self): - expected = { + self.ctxt = context.RequestContext('fake', 'fake', + is_admin=True, auth_token=True) + self.app = fakes.wsgi_app(init_only=('flavors',), + fake_auth_context=self.ctxt) + self.expected = { "flavor": { "name": "test", "ram": 512, @@ -359,59 +355,37 @@ def test_create_private_flavor_should_not_grant_flavor_access(self): "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 1, "swap": 512, - 
"rxtx_factor": 1, - "os-flavor-access:is_public": False + "rxtx_factor": 1 } } - ctxt = context.RequestContext('fake', 'fake', - is_admin=True, auth_token=True) - self.app = fakes.wsgi_app(init_only=('flavors',), - fake_auth_context=ctxt) + def _get_response(self): url = '/v2/fake/flavors' req = webob.Request.blank(url) req.headers['Content-Type'] = 'application/json' req.method = 'POST' - req.body = jsonutils.dumps(expected) + req.body = jsonutils.dumps(self.expected) res = req.get_response(self.app) - body = jsonutils.loads(res.body) - for key in expected["flavor"]: - self.assertEqual(body["flavor"][key], expected["flavor"][key]) + return jsonutils.loads(res.body) + + def test_create_private_flavor_should_not_grant_flavor_access(self): + self.expected["flavor"]["os-flavor-access:is_public"] = False + body = self._get_response() + for key in self.expected["flavor"]: + self.assertEqual(body["flavor"][key], self.expected["flavor"][key]) flavor_access_body = self.flavor_access_controller.index( FakeRequest(), body["flavor"]["id"]) expected_flavor_access_body = { - "tenant_id": "%s" % ctxt.project_id, + "tenant_id": "%s" % self.ctxt.project_id, "flavor_id": "%s" % body["flavor"]["id"] } self.assertNotIn(expected_flavor_access_body, flavor_access_body["flavor_access"]) def test_create_public_flavor_should_not_create_flavor_access(self): - expected = { - "flavor": { - "name": "test", - "ram": 512, - "vcpus": 2, - "disk": 1, - "OS-FLV-EXT-DATA:ephemeral": 1, - "swap": 512, - "rxtx_factor": 1, - "os-flavor-access:is_public": True - } - } - - ctxt = context.RequestContext('fake', 'fake', - is_admin=True, auth_token=True) - self.app = fakes.wsgi_app(init_only=('flavors',), - fake_auth_context=ctxt) + self.expected["flavor"]["os-flavor-access:is_public"] = True self.mox.StubOutWithMock(flavors, "add_flavor_access") self.mox.ReplayAll() - url = '/v2/fake/flavors' - req = webob.Request.blank(url) - req.headers['Content-Type'] = 'application/json' - req.method = 'POST' - 
req.body = jsonutils.dumps(expected) - res = req.get_response(self.app) - body = jsonutils.loads(res.body) - for key in expected["flavor"]: - self.assertEqual(body["flavor"][key], expected["flavor"][key]) + body = self._get_response() + for key in self.expected["flavor"]: + self.assertEqual(body["flavor"][key], self.expected["flavor"][key]) From d88b4cf31dbff942cf529e63ee5b09356970ed50 Mon Sep 17 00:00:00 2001 From: Alessandro Pilotti Date: Tue, 19 Aug 2014 13:54:31 +0300 Subject: [PATCH 422/486] Fixes Hyper-V unit test path separator issue Fixes an issue in a unit tests were path separators were not handled in a platform independent way causing the test to fail on Windows. Change-Id: Ifca77a561e5a95af8655f2373af665292b2ca3cb Co-Authored-By: Zsolt Dudas Closes-Bug: #1358702 --- nova/tests/virt/hyperv/test_pathutils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nova/tests/virt/hyperv/test_pathutils.py b/nova/tests/virt/hyperv/test_pathutils.py index f18b584637..0ded84ec6b 100644 --- a/nova/tests/virt/hyperv/test_pathutils.py +++ b/nova/tests/virt/hyperv/test_pathutils.py @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. +import os + import mock from nova import test @@ -23,7 +25,7 @@ class PathUtilsTestCase(test.NoDBTestCase): """Unit tests for the Hyper-V PathUtils class.""" def setUp(self): - self.fake_instance_dir = 'C:/fake_instance_dir' + self.fake_instance_dir = os.path.join('C:', 'fake_instance_dir') self.fake_instance_name = 'fake_instance_name' self._pathutils = pathutils.PathUtils() super(PathUtilsTestCase, self).setUp() @@ -43,9 +45,9 @@ def mock_exists(*args, **kwargs): def test_lookup_configdrive_path(self): for format_ext in constants.DISK_FORMAT_MAP: configdrive_path = self._mock_lookup_configdrive_path(format_ext) - self.assertEqual(configdrive_path, - self.fake_instance_dir + '/configdrive.' 
+ - format_ext) + fake_path = os.path.join(self.fake_instance_dir, + 'configdrive.' + format_ext) + self.assertEqual(configdrive_path, fake_path) def test_lookup_configdrive_path_non_exist(self): self._pathutils.get_instance_dir = mock.MagicMock( From 9b93f99fa6cdf3cc5536ca76d8acb135fa3aea4b Mon Sep 17 00:00:00 2001 From: Michael H Wilson Date: Thu, 15 May 2014 23:16:43 -0400 Subject: [PATCH 423/486] Allow three periodic tasks to hit slave Allows the following periodic tasks to hit a slave if a slave_connection is configured: _run_pending_deletes _instance_usage_audit _poll_volume_usage Blueprint: juno-slaveification Change-Id: I38585562e3d3579299e32b5f9ced9311f672abcc --- nova/compute/manager.py | 12 +++++++----- nova/db/api.py | 6 ++++-- nova/db/sqlalchemy/api.py | 5 +++-- nova/objects/instance.py | 14 ++++++++++---- nova/tests/compute/test_compute.py | 13 ++++++++----- nova/tests/compute/test_compute_mgr.py | 3 ++- nova/tests/objects/test_objects.py | 2 +- 7 files changed, 35 insertions(+), 20 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 718f285f1f..102dea5438 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -5248,7 +5248,8 @@ def _instance_usage_audit(self, context): begin, end = utils.last_completed_audit_period() instances = objects.InstanceList.get_active_by_window_joined( context, begin, end, host=self.host, - expected_attrs=['system_metadata', 'info_cache', 'metadata']) + expected_attrs=['system_metadata', 'info_cache', 'metadata'], + use_slave=True) num_instances = len(instances) errors = 0 successes = 0 @@ -5380,13 +5381,13 @@ def _poll_bandwidth_usage(self, context): last_refreshed=refreshed, update_cells=update_cells) - def _get_host_volume_bdms(self, context): + def _get_host_volume_bdms(self, context, use_slave=False): """Return all block device mappings on a compute host.""" compute_host_bdms = [] instances = objects.InstanceList.get_by_host(context, self.host) for instance in instances: bdms 
= objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid) + context, instance.uuid, use_slave=use_slave) instance_bdms = [bdm for bdm in bdms if bdm.is_volume] compute_host_bdms.append(dict(instance=instance, instance_bdms=instance_bdms)) @@ -5413,7 +5414,8 @@ def _poll_volume_usage(self, context, start_time=None): if not start_time: start_time = utils.last_completed_audit_period()[1] - compute_host_bdms = self._get_host_volume_bdms(context) + compute_host_bdms = self._get_host_volume_bdms(context, + use_slave=True) if not compute_host_bdms: return @@ -5925,7 +5927,7 @@ def _run_pending_deletes(self, context): attrs = ['info_cache', 'security_groups', 'system_metadata'] with utils.temporary_mutation(context, read_deleted='yes'): instances = objects.InstanceList.get_by_filters( - context, filters, expected_attrs=attrs) + context, filters, expected_attrs=attrs, use_slave=True) LOG.debug('There are %d instances to clean', len(instances)) for instance in instances: diff --git a/nova/db/api.py b/nova/db/api.py index d2af85e4a6..61b295e0c7 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -699,14 +699,16 @@ def instance_get_all_by_filters(context, filters, sort_key='created_at', def instance_get_active_by_window_joined(context, begin, end=None, - project_id=None, host=None): + project_id=None, host=None, + use_slave=False): """Get instances and joins active during a certain time window. Specifying a project_id will filter for a certain project. Specifying a host will filter for instances on a given compute host. 
""" return IMPL.instance_get_active_by_window_joined(context, begin, end, - project_id, host) + project_id, host, + use_slave=use_slave) def instance_get_all_by_host(context, host, diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a7b0434fc1..859dd0ee82 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -2070,9 +2070,10 @@ def regex_filter(query, model, filters): @require_context def instance_get_active_by_window_joined(context, begin, end=None, - project_id=None, host=None): + project_id=None, host=None, + use_slave=False): """Return instances and joins that were active during window.""" - session = get_session() + session = get_session(use_slave=use_slave) query = session.query(models.Instance) query = query.options(joinedload('info_cache')).\ diff --git a/nova/objects/instance.py b/nova/objects/instance.py index 263b327731..c2eacd7572 100644 --- a/nova/objects/instance.py +++ b/nova/objects/instance.py @@ -616,7 +616,8 @@ class InstanceList(base.ObjectListBase, base.NovaObject): # Version 1.4: Instance <= version 1.12 # Version 1.5: Added method get_active_by_window_joined. # Version 1.6: Instance <= version 1.13 - VERSION = '1.6' + # Version 1.7: Added use_slave to get_active_by_window_joined + VERSION = '1.7' fields = { 'objects': fields.ListOfObjectsField('Instance'), @@ -629,6 +630,7 @@ class InstanceList(base.ObjectListBase, base.NovaObject): '1.4': '1.12', '1.5': '1.12', '1.6': '1.13', + '1.7': '1.13', } @base.remotable_classmethod @@ -676,7 +678,8 @@ def get_hung_in_rebooting(cls, context, reboot_window, @base.remotable_classmethod def _get_active_by_window_joined(cls, context, begin, end=None, project_id=None, host=None, - expected_attrs=None): + expected_attrs=None, + use_slave=False): # NOTE(mriedem): We need to convert the begin/end timestamp strings # to timezone-aware datetime objects for the DB API call. 
begin = timeutils.parse_isotime(begin) @@ -692,7 +695,8 @@ def _get_active_by_window_joined(cls, context, begin, end=None, @classmethod def get_active_by_window_joined(cls, context, begin, end=None, project_id=None, host=None, - expected_attrs=None): + expected_attrs=None, + use_slave=False): """Get instances and joins active during a certain time window. :param:context: nova request context @@ -702,6 +706,7 @@ def get_active_by_window_joined(cls, context, begin, end=None, :param:host: used to filter instances on a given compute host :param:expected_attrs: list of related fields that can be joined in the database layer when querying for instances + :param use_slave if True, ship this query off to a DB slave :returns: InstanceList """ @@ -711,7 +716,8 @@ def get_active_by_window_joined(cls, context, begin, end=None, end = timeutils.isotime(end) if end else None return cls._get_active_by_window_joined(context, begin, end, project_id, host, - expected_attrs) + expected_attrs, + use_slave=use_slave) @base.remotable_classmethod def get_by_security_group_id(cls, context, security_group_id): diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index e921f91570..bf6b8218e0 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -670,7 +670,8 @@ def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host): mock_get_by_host.assert_called_once_with('fake-context', self.compute.host) mock_get_by_inst.assert_called_once_with('fake-context', - 'fake-instance-uuid') + 'fake-instance-uuid', + use_slave=False) self.assertEqual(expected_host_bdms, got_host_bdms) def test_poll_volume_usage_disabled(self): @@ -691,7 +692,7 @@ def test_poll_volume_usage_returns_no_vols(self): self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage') # Following methods are called. 
utils.last_completed_audit_period().AndReturn((0, 0)) - self.compute._get_host_volume_bdms(ctxt).AndReturn([]) + self.compute._get_host_volume_bdms(ctxt, use_slave=True).AndReturn([]) self.mox.ReplayAll() self.flags(volume_usage_poll_interval=10) @@ -707,7 +708,8 @@ def test_poll_volume_usage_with_data(self): lambda x, y: [3, 4]) # All the mocks are called utils.last_completed_audit_period().AndReturn((10, 20)) - self.compute._get_host_volume_bdms(ctxt).AndReturn([1, 2]) + self.compute._get_host_volume_bdms(ctxt, + use_slave=True).AndReturn([1, 2]) self.compute._update_volume_usage_cache(ctxt, [3, 4]) self.mox.ReplayAll() self.flags(volume_usage_poll_interval=10) @@ -736,8 +738,9 @@ def test_detach_volume_usage(self): AndReturn(bdm) self.compute.driver.block_stats(instance['name'], 'vdb').\ AndReturn([1L, 30L, 1L, 20L, None]) - self.compute._get_host_volume_bdms(self.context).AndReturn( - host_volume_bdms) + self.compute._get_host_volume_bdms(self.context, + use_slave=True).AndReturn( + host_volume_bdms) self.compute.driver.get_all_volume_usage( self.context, host_volume_bdms).AndReturn( [{'volume': 1, diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 463feff8cd..2d55037beb 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -938,7 +938,8 @@ def get_by_filters(self, *args, **kwargs): {'deleted': True, 'soft_deleted': False, 'host': 'fake-mini', 'cleaned': False}, expected_attrs=['info_cache', 'security_groups', - 'system_metadata']).AndReturn([a, b, c]) + 'system_metadata'], + use_slave=True).AndReturn([a, b, c]) self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files') self.compute.driver.delete_instance_files( diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index a4eadcfce8..ed2805a1ac 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -954,7 +954,7 @@ def 
test_object_serialization_iterables(self): 'InstanceGroup': '1.7-b31ea31fdb452ab7810adbe789244f91', 'InstanceGroupList': '1.2-a474822eebc3e090012e581adcc1fa09', 'InstanceInfoCache': '1.5-ef64b604498bfa505a8c93747a9d8b2f', - 'InstanceList': '1.6-6891f6f61f8eb0b55c0cefac3f734c24', + 'InstanceList': '1.7-71e48495e83df551cefe6691478c865c', 'KeyPair': '1.1-3410f51950d052d861c11946a6ae621a', 'KeyPairList': '1.0-71132a568cc5d078ba1748a9c02c87b8', 'Migration': '1.1-67c47726c2c71422058cd9d149d6d3ed', From c51be0903f2d20bdf1ea968efc57f4c6b979cb6f Mon Sep 17 00:00:00 2001 From: Andrew Laski Date: Tue, 19 Aug 2014 12:24:51 -0400 Subject: [PATCH 424/486] Send create.end notification even if instance is deleted There was an unprotected instance.save() call which could raise an exception and bypass a create.end notification. This adds a try/except block around that call with appropriate handling of the exception. Change-Id: I308ee84f6ab2bb37ef6570ff896017615a5bf746 Closes-bug: #1358795 --- nova/compute/manager.py | 9 +++++++- nova/tests/compute/test_compute_mgr.py | 31 ++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 718f285f1f..8f2048c7aa 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -2058,7 +2058,14 @@ def _build_and_run_instance(self, context, instance, image, injected_files, instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.launched_at = timeutils.utcnow() - instance.save(expected_task_state=task_states.SPAWNING) + + try: + instance.save(expected_task_state=task_states.SPAWNING) + except (exception.InstanceNotFound, + exception.UnexpectedDeletingTaskStateError) as e: + with excutils.save_and_reraise_exception(): + self._notify_about_instance_usage(context, instance, + 'create.end', fault=e) self._notify_about_instance_usage(context, instance, 'create.end', extra_usage_info={'message': _('Success')}, diff --git 
a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 463feff8cd..ac1d2ad4d7 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -2588,6 +2588,37 @@ def fake_notify(*args, **kwargs): mock_notify.call_count - 1] self.assertEqual(expected_call, create_end_call) + @mock.patch.object(conductor_rpcapi.ConductorAPI, 'instance_update') + def test_create_end_on_instance_delete(self, mock_instance_update): + + def fake_notify(*args, **kwargs): + if args[2] == 'create.end': + # Check that launched_at is set on the instance + self.assertIsNotNone(args[1].launched_at) + + exc = exception.InstanceNotFound(instance_id='') + + with contextlib.nested( + mock.patch.object(self.compute.driver, 'spawn'), + mock.patch.object(self.compute, + '_build_networks_for_instance', return_value=[]), + mock.patch.object(self.instance, 'save', + side_effect=[None, None, exc]), + mock.patch.object(self.compute, '_notify_about_instance_usage', + side_effect=fake_notify) + ) as (mock_spawn, mock_networks, mock_save, mock_notify): + self.assertRaises(exception.InstanceNotFound, + self.compute._build_and_run_instance, self.context, + self.instance, self.image, self.injected_files, + self.admin_pass, self.requested_networks, + self.security_groups, self.block_device_mapping, self.node, + self.limits, self.filter_properties) + expected_call = mock.call(self.context, self.instance, + 'create.end', fault=exc) + create_end_call = mock_notify.call_args_list[ + mock_notify.call_count - 1] + self.assertEqual(expected_call, create_end_call) + class ComputeManagerMigrationTestCase(test.NoDBTestCase): def setUp(self): From 86356bf6f13ff5a5394db6b4547b26cd6092b566 Mon Sep 17 00:00:00 2001 From: Ian Cordasco Date: Mon, 30 Jun 2014 14:49:03 -0500 Subject: [PATCH 425/486] Use rfc3986 library to validate URL paths and URIs More work needs to be done in rfc3986 to give the user more control over what they consider to be a valid URI in 
the context of RFC 3986. For example, a previous incarnation of these tests checked that "1" and "abc" were invalid when according to the RFC they are. Update the API samples and tests to use valid URIs DocImpact Change-Id: I288fbaead64990db1053b7a11e82904611b8498f --- doc/api_samples/os-agents/agent-post-req.json | 4 +- doc/api_samples/os-agents/agent-post-req.xml | 4 +- .../os-agents/agent-post-resp.json | 4 +- doc/api_samples/os-agents/agent-post-resp.xml | 4 +- .../os-agents/agent-update-put-req.json | 4 +- .../os-agents/agent-update-put-req.xml | 4 +- .../os-agents/agent-update-put-resp.json | 4 +- .../os-agents/agent-update-put-resp.xml | 4 +- .../os-agents/agents-get-resp.json | 4 +- doc/api_samples/os-agents/agents-get-resp.xml | 4 +- .../api_samples/os-agents/agent-post-req.json | 4 +- .../os-agents/agent-post-resp.json | 4 +- .../os-agents/agent-update-put-req.json | 2 +- .../os-agents/agent-update-put-resp.json | 4 +- .../os-agents/agents-get-resp.json | 4 +- nova/api/validation/validators.py | 7 +++ nova/api/validator.py | 21 ++----- .../openstack/compute/contrib/test_agents.py | 32 +++++----- .../compute/plugins/v3/test_agents.py | 46 +++++++-------- nova/tests/integrated/test_api_samples.py | 8 +-- .../os-agents/agent-post-resp.json.tpl | 4 +- .../os-agents/agent-update-put-resp.json.tpl | 4 +- .../os-agents/agents-get-resp.json.tpl | 4 +- nova/tests/integrated/v3/test_agents.py | 6 +- nova/tests/test_api_validation.py | 58 +++++++++++++++++++ requirements.txt | 1 + 26 files changed, 151 insertions(+), 98 deletions(-) diff --git a/doc/api_samples/os-agents/agent-post-req.json b/doc/api_samples/os-agents/agent-post-req.json index 217993b17f..1913498547 100644 --- a/doc/api_samples/os-agents/agent-post-req.json +++ b/doc/api_samples/os-agents/agent-post-req.json @@ -5,6 +5,6 @@ "architecture": "x86", "version": "8.0", "md5hash": "add6bb58e139be103324d04d82d8f545", - "url": "xxxxxxxxxxxx" + "url": "http://example.com/path/to/resource" } -} \ No newline 
at end of file +} diff --git a/doc/api_samples/os-agents/agent-post-req.xml b/doc/api_samples/os-agents/agent-post-req.xml index be93e97ce4..b7b7d036ba 100644 --- a/doc/api_samples/os-agents/agent-post-req.xml +++ b/doc/api_samples/os-agents/agent-post-req.xml @@ -5,5 +5,5 @@ x86 8.0 add6bb58e139be103324d04d82d8f545 - xxxxxxxxxxxx - \ No newline at end of file + http://example.com/path/to/resource + diff --git a/doc/api_samples/os-agents/agent-post-resp.json b/doc/api_samples/os-agents/agent-post-resp.json index f6c760cc67..24ddede90b 100644 --- a/doc/api_samples/os-agents/agent-post-resp.json +++ b/doc/api_samples/os-agents/agent-post-resp.json @@ -5,7 +5,7 @@ "hypervisor": "hypervisor", "md5hash": "add6bb58e139be103324d04d82d8f545", "os": "os", - "url": "xxxxxxxxxxxx", + "url": "http://example.com/path/to/resource", "version": "8.0" } -} \ No newline at end of file +} diff --git a/doc/api_samples/os-agents/agent-post-resp.xml b/doc/api_samples/os-agents/agent-post-resp.xml index 79f62b7fb9..abfe15f909 100644 --- a/doc/api_samples/os-agents/agent-post-resp.xml +++ b/doc/api_samples/os-agents/agent-post-resp.xml @@ -1,10 +1,10 @@ - xxxxxxxxxxxx + http://example.com/path/to/resource hypervisor add6bb58e139be103324d04d82d8f545 8.0 x86 os 1 - \ No newline at end of file + diff --git a/doc/api_samples/os-agents/agent-update-put-req.json b/doc/api_samples/os-agents/agent-update-put-req.json index e4eaf53525..f7398504d6 100644 --- a/doc/api_samples/os-agents/agent-update-put-req.json +++ b/doc/api_samples/os-agents/agent-update-put-req.json @@ -1,7 +1,7 @@ { "para": { - "url": "xxx://xxxx/xxx/xxx", + "url": "http://example.com/path/to/resource", "md5hash": "add6bb58e139be103324d04d82d8f545", "version": "7.0" } -} \ No newline at end of file +} diff --git a/doc/api_samples/os-agents/agent-update-put-req.xml b/doc/api_samples/os-agents/agent-update-put-req.xml index f759880c17..9a25cefdda 100644 --- a/doc/api_samples/os-agents/agent-update-put-req.xml +++ 
b/doc/api_samples/os-agents/agent-update-put-req.xml @@ -1,6 +1,6 @@ 7.0 - xxx://xxxx/xxx/xxx + http://example.com/path/to/resource add6bb58e139be103324d04d82d8f545 - \ No newline at end of file + diff --git a/doc/api_samples/os-agents/agent-update-put-resp.json b/doc/api_samples/os-agents/agent-update-put-resp.json index 6b67222c8c..2919d21388 100644 --- a/doc/api_samples/os-agents/agent-update-put-resp.json +++ b/doc/api_samples/os-agents/agent-update-put-resp.json @@ -2,7 +2,7 @@ "agent": { "agent_id": "1", "md5hash": "add6bb58e139be103324d04d82d8f545", - "url": "xxx://xxxx/xxx/xxx", + "url": "http://example.com/path/to/resource", "version": "7.0" } -} \ No newline at end of file +} diff --git a/doc/api_samples/os-agents/agent-update-put-resp.xml b/doc/api_samples/os-agents/agent-update-put-resp.xml index badf2750ea..ce62db3868 100644 --- a/doc/api_samples/os-agents/agent-update-put-resp.xml +++ b/doc/api_samples/os-agents/agent-update-put-resp.xml @@ -1,7 +1,7 @@ - xxx://xxxx/xxx/xxx + http://example.com/path/to/resource 7.0 1 add6bb58e139be103324d04d82d8f545 - \ No newline at end of file + diff --git a/doc/api_samples/os-agents/agents-get-resp.json b/doc/api_samples/os-agents/agents-get-resp.json index 73ba45c240..92e14e1dc5 100644 --- a/doc/api_samples/os-agents/agents-get-resp.json +++ b/doc/api_samples/os-agents/agents-get-resp.json @@ -6,8 +6,8 @@ "hypervisor": "hypervisor", "md5hash": "add6bb58e139be103324d04d82d8f545", "os": "os", - "url": "xxxxxxxxxxxx", + "url": "http://example.com/path/to/resource", "version": "8.0" } ] -} \ No newline at end of file +} diff --git a/doc/api_samples/os-agents/agents-get-resp.xml b/doc/api_samples/os-agents/agents-get-resp.xml index 4194f62c96..d804245305 100644 --- a/doc/api_samples/os-agents/agents-get-resp.xml +++ b/doc/api_samples/os-agents/agents-get-resp.xml @@ -1,4 +1,4 @@ - - \ No newline at end of file + + diff --git a/doc/v3/api_samples/os-agents/agent-post-req.json 
b/doc/v3/api_samples/os-agents/agent-post-req.json index 217993b17f..1913498547 100644 --- a/doc/v3/api_samples/os-agents/agent-post-req.json +++ b/doc/v3/api_samples/os-agents/agent-post-req.json @@ -5,6 +5,6 @@ "architecture": "x86", "version": "8.0", "md5hash": "add6bb58e139be103324d04d82d8f545", - "url": "xxxxxxxxxxxx" + "url": "http://example.com/path/to/resource" } -} \ No newline at end of file +} diff --git a/doc/v3/api_samples/os-agents/agent-post-resp.json b/doc/v3/api_samples/os-agents/agent-post-resp.json index f6c760cc67..24ddede90b 100644 --- a/doc/v3/api_samples/os-agents/agent-post-resp.json +++ b/doc/v3/api_samples/os-agents/agent-post-resp.json @@ -5,7 +5,7 @@ "hypervisor": "hypervisor", "md5hash": "add6bb58e139be103324d04d82d8f545", "os": "os", - "url": "xxxxxxxxxxxx", + "url": "http://example.com/path/to/resource", "version": "8.0" } -} \ No newline at end of file +} diff --git a/doc/v3/api_samples/os-agents/agent-update-put-req.json b/doc/v3/api_samples/os-agents/agent-update-put-req.json index e166abf9ee..89cbcaba39 100644 --- a/doc/v3/api_samples/os-agents/agent-update-put-req.json +++ b/doc/v3/api_samples/os-agents/agent-update-put-req.json @@ -1,6 +1,6 @@ { "agent": { - "url": "xxx://xxxx/xxx/xxx", + "url": "http://example.com/path/to/resource", "md5hash": "add6bb58e139be103324d04d82d8f545", "version": "7.0" } diff --git a/doc/v3/api_samples/os-agents/agent-update-put-resp.json b/doc/v3/api_samples/os-agents/agent-update-put-resp.json index 866994e4c9..2964c0f894 100644 --- a/doc/v3/api_samples/os-agents/agent-update-put-resp.json +++ b/doc/v3/api_samples/os-agents/agent-update-put-resp.json @@ -2,7 +2,7 @@ "agent": { "agent_id": 1, "md5hash": "add6bb58e139be103324d04d82d8f545", - "url": "xxx://xxxx/xxx/xxx", + "url": "http://example.com/path/to/resource", "version": "7.0" } -} \ No newline at end of file +} diff --git a/doc/v3/api_samples/os-agents/agents-get-resp.json b/doc/v3/api_samples/os-agents/agents-get-resp.json index 
73ba45c240..92e14e1dc5 100644 --- a/doc/v3/api_samples/os-agents/agents-get-resp.json +++ b/doc/v3/api_samples/os-agents/agents-get-resp.json @@ -6,8 +6,8 @@ "hypervisor": "hypervisor", "md5hash": "add6bb58e139be103324d04d82d8f545", "os": "os", - "url": "xxxxxxxxxxxx", + "url": "http://example.com/path/to/resource", "version": "8.0" } ] -} \ No newline at end of file +} diff --git a/nova/api/validation/validators.py b/nova/api/validation/validators.py index ce74923756..0974fa177d 100644 --- a/nova/api/validation/validators.py +++ b/nova/api/validation/validators.py @@ -17,6 +17,7 @@ """ import jsonschema +import rfc3986 import six from nova import exception @@ -40,6 +41,12 @@ def _validate_uuid_format(instance): return uuidutils.is_uuid_like(instance) +@jsonschema.FormatChecker.cls_checks('uri') +def _validate_uri(instance): + return rfc3986.is_valid_uri(instance, require_scheme=True, + require_authority=True) + + class _SchemaValidator(object): """A validator class diff --git a/nova/api/validator.py b/nova/api/validator.py index 2e8462f0d2..cce621849d 100644 --- a/nova/api/validator.py +++ b/nova/api/validator.py @@ -14,8 +14,8 @@ # under the License. 
import base64 -import re +import rfc3986 import six from nova.openstack.common import log as logging @@ -24,21 +24,6 @@ LOG = logging.getLogger(__name__) -def _get_path_validator_regex(): - # rfc3986 path validator regex from - # http://jmrware.com/articles/2009/uri_regexp/URI_regex.html - pchar = "([A-Za-z0-9\-._~!$&'()*+,;=:@]|%[0-9A-Fa-f]{2})" - path = "((/{pchar}*)*|" - path += "/({pchar}+(/{pchar}*)*)?|" - path += "{pchar}+(/{pchar}*)*|" - path += "{pchar}+(/{pchar}*)*|)" - path = path.format(pchar=pchar) - return re.compile(path) - - -VALIDATE_PATH_RE = _get_path_validator_regex() - - def validate_str(max_length=None): def _do(val): @@ -69,7 +54,9 @@ def validate_url_path(val): if not validate_str()(val): return False - return VALIDATE_PATH_RE.match(val).end() == len(val) + uri = rfc3986.URIReference(None, None, val, None, None) + + return uri.path_is_valid() and val.startswith('/') def validate_image_path(val): diff --git a/nova/tests/api/openstack/compute/contrib/test_agents.py b/nova/tests/api/openstack/compute/contrib/test_agents.py index 7373efeb27..e287a4d2dd 100644 --- a/nova/tests/api/openstack/compute/contrib/test_agents.py +++ b/nova/tests/api/openstack/compute/contrib/test_agents.py @@ -24,25 +24,25 @@ fake_agents_list = [{'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'id': 1}, {'hypervisor': 'kvm', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', - 'url': 'xxx://xxxx/xxx/xxx1', + 'url': 'http://example.com/path/to/resource1', 'md5hash': 'add6bb58e139be103324d04d82d8f546', 'id': 2}, {'hypervisor': 'xen', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', - 'url': 'xxx://xxxx/xxx/xxx2', + 'url': 'http://example.com/path/to/resource2', 'md5hash': 'add6bb58e139be103324d04d82d8f547', 'id': 3}, {'hypervisor': 'xen', 'os': 'win', 'architecture': 'power', 'version': '7.0', - 'url': 
'xxx://xxxx/xxx/xxx3', + 'url': 'http://example.com/path/to/resource3', 'md5hash': 'add6bb58e139be103324d04d82d8f548', 'id': 4}, ] @@ -106,13 +106,13 @@ def test_agents_create(self): 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} response = {'agent': {'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'agent_id': 1}} res_dict = self.controller.create(req, body) @@ -151,7 +151,7 @@ def _test_agents_create_with_invalid_length(self, key): 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} body['agent'][key] = 'x' * 256 self.assertRaises(webob.exc.HTTPBadRequest, @@ -185,25 +185,25 @@ def test_agents_list(self): agents_list = [{'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'agent_id': 1}, {'hypervisor': 'kvm', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', - 'url': 'xxx://xxxx/xxx/xxx1', + 'url': 'http://example.com/path/to/resource1', 'md5hash': 'add6bb58e139be103324d04d82d8f546', 'agent_id': 2}, {'hypervisor': 'xen', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', - 'url': 'xxx://xxxx/xxx/xxx2', + 'url': 'http://example.com/path/to/resource2', 'md5hash': 'add6bb58e139be103324d04d82d8f547', 'agent_id': 3}, {'hypervisor': 'xen', 'os': 'win', 'architecture': 'power', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx3', + 'url': 'http://example.com/path/to/resource3', 'md5hash': 'add6bb58e139be103324d04d82d8f548', 'agent_id': 4}, ] @@ -215,13 +215,13 @@ def test_agents_list_with_hypervisor(self): 
response = [{'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'agent_id': 1}, {'hypervisor': 'kvm', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', - 'url': 'xxx://xxxx/xxx/xxx1', + 'url': 'http://example.com/path/to/resource1', 'md5hash': 'add6bb58e139be103324d04d82d8f546', 'agent_id': 2}, ] @@ -230,11 +230,11 @@ def test_agents_list_with_hypervisor(self): def test_agents_update(self): req = FakeRequest() body = {'para': {'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} response = {'agent': {'agent_id': 1, 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} res_dict = self.controller.update(req, 1, body) self.assertEqual(res_dict, response) @@ -258,7 +258,7 @@ def test_agents_update_value_error(self): def _test_agents_update_with_invalid_length(self, key): req = FakeRequest() body = {'para': {'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} body['para'][key] = 'x' * 256 self.assertRaises(webob.exc.HTTPBadRequest, diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_agents.py b/nova/tests/api/openstack/compute/plugins/v3/test_agents.py index e7a07fed2d..a4b140214d 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_agents.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_agents.py @@ -23,25 +23,25 @@ fake_agents_list = [{'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'id': 1}, {'hypervisor': 'kvm', 'os': 'linux', 'architecture': 'x86', 'version': 
'16.0', - 'url': 'xxx://xxxx/xxx/xxx1', + 'url': 'http://example.com/path/to/resource1', 'md5hash': 'add6bb58e139be103324d04d82d8f546', 'id': 2}, {'hypervisor': 'xen', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', - 'url': 'xxx://xxxx/xxx/xxx2', + 'url': 'http://example.com/path/to/resource2', 'md5hash': 'add6bb58e139be103324d04d82d8f547', 'id': 3}, {'hypervisor': 'xen', 'os': 'win', 'architecture': 'power', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx3', + 'url': 'http://example.com/path/to/resource3', 'md5hash': 'add6bb58e139be103324d04d82d8f548', 'id': 4}, ] @@ -109,13 +109,13 @@ def test_agents_create(self): 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} response = {'agent': {'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'agent_id': 1}} res_dict = self.controller.create(req, body=body) @@ -130,7 +130,7 @@ def test_agents_create_with_existed_agent(self): 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} self.assertRaises(exc.HTTPConflict, self.controller.create, req, body=body) @@ -141,7 +141,7 @@ def test_agents_create_without_md5hash(self): 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx'}} + 'url': 'http://example.com/path/to/resource'}} self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @@ -160,7 +160,7 @@ def test_agents_create_without_version(self): body = {'agent': {'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} 
self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @@ -170,7 +170,7 @@ def test_agents_create_without_architecture(self): body = {'agent': {'hypervisor': 'kvm', 'os': 'win', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @@ -180,7 +180,7 @@ def test_agents_create_without_os(self): body = {'agent': {'hypervisor': 'kvm', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @@ -190,7 +190,7 @@ def test_agents_create_without_hypervisor(self): body = {'agent': {'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @@ -213,7 +213,7 @@ def _test_agents_create_with_invalid_length(self, key): 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} body['agent'][key] = 'x' * 256 self.assertRaises(exception.ValidationError, self.controller.create, @@ -247,25 +247,25 @@ def test_agents_list(self): agents_list = [{'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'agent_id': 1}, {'hypervisor': 'kvm', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', - 'url': 'xxx://xxxx/xxx/xxx1', + 'url': 'http://example.com/path/to/resource1', 'md5hash': 'add6bb58e139be103324d04d82d8f546', 
'agent_id': 2}, {'hypervisor': 'xen', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', - 'url': 'xxx://xxxx/xxx/xxx2', + 'url': 'http://example.com/path/to/resource2', 'md5hash': 'add6bb58e139be103324d04d82d8f547', 'agent_id': 3}, {'hypervisor': 'xen', 'os': 'win', 'architecture': 'power', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx3', + 'url': 'http://example.com/path/to/resource3', 'md5hash': 'add6bb58e139be103324d04d82d8f548', 'agent_id': 4}, ] @@ -277,13 +277,13 @@ def test_agents_list_with_hypervisor(self): response = [{'hypervisor': 'kvm', 'os': 'win', 'architecture': 'x86', 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545', 'agent_id': 1}, {'hypervisor': 'kvm', 'os': 'linux', 'architecture': 'x86', 'version': '16.0', - 'url': 'xxx://xxxx/xxx/xxx1', + 'url': 'http://example.com/path/to/resource1', 'md5hash': 'add6bb58e139be103324d04d82d8f546', 'agent_id': 2}, ] @@ -292,11 +292,11 @@ def test_agents_list_with_hypervisor(self): def test_agents_update(self): req = FakeRequest() body = {'agent': {'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} response = {'agent': {'agent_id': 1, 'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} res_dict = self.controller.update(req, 1, body=body) self.assertEqual(res_dict, response) @@ -304,7 +304,7 @@ def test_agents_update(self): def test_agents_update_without_md5hash(self): req = FakeRequest() body = {'agent': {'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx'}} + 'url': 'http://example.com/path/to/resource'}} self.assertRaises(exception.ValidationError, self.controller.update, req, 1, body=body) @@ -335,7 +335,7 @@ def test_agents_update_with_empty(self): def _test_agents_update_with_invalid_length(self, key): req = FakeRequest() body = 
{'agent': {'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} body['agent'][key] = 'x' * 256 self.assertRaises(exception.ValidationError, self.controller.update, diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index 4707a74889..f1be700c22 100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -1378,7 +1378,7 @@ def _get_flags(self): def setUp(self): super(AgentsJsonTest, self).setUp() - fake_agents_list = [{'url': 'xxxxxxxxxxxx', + fake_agents_list = [{'url': 'http://example.com/path/to/resource', 'hypervisor': 'hypervisor', 'architecture': 'x86', 'os': 'os', @@ -1419,7 +1419,7 @@ def fake_agent_build_destroy(context, agent_update_id): def test_agent_create(self): # Creates a new agent build. - project = {'url': 'xxxxxxxxxxxx', + project = {'url': 'http://example.com/path/to/resource', 'hypervisor': 'hypervisor', 'architecture': 'x86', 'os': 'os', @@ -1435,7 +1435,7 @@ def test_agent_create(self): def test_agent_list(self): # Return a list of all agent builds. response = self._do_get('os-agents') - project = {'url': 'xxxxxxxxxxxx', + project = {'url': 'http://example.com/path/to/resource', 'hypervisor': 'hypervisor', 'architecture': 'x86', 'os': 'os', @@ -1449,7 +1449,7 @@ def test_agent_update(self): # Update an existing agent build. 
agent_id = 1 subs = {'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'} response = self._do_put('os-agents/%s' % agent_id, 'agent-update-put-req', subs) diff --git a/nova/tests/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl index f6c760cc67..24ddede90b 100644 --- a/nova/tests/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl @@ -5,7 +5,7 @@ "hypervisor": "hypervisor", "md5hash": "add6bb58e139be103324d04d82d8f545", "os": "os", - "url": "xxxxxxxxxxxx", + "url": "http://example.com/path/to/resource", "version": "8.0" } -} \ No newline at end of file +} diff --git a/nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl index 866994e4c9..2964c0f894 100644 --- a/nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl @@ -2,7 +2,7 @@ "agent": { "agent_id": 1, "md5hash": "add6bb58e139be103324d04d82d8f545", - "url": "xxx://xxxx/xxx/xxx", + "url": "http://example.com/path/to/resource", "version": "7.0" } -} \ No newline at end of file +} diff --git a/nova/tests/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl index 73ba45c240..92e14e1dc5 100644 --- a/nova/tests/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl @@ -6,8 +6,8 @@ "hypervisor": "hypervisor", "md5hash": "add6bb58e139be103324d04d82d8f545", "os": "os", - "url": "xxxxxxxxxxxx", + "url": "http://example.com/path/to/resource", "version": "8.0" } ] -} \ No newline at end of 
file +} diff --git a/nova/tests/integrated/v3/test_agents.py b/nova/tests/integrated/v3/test_agents.py index 99a53bcf51..bdb3b4e220 100644 --- a/nova/tests/integrated/v3/test_agents.py +++ b/nova/tests/integrated/v3/test_agents.py @@ -24,7 +24,7 @@ class AgentsJsonTest(api_sample_base.ApiSampleTestBaseV3): def setUp(self): super(AgentsJsonTest, self).setUp() - fake_agents_list = [{'url': 'xxxxxxxxxxxx', + fake_agents_list = [{'url': 'http://example.com/path/to/resource', 'hypervisor': 'hypervisor', 'architecture': 'x86', 'os': 'os', @@ -65,7 +65,7 @@ def fake_agent_build_destroy(context, agent_update_id): def test_agent_create(self): # Creates a new agent build. - project = {'url': 'xxxxxxxxxxxx', + project = {'url': 'http://example.com/path/to/resource', 'hypervisor': 'hypervisor', 'architecture': 'x86', 'os': 'os', @@ -85,7 +85,7 @@ def test_agent_update(self): # Update an existing agent build. agent_id = 1 subs = {'version': '7.0', - 'url': 'xxx://xxxx/xxx/xxx', + 'url': 'http://example.com/path/to/resource', 'md5hash': 'add6bb58e139be103324d04d82d8f545'} response = self._do_put('os-agents/%s' % agent_id, 'agent-update-put-req', subs) diff --git a/nova/tests/test_api_validation.py b/nova/tests/test_api_validation.py index e055817489..a30a5fe92d 100644 --- a/nova/tests/test_api_validation.py +++ b/nova/tests/test_api_validation.py @@ -692,6 +692,64 @@ def test_validate_uuid_fails(self): expected_detail=detail) +class UriTestCase(APIValidationTestCase): + + def setUp(self): + super(UriTestCase, self).setUp() + schema = { + 'type': 'object', + 'properties': { + 'foo': { + 'type': 'string', + 'format': 'uri', + }, + }, + } + + @validation.schema(request_body_schema=schema) + def post(body): + return 'Validation succeeded.' 
+ + self.post = post + + def test_validate_uri(self): + self.assertEqual('Validation succeeded.', + self.post( + body={'foo': 'http://localhost:8774/v2/servers'} + )) + self.assertEqual('Validation succeeded.', + self.post( + body={'foo': 'http://[::1]:8774/v2/servers'} + )) + + def test_validate_uri_fails(self): + base_detail = ("Invalid input for field/attribute foo. Value: {0}. " + "'{0}' is not a 'uri'") + invalid_uri = 'http://localhost:8774/v2/servers##' + self.check_validation_error(self.post, + body={'foo': invalid_uri}, + expected_detail=base_detail.format( + invalid_uri)) + + invalid_uri = 'http://[fdf8:01]:8774/v2/servers' + self.check_validation_error(self.post, + body={'foo': invalid_uri}, + expected_detail=base_detail.format( + invalid_uri)) + + invalid_uri = '1' + self.check_validation_error(self.post, + body={'foo': invalid_uri}, + expected_detail=base_detail.format( + invalid_uri)) + + invalid_uri = 'abc' + self.check_validation_error(self.post, + body={'foo': invalid_uri}, + expected_detail=base_detail.format( + invalid_uri)) + + class Ipv4TestCase(APIValidationTestCase): def setUp(self): diff --git a/requirements.txt b/requirements.txt index 7fb504cc45..9cf68daee2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -37,3 +37,4 @@ pycadf>=0.5.1 oslo.messaging>=1.4.0.0a3 oslo.i18n>=0.1.0 # Apache-2.0 lockfile>=0.8 +rfc3986>=0.2.0 # Apache-2.0 From 9c9e44e111abafde1e05a5e38f7e1645ec8ba6df Mon Sep 17 00:00:00 2001 From: Alessandro Pilotti Date: Tue, 19 Aug 2014 15:46:51 +0300 Subject: [PATCH 426/486] Adds get_instance_disk_info to compute drivers Solves a live migration regression issue by adding the get_instance_disk_info to the compute drivers that do not have it already implemented. The method is called by the compute manager. 
Change-Id: If5cd9a6f5948c08ef5f1cba2eda31f1175709137 Closes-Bug: #1358719 Co-Authored-By: Ionut Balutoiu --- nova/tests/virt/hyperv/test_hypervapi.py | 9 +++++++++ nova/tests/virt/vmwareapi/test_driver_api.py | 9 +++++++++ nova/virt/hyperv/driver.py | 3 +++ nova/virt/vmwareapi/driver.py | 3 +++ 4 files changed, 24 insertions(+) diff --git a/nova/tests/virt/hyperv/test_hypervapi.py b/nova/tests/virt/hyperv/test_hypervapi.py index 84a1802364..99b717e65e 100644 --- a/nova/tests/virt/hyperv/test_hypervapi.py +++ b/nova/tests/virt/hyperv/test_hypervapi.py @@ -765,6 +765,15 @@ def _test_pre_live_migration(self, cow, with_volumes): else: self.assertIsNone(self._fetched_image) + def test_get_instance_disk_info_is_implemented(self): + # Ensure that the method has been implemented in the driver + try: + disk_info = self._conn.get_instance_disk_info('fake_instance_name') + self.assertIsNone(disk_info) + except NotImplementedError: + self.fail("test_get_instance_disk_info() should not raise " + "NotImplementedError") + def test_snapshot_with_update_failure(self): (snapshot_name, func_call_matcher) = self._setup_snapshot_mocks() diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py index 58a7c2765b..d993fe50a4 100644 --- a/nova/tests/virt/vmwareapi/test_driver_api.py +++ b/nova/tests/virt/vmwareapi/test_driver_api.py @@ -2571,6 +2571,15 @@ def test_rollback_live_migration_at_destination(self): mock_destroy.assert_called_once_with(self.context, "instance", [], None) + def test_get_instance_disk_info_is_implemented(self): + # Ensure that the method has been implemented in the driver + try: + disk_info = self.conn.get_instance_disk_info('fake_instance_name') + self.assertIsNone(disk_info) + except NotImplementedError: + self.fail("test_get_instance_disk_info() should not raise " + "NotImplementedError") + def test_destroy(self): self._create_vm() info = self.conn.get_info({'uuid': self.uuid, diff --git a/nova/virt/hyperv/driver.py 
b/nova/virt/hyperv/driver.py index 5b666cfa12..7570bfa4d3 100644 --- a/nova/virt/hyperv/driver.py +++ b/nova/virt/hyperv/driver.py @@ -164,6 +164,9 @@ def check_can_live_migrate_source(self, context, instance, return self._livemigrationops.check_can_live_migrate_source( context, instance, dest_check_data) + def get_instance_disk_info(self, instance_name, block_device_info=None): + pass + def plug_vifs(self, instance, network_info): """Plug VIFs into networks.""" msg = _("VIF plugging is not supported by the Hyper-V driver.") diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index f0ba5a41f4..43be855573 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -275,6 +275,9 @@ def rollback_live_migration_at_destination(self, context, instance, """Clean up destination node after a failed live migration.""" self.destroy(context, instance, network_info, block_device_info) + def get_instance_disk_info(self, instance_name, block_device_info=None): + pass + def get_vnc_console(self, context, instance): """Return link to instance's VNC console using vCenter logic.""" # vCenter does not actually run the VNC service From 231347ebbb1c61976e7bd2bd3734d611d21e0ec0 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 23 May 2014 11:12:31 -0700 Subject: [PATCH 427/486] Support reserving ips at network create time It is horribly inefficient to create a network and reserve a large swath of ips, so allow the network creator to specify which ips he wants to be available in the network by specifying allowed_start and allowed_end. Something like: network-create foo --cidr 10.0.0.0/24 --allowed-start 10.0.0.5 \ --allowed-end 10.0.0.250 would ensure that 10.0.0.0-10.0.0.4 and 10.0.0.254-10.0.0.255 are marked reserved so instances would be assigned addresses from the middle of the range. 
Partially-implements blueprint better-support-for-multiple-networks Change-Id: I5427e1a2d202658d374292559fc7054f2fa51124 --- nova/network/manager.py | 64 ++++++++++++++++++++++-------- nova/tests/fake_network.py | 3 +- nova/tests/network/test_manager.py | 41 ++++++++++++++++++- 3 files changed, 89 insertions(+), 19 deletions(-) diff --git a/nova/network/manager.py b/nova/network/manager.py index a09aed2cd9..10b7158cb7 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -1079,12 +1079,13 @@ def create_networks(self, context, network_size=None, cidr_v6=None, gateway=None, gateway_v6=None, bridge=None, bridge_interface=None, dns1=None, dns2=None, - fixed_cidr=None, **kwargs): + fixed_cidr=None, allowed_start=None, + allowed_end=None, **kwargs): arg_names = ("label", "cidr", "multi_host", "num_networks", "network_size", "cidr_v6", "gateway", "gateway_v6", "bridge", "bridge_interface", "dns1", "dns2", - "fixed_cidr") + "fixed_cidr", "allowed_start", "allowed_end") if 'mtu' not in kwargs: kwargs['mtu'] = CONF.network_device_mtu if 'dhcp_server' not in kwargs: @@ -1149,12 +1150,24 @@ def create_networks(self, context, LOG.debug('Create network: |%s|', kwargs) return self._do_create_networks(context, **kwargs) + @staticmethod + def _index_of(subnet, ip): + try: + start = netaddr.IPAddress(ip) + except netaddr.AddrFormatError: + raise ValueError(_("Not a valid IP Address")) + index = start.value - subnet.value + if index < 0 or index >= subnet.size: + raise ValueError(_("IP not within cidr range")) + return index + def _do_create_networks(self, context, label, cidr, multi_host, num_networks, network_size, cidr_v6, gateway, gateway_v6, bridge, bridge_interface, dns1=None, dns2=None, fixed_cidr=None, mtu=None, dhcp_server=None, - enable_dhcp=None, share_address=None, **kwargs): + enable_dhcp=None, share_address=None, + allowed_start=None, allowed_end=None, **kwargs): """Create networks based on parameters.""" # NOTE(jkoelker): these are dummy values to make 
sure iter works # TODO(tr3buchet): disallow carving up networks @@ -1250,17 +1263,38 @@ def find_next(subnet): else: net.label = label + bottom_reserved = self._bottom_reserved_ips + top_reserved = self._top_reserved_ips extra_reserved = [] if cidr and subnet_v4: + current = subnet_v4[1] + if allowed_start: + try: + val = self._index_of(subnet_v4, allowed_start) + except ValueError as exc: + raise ValueError('allowed_start: %s' % unicode(exc)) + current = netaddr.IPAddress(allowed_start) + bottom_reserved = val + if allowed_end: + try: + val = self._index_of(subnet_v4, allowed_end) + except ValueError as exc: + raise ValueError('allowed_end: %s' % unicode(exc)) + top_reserved = subnet_v4.size - 1 - val net.cidr = str(subnet_v4) net.netmask = str(subnet_v4.netmask) - net.gateway = gateway or str(subnet_v4[1]) net.broadcast = str(subnet_v4.broadcast) - net.dhcp_start = str(subnet_v4[2]) + if gateway: + net.gateway = gateway + else: + net.gateway = current + current += 1 if not dhcp_server: dhcp_server = net.gateway - if net.dhcp_start == dhcp_server: - net.dhcp_start = str(subnet_v4[3]) + net.dhcp_start = current + current += 1 + if str(net.dhcp_start) == dhcp_server: + net.dhcp_start = current net.dhcp_server = dhcp_server extra_reserved.append(str(net.dhcp_server)) extra_reserved.append(str(net.gateway)) @@ -1286,8 +1320,9 @@ def find_next(subnet): used_vlans.sort() vlan = used_vlans[-1] + 1 - net.vpn_private_address = str(subnet_v4[2]) - net.dhcp_start = str(subnet_v4[3]) + net.vpn_private_address = net.dhcp_start + extra_reserved.append(str(net.vpn_private_address)) + net.dhcp_start = net.dhcp_start + 1 net.vlan = vlan net.bridge = 'br%s' % vlan @@ -1301,7 +1336,8 @@ def find_next(subnet): if cidr and subnet_v4: self._create_fixed_ips(context, net.id, fixed_cidr, - extra_reserved) + extra_reserved, bottom_reserved, + top_reserved) # NOTE(danms): Remove this in RPC API v2.0 return obj_base.obj_to_primitive(networks) @@ -1332,16 +1368,12 @@ def 
_top_reserved_ips(self): # pylint: disable=R0201 return 1 # broadcast def _create_fixed_ips(self, context, network_id, fixed_cidr=None, - extra_reserved=None): + extra_reserved=None, bottom_reserved=0, + top_reserved=0): """Create all fixed ips for network.""" network = self._get_network_by_id(context, network_id) - # NOTE(vish): Should these be properties of the network as opposed - # to properties of the manager class? - bottom_reserved = self._bottom_reserved_ips - top_reserved = self._top_reserved_ips if extra_reserved is None: extra_reserved = [] - if not fixed_cidr: fixed_cidr = netaddr.IPNetwork(network['cidr']) num_ips = len(fixed_cidr) diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index dd6c588a78..873d2cfa95 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -153,7 +153,8 @@ def deallocate_fixed_ip(self, context, address=None, host=None, self.deallocate_called = address def _create_fixed_ips(self, context, network_id, fixed_cidr=None, - extra_reserved=None): + extra_reserved=None, bottom_reserved=0, + top_reserved=0): pass def get_instance_nw_info(context, instance_id, rxtx_factor, diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index 694f3d6e11..61f8d67afc 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -333,9 +333,45 @@ def test_validate_reserved(self): 256, None, None, None, None, None) self.assertEqual(1, len(nets)) network = nets[0] - self.assertEqual(3, db.network_count_reserved_ips(context_admin, + self.assertEqual(4, db.network_count_reserved_ips(context_admin, network['id'])) + def test_validate_reserved_start_end(self): + context_admin = context.RequestContext('testuser', 'testproject', + is_admin=True) + nets = self.network.create_networks(context_admin, 'fake', + '192.168.0.0/24', False, 1, + 256, dhcp_server='192.168.0.11', + allowed_start='192.168.0.10', + allowed_end='192.168.0.245') + self.assertEqual(1, 
len(nets)) + network = nets[0] + # gateway defaults to beginning of allowed_start + self.assertEqual('192.168.0.10', network['gateway']) + # vpn_server doesn't conflict with dhcp_start + self.assertEqual('192.168.0.12', network['vpn_private_address']) + # dhcp_start doesn't conflict with dhcp_server + self.assertEqual('192.168.0.13', network['dhcp_start']) + # NOTE(vish): 10 from the beginning, 10 from the end, and + # 1 for the gateway, 1 for the dhcp server, + # 1 for the vpn server + self.assertEqual(23, db.network_count_reserved_ips(context_admin, + network['id'])) + + def test_validate_reserved_start_out_of_range(self): + context_admin = context.RequestContext('testuser', 'testproject', + is_admin=True) + self.assertRaises(ValueError, self.network.create_networks, + context_admin, 'fake', '192.168.0.0/24', False, + 1, 256, allowed_start='192.168.1.10') + + def test_validate_reserved_end_invalid(self): + context_admin = context.RequestContext('testuser', 'testproject', + is_admin=True) + self.assertRaises(ValueError, self.network.create_networks, + context_admin, 'fake', '192.168.0.0/24', False, + 1, 256, allowed_end='invalid') + def test_validate_networks_none_requested_networks(self): self.network.validate_networks(self.context, None) @@ -1782,7 +1818,8 @@ def dnsdomain_get(context, instance_domain): self.assertTrue(res) def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None, - extra_reserved=None): + extra_reserved=None, bottom_reserved=0, + top_reserved=0): return None def test_get_instance_nw_info_client_exceptions(self): From 7d61239dfec85818859f925f47b9d2438b1786d6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 3 Jun 2014 11:08:04 -0700 Subject: [PATCH 428/486] Use real exceptions for network create and destroy In order to provide reasonable error messages to the user, we need real exceptions instead of a bunch of random value Errors. 
Partially-implements blueprint better-support-for-multiple-networks Change-Id: Id725afbdde40cba879608d15de9aa2753a256d10 --- nova/exception.py | 56 ++++++++++++++++++++++-------- nova/network/manager.py | 56 ++++++++++++++++-------------- nova/tests/network/test_manager.py | 22 ++++++++++-- 3 files changed, 92 insertions(+), 42 deletions(-) diff --git a/nova/exception.py b/nova/exception.py index 53281a4be9..d3677ac9af 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -316,10 +316,6 @@ class InvalidContentType(Invalid): msg_fmt = _("Invalid content type %(content_type)s.") -class InvalidCidr(Invalid): - msg_fmt = _("Invalid cidr %(cidr)s.") - - class InvalidUnicodeParameter(Invalid): msg_fmt = _("Invalid Parameter: " "Unicode is not supported by the current database.") @@ -594,10 +590,51 @@ class NetworkInUse(NovaException): msg_fmt = _("Network %(network_id)s is still in use.") -class NetworkNotCreated(NovaException): +class InvalidNetworkParam(Invalid): + # NOTE(vish) base class for network create param errors + code = 422 + + +class NetworkNotCreated(InvalidNetworkParam): msg_fmt = _("%(req)s is required to create a network.") +class LabelTooLong(InvalidNetworkParam): + msg_fmt = _("Maximum allowed length for 'label' is 255.") + + +class InvalidIntValue(InvalidNetworkParam): + msg_fmt = _("%(key)s must be an integer.") + + +class InvalidCidr(InvalidNetworkParam): + msg_fmt = _("%(cidr)s is not a valid ip network.") + + +class InvalidAddress(InvalidNetworkParam): + msg_fmt = _("%(address)s is not a valid ip address.") + + +class AddressOutOfRange(InvalidNetworkParam): + msg_fmt = _("%(address)s is not within %(cidr)s.") + + +class DuplicateVlan(NovaException): + msg_fmt = _("Detected existing vlan with id %(vlan)d") + code = 409 + + +class CidrConflict(NovaException): + msg_fmt = _('Requested cidr (%(cidr)s) conflicts ' + 'with existing cidr (%(other)s)') + code = 409 + + +class NetworkHasProject(NetworkInUse): + msg_fmt = _('Network must be disassociated 
from project ' + '%(project_id)s before it can be deleted.') + + class NetworkNotFound(NotFound): msg_fmt = _("Network %(network_id)s could not be found.") @@ -1235,15 +1272,6 @@ class InstancePasswordSetFailed(NovaException): safe = True -class DuplicateVlan(NovaException): - msg_fmt = _("Detected existing vlan with id %(vlan)d") - - -class CidrConflict(NovaException): - msg_fmt = _("There was a conflict when trying to complete your request.") - code = 409 - - class InstanceNotFound(NotFound): ec2_code = 'InvalidInstanceID.NotFound' msg_fmt = _("Instance %(instance_id)s could not be found.") diff --git a/nova/network/manager.py b/nova/network/manager.py index 10b7158cb7..857a22a415 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -1072,7 +1072,7 @@ def _convert_int_args(kwargs): continue kwargs[key] = int(value) except ValueError: - raise ValueError(_("%s must be an integer") % key) + raise exception.InvalidIntValue(key=key) def create_networks(self, context, label, cidr=None, multi_host=None, num_networks=None, @@ -1105,7 +1105,7 @@ def create_networks(self, context, # Size of "label" column in nova.networks is 255, hence the restriction if len(label) > 255: - raise ValueError(_("Maximum allowed length for 'label' is 255.")) + raise exception.LabelTooLong() if not (kwargs["cidr"] or kwargs["cidr_v6"]): raise exception.NetworkNotCreated(req="cidr or cidr_v6") @@ -1118,10 +1118,22 @@ def create_networks(self, context, if not kwargs[fld]: raise exception.NetworkNotCreated(req=fld) + if kwargs["cidr_v6"]: + # NOTE(vish): just for validation + try: + netaddr.IPNetwork(kwargs["cidr_v6"]) + except netaddr.AddrFormatError: + raise exception.InvalidCidr(cidr=kwargs["cidr_v6"]) + + if kwargs["cidr"]: + try: + fixnet = netaddr.IPNetwork(kwargs["cidr"]) + except netaddr.AddrFormatError: + raise exception.InvalidCidr(cidr=kwargs["cidr"]) + kwargs["num_networks"] = kwargs["num_networks"] or CONF.num_networks if not kwargs["network_size"]: if 
kwargs["cidr"]: - fixnet = netaddr.IPNetwork(kwargs["cidr"]) each_subnet_size = fixnet.size / kwargs["num_networks"] if each_subnet_size > CONF.network_size: subnet = 32 - int(math.log(CONF.network_size, 2)) @@ -1145,7 +1157,10 @@ def create_networks(self, context, kwargs["dns1"] = kwargs["dns1"] or CONF.flat_network_dns if kwargs["fixed_cidr"]: - kwargs["fixed_cidr"] = netaddr.IPNetwork(kwargs["fixed_cidr"]) + try: + kwargs["fixed_cidr"] = netaddr.IPNetwork(kwargs["fixed_cidr"]) + except netaddr.AddrFormatError: + raise exception.InvalidCidr(cidr=kwargs["fixed_cidr"]) LOG.debug('Create network: |%s|', kwargs) return self._do_create_networks(context, **kwargs) @@ -1155,10 +1170,10 @@ def _index_of(subnet, ip): try: start = netaddr.IPAddress(ip) except netaddr.AddrFormatError: - raise ValueError(_("Not a valid IP Address")) + raise exception.InvalidAddress(address=ip) index = start.value - subnet.value if index < 0 or index >= subnet.size: - raise ValueError(_("IP not within cidr range")) + raise exception.AddressOutOfRange(address=ip, cidr=str(subnet)) return index def _do_create_networks(self, context, @@ -1222,13 +1237,12 @@ def find_next(subnet): subnets_v4.append(next_subnet) subnet = next_subnet else: - raise exception.CidrConflict(_('cidr already in use')) + raise exception.CidrConflict(cidr=subnet, + other=subnet) for used_subnet in used_subnets: if subnet in used_subnet: - msg = _('requested cidr (%(cidr)s) conflicts with ' - 'existing supernet (%(super)s)') - raise exception.CidrConflict( - msg % {'cidr': subnet, 'super': used_subnet}) + raise exception.CidrConflict(cidr=subnet, + other=used_subnet) if used_subnet in subnet: next_subnet = find_next(subnet) if next_subnet: @@ -1236,11 +1250,8 @@ def find_next(subnet): subnets_v4.append(next_subnet) subnet = next_subnet else: - msg = _('requested cidr (%(cidr)s) conflicts ' - 'with existing smaller cidr ' - '(%(smaller)s)') - raise exception.CidrConflict( - msg % {'cidr': subnet, 'smaller': used_subnet}) + 
raise exception.CidrConflict(cidr=subnet, + other=used_subnet) networks = objects.NetworkList(context=context, objects=[]) subnets = itertools.izip_longest(subnets_v4, subnets_v6) @@ -1269,17 +1280,11 @@ def find_next(subnet): if cidr and subnet_v4: current = subnet_v4[1] if allowed_start: - try: - val = self._index_of(subnet_v4, allowed_start) - except ValueError as exc: - raise ValueError('allowed_start: %s' % unicode(exc)) + val = self._index_of(subnet_v4, allowed_start) current = netaddr.IPAddress(allowed_start) bottom_reserved = val if allowed_end: - try: - val = self._index_of(subnet_v4, allowed_end) - except ValueError as exc: - raise ValueError('allowed_end: %s' % unicode(exc)) + val = self._index_of(subnet_v4, allowed_end) top_reserved = subnet_v4.size - 1 - val net.cidr = str(subnet_v4) net.netmask = str(subnet_v4.netmask) @@ -1353,8 +1358,7 @@ def delete_network(self, context, fixed_range, uuid, LOG.debug('Delete network %s', network['uuid']) if require_disassociated and network.project_id is not None: - raise ValueError(_('Network must be disassociated from project %s' - ' before delete') % network.project_id) + raise exception.NetworkHasProject(project_id=network.project_id) network.destroy() @property diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index 61f8d67afc..22237bed7f 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -361,17 +361,35 @@ def test_validate_reserved_start_end(self): def test_validate_reserved_start_out_of_range(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) - self.assertRaises(ValueError, self.network.create_networks, + self.assertRaises(exception.AddressOutOfRange, + self.network.create_networks, context_admin, 'fake', '192.168.0.0/24', False, 1, 256, allowed_start='192.168.1.10') def test_validate_reserved_end_invalid(self): context_admin = context.RequestContext('testuser', 'testproject', is_admin=True) - 
self.assertRaises(ValueError, self.network.create_networks, + self.assertRaises(exception.InvalidAddress, + self.network.create_networks, context_admin, 'fake', '192.168.0.0/24', False, 1, 256, allowed_end='invalid') + def test_validate_cidr_invalid(self): + context_admin = context.RequestContext('testuser', 'testproject', + is_admin=True) + self.assertRaises(exception.InvalidCidr, + self.network.create_networks, + context_admin, 'fake', 'invalid', False, + 1, 256) + + def test_validate_non_int_size(self): + context_admin = context.RequestContext('testuser', 'testproject', + is_admin=True) + self.assertRaises(exception.InvalidIntValue, + self.network.create_networks, + context_admin, 'fake', '192.168.0.0/24', False, + 1, 'invalid') + def test_validate_networks_none_requested_networks(self): self.network.validate_networks(self.context, None) From 71fabdc8dc69d418fa987bef03e1c70ea389f80a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 9 May 2014 13:04:46 -0700 Subject: [PATCH 429/486] Add api extension for new network fields. This uses the existing api extension to implement the actual control of the fields, but the check is based on a new dummy extension called os-extended-networks. Api sample tests added for new extension. DocImpact: Adds an extension that enables extra fields for network create. The new fields are: mtu: int (default flag) if set, nova sets the mtu on bridge. This allows network_device_mtu flag to be set per network. dhcp_server: ip (default == gateway) if different from gateway, sets nova to assume gateway is external. enable_dhcp: bool (default true) false will disable dhcp on network. share_address: bool (default flag) if specifed, network will have the same dhcp ip on every host. This allows share_dhcp_address flag to be set per network. allowed_start: ip if specified, reserves all ips before allowed_start. allowed_end: ip if specified, reserves all ips after allowed_end. 
Partially-implements blueprint better-support-for-multiple-networks Change-Id: I577fe5f6560be50106f345a42a826e97d5e7d64c --- .../all_extensions/extensions-get-resp.json | 8 ++ .../all_extensions/extensions-get-resp.xml | 3 + .../network-create-req.json | 12 ++ .../network-create-req.xml | 10 ++ .../network-create-resp.json | 36 ++++++ .../network-create-resp.xml | 35 ++++++ .../network-show-resp.json | 36 ++++++ .../network-show-resp.xml | 35 ++++++ .../networks-list-resp.json | 72 ++++++++++++ .../networks-list-resp.xml | 71 ++++++++++++ .../compute/contrib/extended_networks.py | 26 +++++ .../openstack/compute/contrib/os_networks.py | 48 ++++++-- nova/exception.py | 17 +-- .../compute/contrib/test_networks.py | 109 ++++++++++++++++-- .../extensions-get-resp.json.tpl | 8 ++ .../extensions-get-resp.xml.tpl | 3 + .../network-create-req.json.tpl | 12 ++ .../network-create-req.xml.tpl | 10 ++ .../network-create-resp.json.tpl | 36 ++++++ .../network-create-resp.xml.tpl | 34 ++++++ .../network-show-resp.json.tpl | 37 ++++++ .../network-show-resp.xml.tpl | 35 ++++++ .../networks-list-resp.json.tpl | 72 ++++++++++++ .../networks-list-resp.xml.tpl | 71 ++++++++++++ nova/tests/integrated/test_api_samples.py | 44 +++++++ 25 files changed, 850 insertions(+), 30 deletions(-) create mode 100644 doc/api_samples/os-extended-networks/network-create-req.json create mode 100644 doc/api_samples/os-extended-networks/network-create-req.xml create mode 100644 doc/api_samples/os-extended-networks/network-create-resp.json create mode 100644 doc/api_samples/os-extended-networks/network-create-resp.xml create mode 100644 doc/api_samples/os-extended-networks/network-show-resp.json create mode 100644 doc/api_samples/os-extended-networks/network-show-resp.xml create mode 100644 doc/api_samples/os-extended-networks/networks-list-resp.json create mode 100644 doc/api_samples/os-extended-networks/networks-list-resp.xml create mode 100644 nova/api/openstack/compute/contrib/extended_networks.py 
create mode 100644 nova/tests/integrated/api_samples/os-extended-networks/network-create-req.json.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl create mode 100644 nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json index 81df655de3..b8fac5f559 100644 --- a/doc/api_samples/all_extensions/extensions-get-resp.json +++ b/doc/api_samples/all_extensions/extensions-get-resp.json @@ -296,6 +296,14 @@ "namespace": "http://docs.openstack.org/compute/ext/extended_hypervisors/api/v1.1", "updated": "2014-01-04T00:00:00Z" }, + { + "alias": "os-extended-networks", + "description": "Adds additional fields to networks", + "links": [], + "name": "ExtendedNetworks", + "namespace": "http://docs.openstack.org/compute/ext/extended_networks/api/v2", + "updated": "2014-05-09T00:00:00Z" + }, { "alias": "os-extended-quotas", "description": "Adds ability for admins to delete quota\n and optionally force the update Quota command.\n ", diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml index 77fde4d58b..fb56dd8f4b 100644 --- a/doc/api_samples/all_extensions/extensions-get-resp.xml +++ b/doc/api_samples/all_extensions/extensions-get-resp.xml @@ -129,6 +129,9 @@ Extended hypervisors support. 
+ + Adds additional fields to networks + Adds ability for admins to delete quota and optionally force the update Quota command. diff --git a/doc/api_samples/os-extended-networks/network-create-req.json b/doc/api_samples/os-extended-networks/network-create-req.json new file mode 100644 index 0000000000..18515bd6c4 --- /dev/null +++ b/doc/api_samples/os-extended-networks/network-create-req.json @@ -0,0 +1,12 @@ +{ + "network": { + "label": "new net 111", + "cidr": "10.20.105.0/24", + "mtu": 9000, + "dhcp_server": "10.20.105.2", + "enable_dhcp": false, + "share_address": true, + "allowed_start": "10.20.105.10", + "allowed_end": "10.20.105.200" + } +} diff --git a/doc/api_samples/os-extended-networks/network-create-req.xml b/doc/api_samples/os-extended-networks/network-create-req.xml new file mode 100644 index 0000000000..3cc79bd837 --- /dev/null +++ b/doc/api_samples/os-extended-networks/network-create-req.xml @@ -0,0 +1,10 @@ + + + 10.20.105.0/24 + 9000 + 10.20.105.2 + False + True + 10.20.105.10 + 10.20.105.200 + diff --git a/doc/api_samples/os-extended-networks/network-create-resp.json b/doc/api_samples/os-extended-networks/network-create-resp.json new file mode 100644 index 0000000000..4364e50b2d --- /dev/null +++ b/doc/api_samples/os-extended-networks/network-create-resp.json @@ -0,0 +1,36 @@ +{ + "network": { + "bridge": null, + "bridge_interface": null, + "broadcast": "10.20.105.255", + "cidr": "10.20.105.0/24", + "cidr_v6": null, + "created_at": null, + "deleted": null, + "deleted_at": null, + "dhcp_server": "10.20.105.2", + "dhcp_start": "10.20.105.2", + "dns1": null, + "dns2": null, + "enable_dhcp": false, + "gateway": "10.20.105.1", + "gateway_v6": null, + "host": null, + "id": "d7a17c0c-457e-4ab4-a99c-4fa1762f5359", + "injected": null, + "label": "new net 111", + "mtu": 9000, + "multi_host": null, + "netmask": "255.255.255.0", + "netmask_v6": null, + "priority": null, + "project_id": null, + "rxtx_base": null, + "share_address": true, + "updated_at": null, 
+ "vlan": null, + "vpn_private_address": null, + "vpn_public_address": null, + "vpn_public_port": null + } +} \ No newline at end of file diff --git a/doc/api_samples/os-extended-networks/network-create-resp.xml b/doc/api_samples/os-extended-networks/network-create-resp.xml new file mode 100644 index 0000000000..9f16171ed7 --- /dev/null +++ b/doc/api_samples/os-extended-networks/network-create-resp.xml @@ -0,0 +1,35 @@ + + + None + None + 10.20.105.2 + None + True + None + a931ead3-4c5c-4b85-a90e-b248ffa71134 + None + None + 10.20.105.1 + None + + None + None + None + False + None + 10.20.105.255 + 255.255.255.0 + None + 10.20.105.0/24 + None + None + False + None + None + None + 9000 + None + None + 10.20.105.2 + None + \ No newline at end of file diff --git a/doc/api_samples/os-extended-networks/network-show-resp.json b/doc/api_samples/os-extended-networks/network-show-resp.json new file mode 100644 index 0000000000..9741395c63 --- /dev/null +++ b/doc/api_samples/os-extended-networks/network-show-resp.json @@ -0,0 +1,36 @@ +{ + "network": { + "bridge": "br100", + "bridge_interface": "eth0", + "broadcast": "10.0.0.7", + "cidr": "10.0.0.0/29", + "cidr_v6": null, + "created_at": "2011-08-15T06:19:19.387525", + "deleted": false, + "deleted_at": null, + "dhcp_server": "10.0.0.1", + "dhcp_start": "10.0.0.3", + "dns1": null, + "dns2": null, + "enable_dhcp": true, + "gateway": "10.0.0.1", + "gateway_v6": null, + "host": "nsokolov-desktop", + "id": "20c8acc0-f747-4d71-a389-46d078ebf047", + "injected": false, + "label": "mynet_0", + "mtu": null, + "multi_host": false, + "netmask": "255.255.255.248", + "netmask_v6": null, + "priority": null, + "project_id": "1234", + "rxtx_base": null, + "share_address": false, + "updated_at": "2011-08-16T09:26:13.048257", + "vlan": 100, + "vpn_private_address": "10.0.0.2", + "vpn_public_address": "127.0.0.1", + "vpn_public_port": 1000 + } +} \ No newline at end of file diff --git a/doc/api_samples/os-extended-networks/network-show-resp.xml 
b/doc/api_samples/os-extended-networks/network-show-resp.xml new file mode 100644 index 0000000000..2f3176fbc3 --- /dev/null +++ b/doc/api_samples/os-extended-networks/network-show-resp.xml @@ -0,0 +1,35 @@ + + + br100 + 1000 + 10.0.0.3 + eth0 + False + 2011-08-16 09:26:13.048257 + 20c8acc0-f747-4d71-a389-46d078ebf047 + None + None + 10.0.0.1 + None + + None + 1234 + 10.0.0.2 + False + 100 + 10.0.0.7 + 255.255.255.248 + False + 10.0.0.0/29 + 127.0.0.1 + False + True + None + 2011-08-15 06:19:19.387525 + nsokolov-desktop + None + None + None + 10.0.0.1 + None + \ No newline at end of file diff --git a/doc/api_samples/os-extended-networks/networks-list-resp.json b/doc/api_samples/os-extended-networks/networks-list-resp.json new file mode 100644 index 0000000000..49bdad5826 --- /dev/null +++ b/doc/api_samples/os-extended-networks/networks-list-resp.json @@ -0,0 +1,72 @@ +{ + "networks": [ + { + "bridge": "br100", + "bridge_interface": "eth0", + "broadcast": "10.0.0.7", + "cidr": "10.0.0.0/29", + "cidr_v6": null, + "created_at": "2011-08-15T06:19:19.387525", + "deleted": false, + "deleted_at": null, + "dhcp_server": "10.0.0.1", + "dhcp_start": "10.0.0.3", + "dns1": null, + "dns2": null, + "enable_dhcp": true, + "gateway": "10.0.0.1", + "gateway_v6": null, + "host": "nsokolov-desktop", + "id": "20c8acc0-f747-4d71-a389-46d078ebf047", + "injected": false, + "label": "mynet_0", + "mtu": null, + "multi_host": false, + "netmask": "255.255.255.248", + "netmask_v6": null, + "priority": null, + "project_id": "1234", + "rxtx_base": null, + "share_address": false, + "updated_at": "2011-08-16T09:26:13.048257", + "vlan": 100, + "vpn_private_address": "10.0.0.2", + "vpn_public_address": "127.0.0.1", + "vpn_public_port": 1000 + }, + { + "bridge": "br101", + "bridge_interface": "eth0", + "broadcast": "10.0.0.15", + "cidr": "10.0.0.10/29", + "cidr_v6": null, + "created_at": "2011-08-15T06:19:19.885495", + "deleted": false, + "deleted_at": null, + "dhcp_server": "10.0.0.9", + 
"dhcp_start": "10.0.0.11", + "dns1": null, + "dns2": null, + "enable_dhcp": true, + "gateway": "10.0.0.9", + "gateway_v6": null, + "host": null, + "id": "20c8acc0-f747-4d71-a389-46d078ebf000", + "injected": false, + "label": "mynet_1", + "mtu": null, + "multi_host": false, + "netmask": "255.255.255.248", + "netmask_v6": null, + "priority": null, + "project_id": null, + "rxtx_base": null, + "share_address": false, + "updated_at": null, + "vlan": 101, + "vpn_private_address": "10.0.0.10", + "vpn_public_address": null, + "vpn_public_port": 1001 + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-extended-networks/networks-list-resp.xml b/doc/api_samples/os-extended-networks/networks-list-resp.xml new file mode 100644 index 0000000000..b3b6e8885f --- /dev/null +++ b/doc/api_samples/os-extended-networks/networks-list-resp.xml @@ -0,0 +1,71 @@ + + + + br100 + 1000 + 10.0.0.3 + eth0 + False + 2011-08-16 09:26:13.048257 + 20c8acc0-f747-4d71-a389-46d078ebf047 + None + None + 10.0.0.1 + None + + None + 1234 + 10.0.0.2 + False + 100 + 10.0.0.7 + 255.255.255.248 + False + 10.0.0.0/29 + 127.0.0.1 + False + True + None + 2011-08-15 06:19:19.387525 + nsokolov-desktop + None + None + None + 10.0.0.1 + None + + + br101 + 1001 + 10.0.0.11 + eth0 + False + None + 20c8acc0-f747-4d71-a389-46d078ebf000 + None + None + 10.0.0.9 + None + + None + None + 10.0.0.10 + False + 101 + 10.0.0.15 + 255.255.255.248 + False + 10.0.0.10/29 + None + False + True + None + 2011-08-15 06:19:19.885495 + None + None + None + None + 10.0.0.9 + None + + \ No newline at end of file diff --git a/nova/api/openstack/compute/contrib/extended_networks.py b/nova/api/openstack/compute/contrib/extended_networks.py new file mode 100644 index 0000000000..f5021a48dc --- /dev/null +++ b/nova/api/openstack/compute/contrib/extended_networks.py @@ -0,0 +1,26 @@ +# Copyright 2014 Nebula, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.api.openstack import extensions + + +class Extended_networks(extensions.ExtensionDescriptor): + """Adds additional fields to networks.""" + + name = "ExtendedNetworks" + alias = "os-extended-networks" + namespace = ("http://docs.openstack.org/compute/ext/extended_networks" + "/api/v2") + updated = "2014-05-09T00:00:00Z" diff --git a/nova/api/openstack/compute/contrib/os_networks.py b/nova/api/openstack/compute/contrib/os_networks.py index 2cbb46ff4e..cbb30e68ea 100644 --- a/nova/api/openstack/compute/contrib/os_networks.py +++ b/nova/api/openstack/compute/contrib/os_networks.py @@ -30,9 +30,10 @@ authorize = extensions.extension_authorizer('compute', 'networks') authorize_view = extensions.extension_authorizer('compute', 'networks:view') +extended_fields = ('mtu', 'dhcp_server', 'enable_dhcp', 'share_address') -def network_dict(context, network): +def network_dict(context, network, extended): fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2', 'cidr_v6', 'gateway_v6', 'label', 'netmask_v6') admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted', @@ -46,6 +47,8 @@ def network_dict(context, network): # are only visible if they are an admin. 
if context.is_admin: fields += admin_fields + if extended: + fields += extended_fields result = dict((field, network.get(field)) for field in fields) uuid = network.get('uuid') if uuid: @@ -57,14 +60,19 @@ def network_dict(context, network): class NetworkController(wsgi.Controller): - def __init__(self, network_api=None): + def __init__(self, network_api=None, ext_mgr=None): self.network_api = network_api or network.API() + if ext_mgr: + self.extended = ext_mgr.is_loaded('os-extended-networks') + else: + self.extended = False def index(self, req): context = req.environ['nova.context'] authorize_view(context) networks = self.network_api.get_all(context) - result = [network_dict(context, net_ref) for net_ref in networks] + result = [network_dict(context, net_ref, self.extended) + for net_ref in networks] return {'networks': result} @wsgi.action("disassociate") @@ -93,7 +101,7 @@ def show(self, req, id): except exception.NetworkNotFound: msg = _("Network not found") raise exc.HTTPNotFound(explanation=msg) - return {'network': network_dict(context, network)} + return {'network': network_dict(context, network, self.extended)} def delete(self, req, id): context = req.environ['nova.context'] @@ -113,7 +121,7 @@ def create(self, req, body): authorize(context) def bad(e): - return exc.HTTPUnprocessableEntity(explanation=e) + return exc.HTTPBadRequest(explanation=e) if not (body and body.get("network")): raise bad(_("Missing network in body")) @@ -126,13 +134,31 @@ def bad(e): if not cidr: raise bad(_("Network cidr or cidr_v6 is required")) - LOG.debug("Creating network with label %s", params["label"]) + if params.get("project_id") == "": + params["project_id"] = None - params["num_networks"] = 1 - params["network_size"] = netaddr.IPNetwork(cidr).size + LOG.debug("Creating network with label %s", params["label"]) - network = self.network_api.create(context, **params)[0] - return {"network": network_dict(context, network)} + try: + params["num_networks"] = 1 + try: + 
params["network_size"] = netaddr.IPNetwork(cidr).size + except netaddr.AddrFormatError: + raise exception.InvalidCidr(cidr=cidr) + if not self.extended: + create_params = ('allowed_start', 'allowed_end') + for field in extended_fields + create_params: + if field in params: + del params[field] + + network = self.network_api.create(context, **params)[0] + except exception.NovaException as ex: + if ex.code == 400: + raise bad(ex.format_message()) + elif ex.code == 409: + raise exc.HTTPConflict(explanation=ex.format_message()) + raise + return {"network": network_dict(context, network, self.extended)} def add(self, req, body): context = req.environ['nova.context'] @@ -177,7 +203,7 @@ def get_resources(self): collection_actions = {'add': 'POST'} res = extensions.ResourceExtension( 'os-networks', - NetworkController(), + NetworkController(ext_mgr=self.ext_mgr), member_actions=member_actions, collection_actions=collection_actions) return [res] diff --git a/nova/exception.py b/nova/exception.py index d3677ac9af..1a1923dbca 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -590,32 +590,27 @@ class NetworkInUse(NovaException): msg_fmt = _("Network %(network_id)s is still in use.") -class InvalidNetworkParam(Invalid): - # NOTE(vish) base class for network create param errors - code = 422 - - -class NetworkNotCreated(InvalidNetworkParam): +class NetworkNotCreated(Invalid): msg_fmt = _("%(req)s is required to create a network.") -class LabelTooLong(InvalidNetworkParam): +class LabelTooLong(Invalid): msg_fmt = _("Maximum allowed length for 'label' is 255.") -class InvalidIntValue(InvalidNetworkParam): +class InvalidIntValue(Invalid): msg_fmt = _("%(key)s must be an integer.") -class InvalidCidr(InvalidNetworkParam): +class InvalidCidr(Invalid): msg_fmt = _("%(cidr)s is not a valid ip network.") -class InvalidAddress(InvalidNetworkParam): +class InvalidAddress(Invalid): msg_fmt = _("%(address)s is not a valid ip address.") -class AddressOutOfRange(InvalidNetworkParam): 
+class AddressOutOfRange(Invalid): msg_fmt = _("%(address)s is not within %(cidr)s.") diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py index 48fe473217..5b9a8a1a2d 100644 --- a/nova/tests/api/openstack/compute/contrib/test_networks.py +++ b/nova/tests/api/openstack/compute/contrib/test_networks.py @@ -27,8 +27,11 @@ from nova.api.openstack.compute.contrib import networks_associate from nova.api.openstack.compute.contrib import os_networks as networks from nova.api.openstack.compute.contrib import os_tenant_networks as tnet +from nova.api.openstack import extensions import nova.context from nova import exception +from nova.network import manager +from nova import objects from nova import test from nova.tests.api.openstack import fakes import nova.utils @@ -52,6 +55,8 @@ 'dns1': None, 'dns2': None, 'host': 'nsokolov-desktop', 'gateway_v6': None, 'netmask_v6': None, 'priority': None, 'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 387525), + 'mtu': None, 'dhcp_server': '10.0.0.1', 'enable_dhcp': True, + 'share_address': False, }, { 'bridge': 'br101', 'vpn_public_port': 1001, @@ -67,6 +72,8 @@ 'multi_host': False, 'dns1': None, 'dns2': None, 'host': None, 'gateway_v6': None, 'netmask_v6': None, 'priority': None, 'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 885495), + 'mtu': None, 'dhcp_server': '10.0.0.9', 'enable_dhcp': True, + 'share_address': False, }, ] @@ -198,13 +205,83 @@ def create(self, context, **kwargs): return new_networks +# NOTE(vish): tests that network create Exceptions actually return +# the proper error responses +class NetworkCreateExceptionsTest(test.TestCase): + + def setUp(self): + super(NetworkCreateExceptionsTest, self).setUp() + ext_mgr = extensions.ExtensionManager() + ext_mgr.extensions = {'os-extended-networks': 'fake'} + + class PassthroughAPI(): + def __init__(self): + self.network_manager = manager.FlatDHCPManager() + + def create(self, 
*args, **kwargs): + return self.network_manager.create_networks(*args, **kwargs) + + self.controller = networks.NetworkController( + PassthroughAPI(), ext_mgr) + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + + def test_network_create_bad_vlan(self): + req = fakes.HTTPRequest.blank('/v2/1234/os-networks') + net = copy.deepcopy(NEW_NETWORK) + net['network']['vlan_start'] = 'foo' + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, net) + + def test_network_create_no_cidr(self): + req = fakes.HTTPRequest.blank('/v2/1234/os-networks') + net = copy.deepcopy(NEW_NETWORK) + net['network']['cidr'] = '' + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, net) + + def test_network_create_invalid_fixed_cidr(self): + req = fakes.HTTPRequest.blank('/v2/1234/os-networks') + net = copy.deepcopy(NEW_NETWORK) + net['network']['fixed_cidr'] = 'foo' + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, net) + + def test_network_create_invalid_start(self): + req = fakes.HTTPRequest.blank('/v2/1234/os-networks') + net = copy.deepcopy(NEW_NETWORK) + net['network']['allowed_start'] = 'foo' + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, net) + + def test_network_create_cidr_conflict(self): + + @staticmethod + def get_all(context): + ret = objects.NetworkList(context=context, objects=[]) + net = objects.Network(cidr='10.0.0.0/23') + ret.objects.append(net) + return ret + + self.stubs.Set(objects.NetworkList, 'get_all', get_all) + + req = fakes.HTTPRequest.blank('/v2/1234/os-networks') + net = copy.deepcopy(NEW_NETWORK) + net['network']['cidr'] = '10.0.0.0/24' + self.assertRaises(webob.exc.HTTPConflict, + self.controller.create, req, net) + + class NetworksTest(test.NoDBTestCase): def setUp(self): super(NetworksTest, self).setUp() self.fake_network_api = FakeNetworkAPI() + ext_mgr = extensions.ExtensionManager() + ext_mgr.extensions = 
{'os-extended-networks': 'fake'} self.controller = networks.NetworkController( - self.fake_network_api) + self.fake_network_api, + ext_mgr) self.associate_controller = networks_associate\ .NetworkAssociateActionController(self.fake_network_api) fakes.stub_out_networking(self.stubs) @@ -313,13 +390,6 @@ def test_network_delete_in_use(self): self.assertRaises(webob.exc.HTTPConflict, self.controller.delete, req, -1) - def test_network_add_vlan_disabled(self): - self.fake_network_api.disable_vlan() - uuid = FAKE_NETWORKS[1]['uuid'] - req = fakes.HTTPRequest.blank('/v2/1234/os-networks/add') - self.assertRaises(webob.exc.HTTPNotImplemented, - self.controller.add, req, {'id': uuid}) - def test_network_add(self): uuid = FAKE_NETWORKS[1]['uuid'] req = fakes.HTTPRequest.blank('/v2/1234/os-networks/add') @@ -359,6 +429,29 @@ def test_network_create_large(self): self.assertEqual(res_dict['network']['cidr'], large_network['network']['cidr']) + def test_network_create_not_extended(self): + self.stubs.Set(self.controller, 'extended', False) + + # NOTE(vish): Verify that new params are not passed through if + # extension is not enabled. 
+ def no_mtu(*args, **kwargs): + if 'mtu' in kwargs: + raise test.TestingException("mtu should not pass through") + return [{}] + + self.stubs.Set(self.controller.network_api, 'create', no_mtu) + req = fakes.HTTPRequest.blank('/v2/1234/os-networks') + net = copy.deepcopy(NEW_NETWORK) + net['network']['mtu'] = 9000 + self.controller.create(req, net) + + def test_network_create_bad_cidr(self): + req = fakes.HTTPRequest.blank('/v2/1234/os-networks') + net = copy.deepcopy(NEW_NETWORK) + net['network']['cidr'] = '128.0.0.0/900' + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, net) + def test_network_neutron_associate_not_implemented(self): uuid = FAKE_NETWORKS[1]['uuid'] self.flags(network_api_class='nova.network.neutronv2.api.API') diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl index c7e4783743..6830422bff 100644 --- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl +++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl @@ -488,6 +488,14 @@ "namespace": "http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1", "updated": "%(isotime)s" }, + { + "alias": "os-extended-networks", + "description": "%(text)s", + "links": [], + "name": "ExtendedNetworks", + "namespace": "http://docs.openstack.org/compute/ext/extended_networks/api/v2", + "updated": "%(isotime)s" + }, { "alias": "os-extended-quotas", "description": "%(text)s", diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl index ef98730514..9cacb12676 100644 --- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl @@ -174,6 +174,9 @@ %(text)s + + %(text)s + %(text)s diff --git 
a/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.json.tpl b/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.json.tpl new file mode 100644 index 0000000000..18515bd6c4 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.json.tpl @@ -0,0 +1,12 @@ +{ + "network": { + "label": "new net 111", + "cidr": "10.20.105.0/24", + "mtu": 9000, + "dhcp_server": "10.20.105.2", + "enable_dhcp": false, + "share_address": true, + "allowed_start": "10.20.105.10", + "allowed_end": "10.20.105.200" + } +} diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl b/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl new file mode 100644 index 0000000000..3cc79bd837 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl @@ -0,0 +1,10 @@ + + + 10.20.105.0/24 + 9000 + 10.20.105.2 + False + True + 10.20.105.10 + 10.20.105.200 + diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl b/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl new file mode 100644 index 0000000000..5cf155b13f --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl @@ -0,0 +1,36 @@ +{ + "network": { + "bridge": null, + "vpn_public_port": null, + "dhcp_start": "%(ip)s", + "bridge_interface": null, + "updated_at": null, + "id": "%(id)s", + "cidr_v6": null, + "deleted_at": null, + "gateway": "%(ip)s", + "rxtx_base": null, + "label": "new net 111", + "priority": null, + "project_id": null, + "vpn_private_address": null, + "deleted": null, + "vlan": null, + "broadcast": "%(ip)s", + "netmask": "%(ip)s", + "injected": null, + "cidr": "10.20.105.0/24", + "vpn_public_address": null, + "multi_host": null, + "dns2": null, + "created_at": null, + "host": null, + "gateway_v6": null, + "netmask_v6": null, + 
"dns1": null, + "mtu": 9000, + "dhcp_server": "10.20.105.2", + "enable_dhcp": false, + "share_address": true + } +} diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl b/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl new file mode 100644 index 0000000000..3a757c5f2f --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl @@ -0,0 +1,34 @@ + + None + None + %(ip)s + None + None + %(id)s + None + None + %(ip)s + None + + None + None + None + False + None + %(ip)s + %(ip)s + None + 10.20.105.0/24 + None + None + None + None + None + None + None + None + 9000 + 10.20.105.2 + False + True + diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl b/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl new file mode 100644 index 0000000000..ac75fe7fb1 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl @@ -0,0 +1,37 @@ +{ + "network": + { + "bridge": "br100", + "bridge_interface": "eth0", + "broadcast": "%(ip)s", + "cidr": "10.0.0.0/29", + "cidr_v6": null, + "created_at": "%(strtime)s", + "deleted": false, + "deleted_at": null, + "dhcp_start": "%(ip)s", + "dns1": null, + "dns2": null, + "gateway": "%(ip)s", + "gateway_v6": null, + "host": "nsokolov-desktop", + "id": "%(id)s", + "injected": false, + "label": "mynet_0", + "multi_host": false, + "netmask": "%(ip)s", + "netmask_v6": null, + "priority": null, + "project_id": "1234", + "rxtx_base": null, + "updated_at": "%(strtime)s", + "vlan": 100, + "vpn_private_address": "%(ip)s", + "vpn_public_address": "%(ip)s", + "vpn_public_port": 1000, + "mtu": null, + "dhcp_server": "%(ip)s", + "enable_dhcp": true, + "share_address": false + } +} diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl 
b/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl new file mode 100644 index 0000000000..3139ca88a8 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl @@ -0,0 +1,35 @@ + + + br100 + 1000 + %(ip)s + eth0 + %(xmltime)s + %(id)s + None + None + %(ip)s + None + + None + 1234 + %(ip)s + False + 100 + %(ip)s + %(ip)s + False + 10.0.0.0/29 + %(ip)s + False + None + %(xmltime)s + nsokolov-desktop + None + None + None + None + %(ip)s + True + False + diff --git a/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl new file mode 100644 index 0000000000..ccdd586a0f --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl @@ -0,0 +1,72 @@ +{ + "networks": [ + { + "bridge": "br100", + "bridge_interface": "eth0", + "broadcast": "%(ip)s", + "cidr": "10.0.0.0/29", + "cidr_v6": null, + "created_at": "%(strtime)s", + "deleted": false, + "deleted_at": null, + "dhcp_start": "%(ip)s", + "dns1": null, + "dns2": null, + "gateway": "%(ip)s", + "gateway_v6": null, + "host": "nsokolov-desktop", + "id": "%(id)s", + "injected": false, + "label": "mynet_0", + "multi_host": false, + "netmask": "%(ip)s", + "netmask_v6": null, + "priority": null, + "project_id": "1234", + "rxtx_base": null, + "updated_at": "%(strtime)s", + "vlan": 100, + "vpn_private_address": "%(ip)s", + "vpn_public_address": "%(ip)s", + "vpn_public_port": 1000, + "mtu": null, + "dhcp_server": "%(ip)s", + "enable_dhcp": true, + "share_address": false + }, + { + "bridge": "br101", + "bridge_interface": "eth0", + "broadcast": "%(ip)s", + "cidr": "10.0.0.10/29", + "cidr_v6": null, + "created_at": "%(strtime)s", + "deleted": false, + "deleted_at": null, + "dhcp_start": "%(ip)s", + "dns1": null, + "dns2": null, + "gateway": "%(ip)s", + "gateway_v6": null, + "host": null, + "id": "%(id)s", + 
"injected": false, + "label": "mynet_1", + "multi_host": false, + "netmask": "%(ip)s", + "netmask_v6": null, + "priority": null, + "project_id": null, + "rxtx_base": null, + "updated_at": null, + "vlan": 101, + "vpn_private_address": "%(ip)s", + "vpn_public_address": null, + "vpn_public_port": 1001, + "mtu": null, + "dhcp_server": "%(ip)s", + "enable_dhcp": true, + "share_address": false + } + ] +} diff --git a/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl new file mode 100644 index 0000000000..0b7f456402 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl @@ -0,0 +1,71 @@ + + + + br100 + 1000 + %(ip)s + eth0 + %(xmltime)s + %(id)s + None + None + %(ip)s + None + + None + 1234 + %(ip)s + False + 100 + %(ip)s + %(ip)s + False + 10.0.0.0/29 + %(ip)s + False + None + %(xmltime)s + nsokolov-desktop + None + None + None + None + %(ip)s + True + False + + + br101 + 1001 + %(ip)s + eth0 + None + %(id)s + None + None + %(ip)s + None + + None + None + %(ip)s + False + 101 + %(ip)s + %(ip)s + False + 10.0.0.10/29 + None + False + None + %(xmltime)s + None + None + None + None + None + %(ip)s + True + False + + diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index 4707a74889..0ccb3b0823 100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -2669,6 +2669,50 @@ class NetworksXmlTests(NetworksJsonTests): ctype = 'xml' +class ExtendedNetworksJsonTests(ApiSampleTestBaseV2): + extends_name = ("nova.api.openstack.compute.contrib." + "os_networks.Os_networks") + extension_name = ("nova.api.openstack.compute.contrib." 
+ "extended_networks.Extended_networks") + + def setUp(self): + super(ExtendedNetworksJsonTests, self).setUp() + fake_network_api = test_networks.FakeNetworkAPI() + self.stubs.Set(network_api.API, "get_all", + fake_network_api.get_all) + self.stubs.Set(network_api.API, "get", + fake_network_api.get) + self.stubs.Set(network_api.API, "associate", + fake_network_api.associate) + self.stubs.Set(network_api.API, "delete", + fake_network_api.delete) + self.stubs.Set(network_api.API, "create", + fake_network_api.create) + self.stubs.Set(network_api.API, "add_network_to_project", + fake_network_api.add_network_to_project) + + def test_network_list(self): + response = self._do_get('os-networks') + subs = self._get_regexes() + self._verify_response('networks-list-resp', subs, response, 200) + + def test_network_show(self): + uuid = test_networks.FAKE_NETWORKS[0]['uuid'] + response = self._do_get('os-networks/%s' % uuid) + subs = self._get_regexes() + self._verify_response('network-show-resp', subs, response, 200) + + def test_network_create(self): + response = self._do_post("os-networks", + 'network-create-req', {}) + subs = self._get_regexes() + self._verify_response('network-create-resp', subs, response, 200) + + +class ExtendedNetworksXmlTests(ExtendedNetworksJsonTests): + ctype = 'xml' + + class NetworksAssociateJsonTests(ApiSampleTestBaseV2): extension_name = ("nova.api.openstack.compute.contrib" ".networks_associate.Networks_associate") From 9bbd96d94384b79631196417c8b29fad02772c41 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Tue, 12 Aug 2014 01:59:17 +0000 Subject: [PATCH 430/486] Fix the exception for a nonexistent flavor flavor_get_by_flavor_id() raises a FlavorNotFound exception when trying to delete a nonexistent flavor and REST API layer should catch the exception. However, one of v2 API catches a NotFound exception instead, and it has been fixed on v3 API. This patch fixes the one of v2 API. This patch is required for v2.1 API development. 
On the development, we are sharing current v2 unit tests between v2 and v2.1 for avoiding duplicated code. Now on v3 API which is source of v2.1, "delete a flavor" API catches a FlavorNotFound exception. So current unit test test_delete fails against v2.1 API if not merging this patch, because current v2 unit test raises more generic exception NotFound but v3 "delete a flavor" API catches more specific exception FlavorNotFound. Partially implements blueprint v2-on-v3-api Change-Id: Iec7bca55f0e9f4b8a85f3b9cd54d8deff2904f86 --- nova/api/openstack/compute/contrib/flavormanage.py | 2 +- nova/tests/api/openstack/compute/contrib/test_flavor_manage.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/compute/contrib/flavormanage.py b/nova/api/openstack/compute/contrib/flavormanage.py index 707253be6b..8e7e93ffc1 100644 --- a/nova/api/openstack/compute/contrib/flavormanage.py +++ b/nova/api/openstack/compute/contrib/flavormanage.py @@ -39,7 +39,7 @@ def _delete(self, req, id): try: flavor = flavors.get_flavor_by_flavor_id( id, ctxt=context, read_deleted="no") - except exception.NotFound as e: + except exception.FlavorNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) flavors.destroy(flavor['name']) diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py index 925e2ab761..66ad9c645c 100644 --- a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py +++ b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py @@ -30,7 +30,7 @@ def fake_get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted='yes'): if flavorid == 'failtest': - raise exception.NotFound("Not found sucka!") + raise exception.FlavorNotFound(flavor_id=flavorid) elif not str(flavorid) == '1234': raise Exception("This test expects flavorid 1234, not %s" % flavorid) if read_deleted != 'no': From a41543820a8f0686612213b12ab07cf70b3d997f Mon Sep 17 
00:00:00 2001 From: Sagar Ratnakara Nikam Date: Wed, 2 Jul 2014 20:21:24 +0530 Subject: [PATCH 431/486] HyperV Driver - Fix to implement hypervisor-uptime The hyperv driver currently does not return the hypervisor-uptime. This fix does the necessary changes to return the hypervisor-uptime. The uptime returned is in similar format to libvirt driver uptime The uptime is calculated using the windows function GetTickCount64 More details about this function available at http://msdn.microsoft.com/en-us/library/windows/desktop/ms724411%28v=vs.85%29.aspx The function GetTickCount64 returns in millisecs the time since the host is up. Change-Id: Ib3cd90b17f64e369badaddb764ac9e6b9f6c1a4b Closes-Bug: #1335559 --- nova/tests/virt/hyperv/test_hostutils.py | 34 ++++++++++++++++++++++++ nova/tests/virt/hyperv/test_hypervapi.py | 30 +++++++++++++++++++++ nova/virt/hyperv/driver.py | 3 +++ nova/virt/hyperv/hostops.py | 22 +++++++++++++++ nova/virt/hyperv/hostutils.py | 3 +++ 5 files changed, 92 insertions(+) create mode 100644 nova/tests/virt/hyperv/test_hostutils.py diff --git a/nova/tests/virt/hyperv/test_hostutils.py b/nova/tests/virt/hyperv/test_hostutils.py new file mode 100644 index 0000000000..9deeb816a7 --- /dev/null +++ b/nova/tests/virt/hyperv/test_hostutils.py @@ -0,0 +1,34 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from nova import test +from nova.virt.hyperv import hostutils + + +class HostUtilsTestCase(test.NoDBTestCase): + """Unit tests for the Hyper-V hostutils class.""" + + def setUp(self): + self._hostutils = hostutils.HostUtils() + self._hostutils._conn_cimv2 = mock.MagicMock() + super(HostUtilsTestCase, self).setUp() + + @mock.patch('nova.virt.hyperv.hostutils.ctypes') + def test_get_host_tick_count64(self, mock_ctypes): + tick_count64 = "100" + mock_ctypes.windll.kernel32.GetTickCount64.return_value = tick_count64 + response = self._hostutils.get_host_tick_count64() + self.assertEqual(tick_count64, response) diff --git a/nova/tests/virt/hyperv/test_hypervapi.py b/nova/tests/virt/hyperv/test_hypervapi.py index 99b717e65e..3817d5369a 100644 --- a/nova/tests/virt/hyperv/test_hypervapi.py +++ b/nova/tests/virt/hyperv/test_hypervapi.py @@ -17,6 +17,7 @@ """ import contextlib +import datetime import io import os import platform @@ -50,6 +51,7 @@ from nova.virt.hyperv import basevolumeutils from nova.virt.hyperv import constants from nova.virt.hyperv import driver as driver_hyperv +from nova.virt.hyperv import hostops from nova.virt.hyperv import hostutils from nova.virt.hyperv import livemigrationutils from nova.virt.hyperv import networkutils @@ -341,6 +343,13 @@ def test_list_instances(self): self.assertEqual(instances, fake_instances) + def test_get_host_uptime(self): + fake_host = "fake_host" + with mock.patch.object(self._conn._hostops, + "get_host_uptime") as mock_uptime: + self._conn._hostops.get_host_uptime(fake_host) + mock_uptime.assert_called_once_with(fake_host) + def test_get_info(self): self._instance_data = self._get_instance_data() @@ -1828,3 +1837,24 @@ def test_get_free_controller_slot_exception(self): self.assertRaises(vmutils.HyperVException, self.volumeops._get_free_controller_slot, fake_scsi_controller_path) + + +class HostOpsTestCase(HyperVAPIBaseTestCase): + """Unit tests for the Hyper-V hostops class.""" + + def setUp(self): + 
self._hostops = hostops.HostOps() + self._hostops._hostutils = mock.MagicMock() + self._hostops.time = mock.MagicMock() + super(HostOpsTestCase, self).setUp() + + @mock.patch('nova.virt.hyperv.hostops.time') + def test_host_uptime(self, mock_time): + self._hostops._hostutils.get_host_tick_count64.return_value = 100 + mock_time.strftime.return_value = "01:01:01" + + result_uptime = "01:01:01 up %s, 0 users, load average: 0, 0, 0" % ( + str(datetime.timedelta( + milliseconds = long(100)))) + actual_uptime = self._hostops.get_host_uptime() + self.assertEqual(result_uptime, actual_uptime) diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py index 7570bfa4d3..272766bb39 100644 --- a/nova/virt/hyperv/driver.py +++ b/nova/virt/hyperv/driver.py @@ -213,5 +213,8 @@ def finish_migration(self, context, migration, instance, disk_info, def get_host_ip_addr(self): return self._hostops.get_host_ip_addr() + def get_host_uptime(self, host): + return self._hostops.get_host_uptime() + def get_rdp_console(self, context, instance): return self._rdpconsoleops.get_rdp_console(instance) diff --git a/nova/virt/hyperv/hostops.py b/nova/virt/hyperv/hostops.py index 994efc8b67..e5f42009d7 100644 --- a/nova/virt/hyperv/hostops.py +++ b/nova/virt/hyperv/hostops.py @@ -16,8 +16,10 @@ """ Management class for host operations. 
""" +import datetime import os import platform +import time from oslo.config import cfg @@ -177,3 +179,23 @@ def get_host_ip_addr(self): host_ip = self._hostutils.get_local_ips()[0] LOG.debug("Host IP address is: %s", host_ip) return host_ip + + def get_host_uptime(self): + """Returns the host uptime.""" + + tick_count64 = self._hostutils.get_host_tick_count64() + + # format the string to match libvirt driver uptime + # Libvirt uptime returns a combination of the following + # - curent host time + # - time since host is up + # - number of logged in users + # - cpu load + # Since the Windows function GetTickCount64 returns only + # the time since the host is up, returning 0s for cpu load + # and number of logged in users. + # This is done to ensure the format of the returned + # value is same as in libvirt + return "%s up %s, 0 users, load average: 0, 0, 0" % ( + str(time.strftime("%H:%M:%S")), + str(datetime.timedelta(milliseconds=long(tick_count64)))) diff --git a/nova/virt/hyperv/hostutils.py b/nova/virt/hyperv/hostutils.py index e7f4534132..e7fc9b68e8 100644 --- a/nova/virt/hyperv/hostutils.py +++ b/nova/virt/hyperv/hostutils.py @@ -75,3 +75,6 @@ def get_local_ips(self): # Returns IPv4 and IPv6 addresses, ordered by protocol family addr_info.sort() return [a[4][0] for a in addr_info] + + def get_host_tick_count64(self): + return ctypes.windll.kernel32.GetTickCount64() From 967c101fabf7edb00c70730e8e7f298ad9d60437 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 19 Aug 2014 12:54:04 -0700 Subject: [PATCH 432/486] Extend the docstring for obj_make_compatible() with examples This adds to the docstring for obj_make_compatible() to help outline the rules and scenarios to help developers understand what they need to do in that method, and when. 
Change-Id: Ifa8cb9a4f9a1a4d5afd0da4629a7d476a52c62d6 --- nova/objects/base.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/nova/objects/base.py b/nova/objects/base.py index 45b258c64e..9a17257b04 100644 --- a/nova/objects/base.py +++ b/nova/objects/base.py @@ -341,7 +341,19 @@ def obj_make_compatible(self, primitive, target_version): This is responsible for taking the primitive representation of an object and making it suitable for the given target_version. This may mean converting the format of object attributes, removing - attributes that have been added since the target version, etc. + attributes that have been added since the target version, etc. In + general: + + - If a new version of an object adds a field, this routine + should remove it for older versions. + - If a new version changed or restricted the format of a field, this + should convert it back to something a client knowing only of the + older version will tolerate. + - If an object that this object depends on is bumped, then this + object should also take a version bump. Then, this routine should + backlevel the dependent object (by calling its obj_make_compatible()) + if the requested version of this object is older than the version + where the new dependent object was added. :param:primitive: The result of self.obj_to_primitive() :param:target_version: The version string requested by the recipient From 630a7f369a76eaf7f942d8989a30f5dc7b09327e Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 20 Aug 2014 07:56:54 -0700 Subject: [PATCH 433/486] Fix expected error details from jsonschema The jsonschema 2.4.0 release slightly changed some of the error messages that are provided when validation fails. This updates the unit tests to expect the proper string and avoid failing as a result. 
Change-Id: Ic8e32140e49e394659e7ebbecb28afb704b23b7c Closes-bug: #1358881 --- nova/tests/test_api_validation.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/nova/tests/test_api_validation.py b/nova/tests/test_api_validation.py index e055817489..d280e35705 100644 --- a/nova/tests/test_api_validation.py +++ b/nova/tests/test_api_validation.py @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. +import re + from nova.api import validation from nova.api.validation import parameter_types from nova import exception @@ -24,11 +26,10 @@ def check_validation_error(self, method, body, expected_detail): try: method(body=body) except exception.ValidationError as ex: - expected_kwargs = { - 'code': 400, - 'detail': expected_detail - } - self.assertEqual(ex.kwargs, expected_kwargs) + self.assertEqual(400, ex.kwargs['code']) + if not re.match(expected_detail, ex.kwargs['detail']): + self.assertEqual(expected_detail, ex.kwargs['detail'], + 'Exception details did not match expected') except Exception as ex: self.fail('An unexpected exception happens: %s' % ex) else: @@ -356,22 +357,22 @@ def test_validate_integer_range(self): def test_validate_integer_range_fails(self): detail = ("Invalid input for field/attribute foo. Value: 0." - " 0.0 is less than the minimum of 1") + " 0(.0)? is less than the minimum of 1") self.check_validation_error(self.post, body={'foo': 0}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 11." - " 11.0 is greater than the maximum of 10") + " 11(.0)? is greater than the maximum of 10") self.check_validation_error(self.post, body={'foo': 11}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 0." - " 0.0 is less than the minimum of 1") + " 0(.0)? 
is less than the minimum of 1") self.check_validation_error(self.post, body={'foo': '0'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 11." - " 11.0 is greater than the maximum of 10") + " 11(.0)? is greater than the maximum of 10") self.check_validation_error(self.post, body={'foo': '11'}, expected_detail=detail) @@ -597,7 +598,7 @@ def test_validate_tcp_udp_port_fails(self): expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 65536." - " 65536.0 is greater than the maximum of 65535") + " 65536(.0)? is greater than the maximum of 65535") self.check_validation_error(self.post, body={'foo': 65536}, expected_detail=detail) From 3440713df237b3a315652f9f331a6f3e9d0a44a2 Mon Sep 17 00:00:00 2001 From: Phil Day Date: Wed, 20 Aug 2014 16:45:10 +0000 Subject: [PATCH 434/486] Fix ability to generate object hashes in test_objects.py Change https://review.openstack.org/#/c/114260/3 broke the ability to generate object hash codes by setting GENERATE_HASHES in the environment. Change-Id: I08e015b44ac12655d33e0b04d5608d68c1f13270 --- nova/tests/objects/test_objects.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index 05d20f9fce..851d4a1858 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -1030,7 +1030,7 @@ def test_versions(self): if os.getenv('GENERATE_HASHES'): file('object_hashes.txt', 'w').write( - pprint.pformat(self._fingerprints)) + pprint.pformat(fingerprints)) raise test.TestingException( 'Generated hashes in object_hashes.txt') From f1550b2357881df6ec8c2c63ca41bfc9909afc5a Mon Sep 17 00:00:00 2001 From: Claudiu Belu Date: Wed, 16 Oct 2013 09:55:39 -0700 Subject: [PATCH 435/486] Adds tests for Hyper-V Volume utils Adds unit tests for Hyper-V BaseVolumeUtils, VolumeUtils and VolumeUtilsV2 classes. 
Co-Authored-By: Mouhammad-Nashwan Azhari Partial-Bug: #1220256 Change-Id: I2792e0c08d2602f3c2690d44b632c2e932a94115 --- .../tests/virt/hyperv/test_basevolumeutils.py | 130 ++++++++++++++++++ nova/tests/virt/hyperv/test_volumeutils.py | 21 ++- nova/tests/virt/hyperv/test_volumeutilsv2.py | 35 +++++ 3 files changed, 184 insertions(+), 2 deletions(-) create mode 100644 nova/tests/virt/hyperv/test_basevolumeutils.py diff --git a/nova/tests/virt/hyperv/test_basevolumeutils.py b/nova/tests/virt/hyperv/test_basevolumeutils.py new file mode 100644 index 0000000000..640e752985 --- /dev/null +++ b/nova/tests/virt/hyperv/test_basevolumeutils.py @@ -0,0 +1,130 @@ +# Copyright 2014 Cloudbase Solutions Srl +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from nova import test +from nova.virt.hyperv import basevolumeutils + + +def _exception_thrower(): + raise Exception("Testing exception handling.") + + +class BaseVolumeUtilsTestCase(test.NoDBTestCase): + """Unit tests for the Hyper-V BaseVolumeUtils class.""" + + _FAKE_COMPUTER_NAME = "fake_computer_name" + _FAKE_DOMAIN_NAME = "fake_domain_name" + _FAKE_INITIATOR_NAME = "fake_initiator_name" + _FAKE_INITIATOR_IQN_NAME = "iqn.1991-05.com.microsoft:fake_computer_name" + _FAKE_DISK_PATH = 'fake_path DeviceID="123\\\\2"' + _FAKE_MOUNT_DEVICE = '/dev/fake/mount' + _FAKE_DEVICE_NAME = '/dev/fake/path' + _FAKE_SWAP = {'device_name': _FAKE_DISK_PATH} + + def setUp(self): + self._volutils = basevolumeutils.BaseVolumeUtils() + self._volutils._conn_wmi = mock.MagicMock() + self._volutils._conn_cimv2 = mock.MagicMock() + + super(BaseVolumeUtilsTestCase, self).setUp() + + def test_get_iscsi_initiator_ok(self): + self._check_get_iscsi_initiator( + mock.MagicMock(return_value=mock.sentinel.FAKE_KEY), + self._FAKE_INITIATOR_NAME) + + def test_get_iscsi_initiator_exception(self): + initiator_name = "%(iqn)s.%(domain)s" % { + 'iqn': self._FAKE_INITIATOR_IQN_NAME, + 'domain': self._FAKE_DOMAIN_NAME + } + + self._check_get_iscsi_initiator(_exception_thrower, initiator_name) + + def _check_get_iscsi_initiator(self, winreg_method, expected): + mock_computer = mock.MagicMock() + mock_computer.name = self._FAKE_COMPUTER_NAME + mock_computer.Domain = self._FAKE_DOMAIN_NAME + self._volutils._conn_cimv2.Win32_ComputerSystem.return_value = [ + mock_computer] + + with mock.patch.object(basevolumeutils, + '_winreg', create=True) as mock_winreg: + mock_winreg.OpenKey = winreg_method + mock_winreg.QueryValueEx = mock.MagicMock(return_value=[expected]) + + initiator_name = self._volutils.get_iscsi_initiator() + self.assertEqual(expected, initiator_name) + + @mock.patch.object(basevolumeutils, 'driver') + def test_volume_in_mapping(self, mock_driver): + 
mock_driver.block_device_info_get_mapping.return_value = [ + {'mount_device': self._FAKE_MOUNT_DEVICE}] + mock_driver.block_device_info_get_swap = mock.MagicMock( + return_value=self._FAKE_SWAP) + mock_driver.block_device_info_get_ephemerals = mock.MagicMock( + return_value=[{'device_name': self._FAKE_DEVICE_NAME}]) + + mock_driver.swap_is_usable = mock.MagicMock(return_value=True) + + self.assertTrue(self._volutils.volume_in_mapping( + self._FAKE_MOUNT_DEVICE, mock.sentinel.FAKE_BLOCK_DEVICE_INFO)) + + @mock.patch.object(basevolumeutils.BaseVolumeUtils, + "_get_drive_number_from_disk_path") + def test_get_session_id_from_mounted_disk(self, mock_get_session_id): + mock_get_session_id.return_value = mock.sentinel.FAKE_DEVICE_NUMBER + mock_initiator_session = self._create_initiator_session() + self._volutils._conn_wmi.query.return_value = [mock_initiator_session] + session_id = self._volutils.get_session_id_from_mounted_disk( + self._FAKE_DISK_PATH) + + self.assertEqual(mock.sentinel.FAKE_SESSION_ID, session_id) + + def test_get_device_number_for_target(self): + init_session = self._create_initiator_session() + self._volutils._conn_wmi.query.return_value = [init_session] + device_number = self._volutils.get_device_number_for_target( + mock.sentinel.FAKE_IQN, mock.sentinel.FAKE_LUN) + + self.assertEqual(mock.sentinel.FAKE_DEVICE_NUMBER, device_number) + + @mock.patch.object(basevolumeutils.BaseVolumeUtils, + "_get_drive_number_from_disk_path") + def test_get_target_from_disk_path(self, mock_get_session_id): + mock_get_session_id.return_value = mock.sentinel.FAKE_DEVICE_NUMBER + init_sess = self._create_initiator_session() + mock_ses_class = self._volutils._conn_wmi.MSiSCSIInitiator_SessionClass + mock_ses_class.return_value = [init_sess] + + (target_name, scsi_lun) = self._volutils.get_target_from_disk_path( + self._FAKE_DISK_PATH) + + self.assertEqual(mock.sentinel.FAKE_TARGET_NAME, target_name) + self.assertEqual(mock.sentinel.FAKE_LUN, scsi_lun) + + def 
_create_initiator_session(self): + device = mock.MagicMock() + device.ScsiLun = mock.sentinel.FAKE_LUN + device.DeviceNumber = mock.sentinel.FAKE_DEVICE_NUMBER + device.TargetName = mock.sentinel.FAKE_TARGET_NAME + init_session = mock.MagicMock() + init_session.Devices = [device] + init_session.SessionId = mock.sentinel.FAKE_SESSION_ID + + return init_session diff --git a/nova/tests/virt/hyperv/test_volumeutils.py b/nova/tests/virt/hyperv/test_volumeutils.py index b554222103..f44ee14594 100644 --- a/nova/tests/virt/hyperv/test_volumeutils.py +++ b/nova/tests/virt/hyperv/test_volumeutils.py @@ -1,5 +1,7 @@ # Copyright 2014 Cloudbase Solutions Srl # +# All Rights Reserved. +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -15,7 +17,7 @@ import mock from oslo.config import cfg -from nova import test +from nova.tests.virt.hyperv import test_basevolumeutils from nova.virt.hyperv import vmutils from nova.virt.hyperv import volumeutils @@ -24,7 +26,7 @@ 'hyperv') -class VolumeUtilsTestCase(test.NoDBTestCase): +class VolumeUtilsTestCase(test_basevolumeutils.BaseVolumeUtilsTestCase): """Unit tests for the Hyper-V VolumeUtils class.""" _FAKE_PORTAL_ADDR = '10.1.1.1' @@ -32,10 +34,13 @@ class VolumeUtilsTestCase(test.NoDBTestCase): _FAKE_LUN = 0 _FAKE_TARGET = 'iqn.2010-10.org.openstack:fake_target' + _FAKE_STDOUT_VALUE = 'The operation completed successfully' + def setUp(self): super(VolumeUtilsTestCase, self).setUp() self._volutils = volumeutils.VolumeUtils() self._volutils._conn_wmi = mock.MagicMock() + self._volutils._conn_cimv2 = mock.MagicMock() self.flags(volume_attach_retry_count=4, group='hyperv') self.flags(volume_attach_retry_interval=0, group='hyperv') @@ -132,3 +137,15 @@ def test_execute_raise_exception(self): def test_execute_exception(self): self._test_execute_wrapper(False) + + @mock.patch.object(volumeutils, 'utils') + def 
test_logout_storage_target(self, mock_utils): + mock_utils.execute.return_value = (self._FAKE_STDOUT_VALUE, + mock.sentinel.FAKE_STDERR_VALUE) + session = mock.MagicMock() + session.SessionId = mock.sentinel.FAKE_SESSION_ID + self._volutils._conn_wmi.query.return_value = [session] + + self._volutils.logout_storage_target(mock.sentinel.FAKE_IQN) + mock_utils.execute.assert_called_once_with( + 'iscsicli.exe', 'logouttarget', mock.sentinel.FAKE_SESSION_ID) diff --git a/nova/tests/virt/hyperv/test_volumeutilsv2.py b/nova/tests/virt/hyperv/test_volumeutilsv2.py index b933e3331d..1c242b71f8 100644 --- a/nova/tests/virt/hyperv/test_volumeutilsv2.py +++ b/nova/tests/virt/hyperv/test_volumeutilsv2.py @@ -36,6 +36,7 @@ def setUp(self): super(VolumeUtilsV2TestCase, self).setUp() self._volutilsv2 = volumeutilsv2.VolumeUtilsV2() self._volutilsv2._conn_storage = mock.MagicMock() + self._volutilsv2._conn_wmi = mock.MagicMock() self.flags(volume_attach_retry_count=4, group='hyperv') self.flags(volume_attach_retry_interval=0, group='hyperv') @@ -110,3 +111,37 @@ def test_login_disconncted_target(self): def test_login_target_exception(self): self._test_login_target(False, True) + + def test_logout_storage_target(self): + mock_msft_target = self._volutilsv2._conn_storage.MSFT_iSCSITarget + mock_msft_session = self._volutilsv2._conn_storage.MSFT_iSCSISession + + mock_target = mock.MagicMock() + mock_target.IsConnected = True + mock_msft_target.return_value = [mock_target] + + mock_session = mock.MagicMock() + mock_session.IsPersistent = True + mock_msft_session.return_value = [mock_session] + + self._volutilsv2.logout_storage_target(self._FAKE_TARGET) + + mock_msft_target.assert_called_once_with(NodeAddress=self._FAKE_TARGET) + mock_msft_session.assert_called_once_with( + TargetNodeAddress=self._FAKE_TARGET) + + mock_session.Unregister.assert_called_once_with() + mock_target.Disconnect.assert_called_once_with() + + @mock.patch.object(volumeutilsv2.VolumeUtilsV2, 
'logout_storage_target') + def test_execute_log_out(self, mock_logout_target): + sess_class = self._volutilsv2._conn_wmi.MSiSCSIInitiator_SessionClass + + mock_session = mock.MagicMock() + sess_class.return_value = [mock_session] + + self._volutilsv2.execute_log_out(mock.sentinel.FAKE_SESSION_ID) + + sess_class.assert_called_once_with( + SessionId=mock.sentinel.FAKE_SESSION_ID) + mock_logout_target.assert_called_once_with(mock_session.TargetName) From 52409a957ebfc2ab99ff6bc6c80cabe94d4c3990 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 20 Aug 2014 15:20:37 -0700 Subject: [PATCH 436/486] Add missing flags to fakelibvirt for migration Commit 26504d71ceaecf22f135d8321769db801290c405 added VIR_MIGRATE_UNDEFINE_SOURCE and VIR_MIGRATE_PEER2PEER to the live and block migration default flags but did not add those flags to the fakelibvirt module for testing. This change adds the missing flags and also adds VIR_MIGRATE_NON_SHARED_INC for block migration which was also missing. Change-Id: I5f3af95a56b95ec20f93b12360bf8b3d40b5f45f Closes-Bug: #1359475 --- nova/tests/virt/libvirt/fakelibvirt.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/tests/virt/libvirt/fakelibvirt.py b/nova/tests/virt/libvirt/fakelibvirt.py index 616ee6d01a..095cd1a50c 100644 --- a/nova/tests/virt/libvirt/fakelibvirt.py +++ b/nova/tests/virt/libvirt/fakelibvirt.py @@ -113,8 +113,11 @@ def _reset(): VIR_CRED_REALM = 8 VIR_CRED_EXTERNAL = 9 +VIR_MIGRATE_LIVE = 1 VIR_MIGRATE_PEER2PEER = 2 +VIR_MIGRATE_TUNNELLED = 4 VIR_MIGRATE_UNDEFINE_SOURCE = 16 +VIR_MIGRATE_NON_SHARED_INC = 128 VIR_NODE_CPU_STATS_ALL_CPUS = -1 From ce383c022a3d58018ad503f98f65fcdd4429b6fd Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 13 Aug 2014 12:49:16 -0700 Subject: [PATCH 437/486] Remove _instance_update usage in _default_block_device_names We have an instance object in _default_block_device_names so we can just call instance.save() directly. 
We don't need to update resource tracker stats so there is no need to call _instance_update in this method. Part of blueprint compute-manager-objects-juno Change-Id: Ia8faa67a36ff31a3ba526770f5d8a0361326ffc7 --- nova/compute/manager.py | 3 +-- nova/tests/compute/test_compute.py | 19 +++++++++---------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7934279ea4..331f05f1e9 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1697,8 +1697,7 @@ def _default_block_device_names(self, context, instance, update_instance = update_root_bdm = True if update_instance: - self._instance_update(context, instance.uuid, - root_device_name=root_device_name) + instance.save() if update_root_bdm: root_bdm.save() diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index b8adaba7df..614c433f21 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -6833,11 +6833,10 @@ def _get_instance_and_bdm_for_dev_defaults_tests(self): def test_default_block_device_names_empty_instance_root_dev(self): instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests() instance.root_device_name = None - self.mox.StubOutWithMock(self.compute, '_instance_update') + self.mox.StubOutWithMock(objects.Instance, 'save') self.mox.StubOutWithMock(self.compute, '_default_device_names_for_instance') - self.compute._instance_update(self.context, instance.uuid, - root_device_name='/dev/vda') + instance.save().AndReturn(None) self.compute._default_device_names_for_instance(instance, '/dev/vda', [], [], [bdm for bdm in bdms]) @@ -6845,11 +6844,11 @@ def test_default_block_device_names_empty_instance_root_dev(self): self.compute._default_block_device_names(self.context, instance, {}, bdms) + self.assertEqual('/dev/vda', instance.root_device_name) def test_default_block_device_names_empty_root_device(self): instance, bdms = 
self._get_instance_and_bdm_for_dev_defaults_tests() bdms[0]['device_name'] = None - self.mox.StubOutWithMock(self.compute, '_instance_update') self.mox.StubOutWithMock(self.compute, '_default_device_names_for_instance') self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save') @@ -6866,7 +6865,7 @@ def test_default_block_device_names_no_root_device(self): instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests() instance.root_device_name = None bdms[0]['device_name'] = None - self.mox.StubOutWithMock(self.compute, '_instance_update') + self.mox.StubOutWithMock(objects.Instance, 'save') self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save') self.mox.StubOutWithMock(self.compute, '_default_root_device_name') @@ -6875,8 +6874,7 @@ def test_default_block_device_names_no_root_device(self): self.compute._default_root_device_name(instance, mox.IgnoreArg(), bdms[0]).AndReturn('/dev/vda') - self.compute._instance_update(self.context, instance.uuid, - root_device_name='/dev/vda') + instance.save().AndReturn(None) bdms[0].save().AndReturn(None) self.compute._default_device_names_for_instance(instance, '/dev/vda', [], [], @@ -6885,6 +6883,7 @@ def test_default_block_device_names_no_root_device(self): self.compute._default_block_device_names(self.context, instance, {}, bdms) + self.assertEqual('/dev/vda', instance.root_device_name) def test_default_block_device_names_with_blank_volumes(self): instance = self._create_fake_instance_obj() @@ -6927,7 +6926,7 @@ def test_default_block_device_names_with_blank_volumes(self): with contextlib.nested( mock.patch.object(self.compute, '_default_root_device_name', return_value='/dev/vda'), - mock.patch.object(self.compute, '_instance_update'), + mock.patch.object(objects.Instance, 'save'), mock.patch.object(objects.BlockDeviceMapping, 'save'), mock.patch.object(self.compute, '_default_device_names_for_instance') @@ -6937,8 +6936,8 @@ def test_default_block_device_names_with_blank_volumes(self): image_meta, bdms) 
default_root_device.assert_called_once_with(instance, image_meta, bdms[0]) - instance_update.assert_called_once_with( - self.context, instance.uuid, root_device_name='/dev/vda') + instance_update.assert_called_once_with() + self.assertEqual('/dev/vda', instance.root_device_name) self.assertTrue(object_save.called) default_device_names.assert_called_once_with(instance, '/dev/vda', [bdms[-2]], [bdms[-1]], From 938398d2f4366ff5fe9a038a4b220746f28197ba Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 13 Aug 2014 13:19:40 -0700 Subject: [PATCH 438/486] Treat instance like an object in _build_instance The _build_instance method has an instance object already but treats it like a primitive dict. This change uses dot notation to access fields on the object and updates the unit tests to use instance objects. Part of blueprint compute-manager-objects-juno Change-Id: I19c5abc502b57579b4706e8a8e7b7a64f6f9b6ca --- nova/compute/manager.py | 14 +++++------ nova/tests/compute/test_compute_mgr.py | 32 +++++++++++++------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 331f05f1e9..1e647708eb 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1318,7 +1318,7 @@ def _build_instance(self, context, request_spec, filter_properties, network_info = None bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance['uuid']) + context, instance.uuid) # b64 decode the files to inject: injected_files_orig = injected_files @@ -1341,7 +1341,7 @@ def _build_instance(self, context, request_spec, filter_properties, dhcp_options) self._instance_update( - context, instance['uuid'], + context, instance.uuid, vm_state=vm_states.BUILDING, task_state=task_states.BLOCK_DEVICE_MAPPING) @@ -1354,8 +1354,8 @@ def _build_instance(self, context, request_spec, filter_properties, context, instance, bdms) set_access_ip = (is_first_time and - not instance['access_ip_v4'] and - not 
instance['access_ip_v6']) + not instance.access_ip_v4 and + not instance.access_ip_v6) instance = self._spawn(context, instance, image_meta, network_info, block_device_info, @@ -1374,7 +1374,7 @@ def _build_instance(self, context, request_spec, filter_properties, 'for deleted instance') LOG.exception(msg, instance=instance) raise exception.BuildAbortException( - instance_uuid=instance['uuid'], + instance_uuid=instance.uuid, reason=_("Instance disappeared during build")) except (exception.UnexpectedTaskStateError, exception.VirtualInterfaceCreateException) as e: @@ -1406,9 +1406,9 @@ def _build_instance(self, context, request_spec, filter_properties, filter_properties, bdms, legacy_bdm_in_spec) if rescheduled: # log the original build error - self._log_original_error(exc_info, instance['uuid']) + self._log_original_error(exc_info, instance.uuid) raise exception.RescheduledException( - instance_uuid=instance['uuid'], + instance_uuid=instance.uuid, reason=unicode(exc_info[1])) else: # not re-scheduling, go to error: diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index d07b07cde0..201600d40a 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -111,10 +111,10 @@ def instance_claim(self, context, instance, limits): self.mox.StubOutWithMock(objects.BlockDeviceMappingList, 'get_by_instance_uuid') - instance = fake_instance.fake_db_instance(system_metadata={}) + instance = fake_instance.fake_instance_obj(self.context) objects.BlockDeviceMappingList.get_by_instance_uuid( - mox.IgnoreArg(), instance['uuid']).AndReturn([]) + mox.IgnoreArg(), instance.uuid).AndReturn([]) node = 'fake_node' self.compute._get_resource_tracker(node).AndReturn( @@ -134,7 +134,7 @@ def fake_allocate(context, *args, **kwargs): self.compute._allocate_network(mox.IgnoreArg(), instance, mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).WithSideEffects(fake_allocate) - 
self.compute._instance_update(self.context, instance['uuid'], + self.compute._instance_update(self.context, instance.uuid, system_metadata={'network_allocated': 'True'}) self.mox.ReplayAll() @@ -1902,10 +1902,10 @@ def _instance_action_events(self): self.mox.StubOutWithMock(objects.InstanceActionEvent, 'event_finish_with_failure') objects.InstanceActionEvent.event_start( - self.context, self.instance['uuid'], mox.IgnoreArg(), + self.context, self.instance.uuid, mox.IgnoreArg(), want_result=False) objects.InstanceActionEvent.event_finish_with_failure( - self.context, self.instance['uuid'], mox.IgnoreArg(), + self.context, self.instance.uuid, mox.IgnoreArg(), exc_val=mox.IgnoreArg(), exc_tb=mox.IgnoreArg(), want_result=False) @@ -1944,7 +1944,7 @@ def test_build_abort_exception(self): self.block_device_mapping, self.node, self.limits, self.filter_properties).AndRaise( exception.BuildAbortException(reason='', - instance_uuid=self.instance['uuid'])) + instance_uuid=self.instance.uuid)) self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) self.compute._cleanup_volumes(self.context, self.instance.uuid, @@ -1975,7 +1975,7 @@ def test_rescheduled_exception(self): self.block_device_mapping, self.node, self.limits, self.filter_properties).AndRaise( exception.RescheduledException(reason='', - instance_uuid=self.instance['uuid'])) + instance_uuid=self.instance.uuid)) self.compute.compute_task_api.build_instances(self.context, [self.instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, @@ -2005,11 +2005,11 @@ def test_rescheduled_exception_without_retry(self): self.block_device_mapping, self.node, self.limits, {}).AndRaise( exception.RescheduledException(reason='', - instance_uuid=self.instance['uuid'])) + instance_uuid=self.instance.uuid)) self.compute._cleanup_allocated_networks(self.context, self.instance, self.requested_networks) 
self.compute._set_instance_error_state(self.context, - self.instance['uuid']) + self.instance.uuid) self._instance_action_events() self.mox.ReplayAll() @@ -2037,7 +2037,7 @@ def test_rescheduled_exception_do_not_deallocate_network(self): self.block_device_mapping, self.node, self.limits, self.filter_properties).AndRaise( exception.RescheduledException(reason='', - instance_uuid=self.instance['uuid'])) + instance_uuid=self.instance.uuid)) self.compute.driver.deallocate_networks_on_reschedule( self.instance).AndReturn(False) self.compute.compute_task_api.build_instances(self.context, @@ -2071,7 +2071,7 @@ def test_rescheduled_exception_deallocate_network(self): self.block_device_mapping, self.node, self.limits, self.filter_properties).AndRaise( exception.RescheduledException(reason='', - instance_uuid=self.instance['uuid'])) + instance_uuid=self.instance.uuid)) self.compute.driver.deallocate_networks_on_reschedule( self.instance).AndReturn(True) self.compute._cleanup_allocated_networks(self.context, self.instance, @@ -2167,7 +2167,7 @@ def test_instance_not_found(self): self._notify_about_instance_usage('create.end', fault=exc, stub=False) conductor_rpcapi.ConductorAPI.instance_update( - self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor') + self.context, self.instance.uuid, mox.IgnoreArg(), 'conductor') self.mox.ReplayAll() self.assertRaises(exception.InstanceNotFound, @@ -2198,7 +2198,7 @@ def test_reschedule_on_exception(self): network_info=self.network_info, block_device_info=self.block_device_info).AndRaise(exc) conductor_rpcapi.ConductorAPI.instance_update( - self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor') + self.context, self.instance.uuid, mox.IgnoreArg(), 'conductor') self._notify_about_instance_usage('create.error', fault=exc, stub=False) self.mox.ReplayAll() @@ -2283,7 +2283,7 @@ def _test_build_and_run_spawn_exceptions(self, exc): block_device_info=self.block_device_info)) 
instance_update.assert_has_calls(mock.call(self.context, - self.instance['uuid'], mock.ANY, 'conductor')) + self.instance.uuid, mock.ANY, 'conductor')) _shutdown_instance.assert_called_once_with(self.context, self.instance, self.block_device_mapping, @@ -2337,12 +2337,12 @@ def instance_claim(self, context, instance, limits): def test_build_resources_buildabort_reraise(self): exc = exception.BuildAbortException( - instance_uuid=self.instance['uuid'], reason='') + instance_uuid=self.instance.uuid, reason='') self.mox.StubOutWithMock(self.compute, '_build_resources') self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI, 'instance_update') conductor_rpcapi.ConductorAPI.instance_update( - self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor') + self.context, self.instance.uuid, mox.IgnoreArg(), 'conductor') self._notify_about_instance_usage('create.start', extra_usage_info={'image_name': self.image.get('name')}) self.compute._build_resources(self.context, self.instance, From 8a862715bfb17f70b480258c263b5d99d058d3e3 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 13 Aug 2014 14:27:22 -0700 Subject: [PATCH 439/486] Remove _instance_update usage in _build_instance The _build_instance method has an instance object so just call instance.save() rather than _instance_update which does a separate call to conductor to update the instance updates the resource tracker, which is unnecessary in this case. Compare to the _build_resources method. 
Part of blueprint compute-manager-objects-juno Change-Id: Ic350a424f83638e8129d55dda168913798ebc31a --- nova/compute/manager.py | 7 +++---- nova/tests/compute/test_compute_mgr.py | 7 +++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 1e647708eb..f8abdd0012 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1340,10 +1340,9 @@ def _build_instance(self, context, request_spec, filter_properties, instance, requested_networks, macs, security_groups, dhcp_options) - self._instance_update( - context, instance.uuid, - vm_state=vm_states.BUILDING, - task_state=task_states.BLOCK_DEVICE_MAPPING) + instance.vm_state = vm_states.BUILDING + instance.task_state = task_states.BLOCK_DEVICE_MAPPING + instance.save() # Verify that all the BDMs have a device_name set and assign a # default to the ones missing it with the help of the driver. diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 201600d40a..5459432cc7 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -107,7 +107,6 @@ def instance_claim(self, context, instance, limits): self.mox.StubOutWithMock(self.compute, '_get_resource_tracker') self.mox.StubOutWithMock(self.compute, '_allocate_network') - self.mox.StubOutWithMock(self.compute, '_instance_update') self.mox.StubOutWithMock(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @@ -134,17 +133,17 @@ def fake_allocate(context, *args, **kwargs): self.compute._allocate_network(mox.IgnoreArg(), instance, mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).WithSideEffects(fake_allocate) - self.compute._instance_update(self.context, instance.uuid, - system_metadata={'network_allocated': 'True'}) self.mox.ReplayAll() - self.compute._build_instance(self.context, {}, {}, + instance, nw_info = self.compute._build_instance(self.context, {}, {}, None, None, None, True, node, 
instance, {}, False) self.assertFalse(self.admin_context, "_allocate_network called with admin context") + self.assertEqual(vm_states.BUILDING, instance.vm_state) + self.assertEqual(task_states.BLOCK_DEVICE_MAPPING, instance.task_state) def test_allocate_network_fails(self): self.flags(network_allocate_retries=0) From 26871aa60ba663d56951fcd449167bfadbe01522 Mon Sep 17 00:00:00 2001 From: Thang Pham Date: Wed, 20 Aug 2014 22:43:14 -0400 Subject: [PATCH 440/486] Log warning message if volume quota is exceeded It would be useful to the user if a warning message is logged when the volume quota is exceeded instead of a debug message. This would allow the root cause to be more easily determined instead of turning on the debug option. Change-Id: I19dc078cbacf8f39480e8a7f4c4edb764e4696eb Closes-Bug: #1349268 --- nova/compute/manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 507eafa70f..01573fab3d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1761,9 +1761,9 @@ def _prep_block_device(self, context, instance, bdms, return block_device_info except exception.OverQuota: - msg = ('Failed to create block device for instance due to being ' - 'over volume resource quota') - LOG.debug(msg, instance=instance) + msg = _LW('Failed to create block device for instance due to ' + 'being over volume resource quota') + LOG.warn(msg, instance=instance) raise exception.InvalidBDM() except Exception: From 9f1f9a8b6fadbba2eb6733e0f13b727f14a52a3f Mon Sep 17 00:00:00 2001 From: Joe Cropper Date: Wed, 20 Aug 2014 21:18:17 -0500 Subject: [PATCH 441/486] Handle non-ascii characters in spawn exception msg If a compute driver's spawn function returns an exception whose text has any non-ascii characters (e.g., for globalization), the compute manager's "reschedule logic" doesn't handle it properly as it runs the text through the str() function, which causes UnicodeEncodeError exceptions. 
This simply sends the exception text through six.text_type so as to no longer blow up and proceed with rescheduling as intended. Closes-Bug: #1359427 Change-Id: I3fef783feffbe99c6971543c881e3fce2b60693b --- nova/compute/manager.py | 2 +- nova/tests/compute/test_compute_mgr.py | 33 ++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 507eafa70f..c15827bdf3 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -2058,7 +2058,7 @@ def _build_and_run_instance(self, context, instance, image, injected_files, self._notify_about_instance_usage(context, instance, 'create.error', fault=e) raise exception.RescheduledException( - instance_uuid=instance.uuid, reason=str(e)) + instance_uuid=instance.uuid, reason=six.text_type(e)) # NOTE(alaski): This is only useful during reschedules, remove it now. instance.system_metadata.pop('network_allocated', None) diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index d07b07cde0..ad12d53d15 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -1993,6 +1993,39 @@ def test_rescheduled_exception(self): block_device_mapping=self.block_device_mapping, node=self.node, limits=self.limits) + def test_rescheduled_exception_with_non_ascii_exception(self): + exc = exception.NovaException(u's\xe9quence') + self.mox.StubOutWithMock(self.compute.driver, 'spawn') + self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI, + 'instance_update') + self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance') + self.mox.StubOutWithMock(self.compute, '_shutdown_instance') + self.compute._build_networks_for_instance(self.context, self.instance, + self.requested_networks, self.security_groups).AndReturn( + self.network_info) + self.compute._shutdown_instance(self.context, self.instance, + self.block_device_mapping, self.requested_networks, + 
try_deallocate_networks=False) + self._notify_about_instance_usage('create.start', + extra_usage_info={'image_name': self.image.get('name')}) + self._build_and_run_instance_update() + self.compute.driver.spawn(self.context, self.instance, self.image, + self.injected_files, self.admin_pass, + network_info=self.network_info, + block_device_info=self.block_device_info).AndRaise(exc) + self._notify_about_instance_usage('create.error', + fault=exc, stub=False) + conductor_rpcapi.ConductorAPI.instance_update( + self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor') + self.mox.ReplayAll() + + self.assertRaises(exception.RescheduledException, + self.compute._build_and_run_instance, self.context, + self.instance, self.image, self.injected_files, + self.admin_pass, self.requested_networks, self.security_groups, + self.block_device_mapping, self.node, + self.limits, self.filter_properties) + def test_rescheduled_exception_without_retry(self): self.mox.StubOutWithMock(self.compute, '_build_and_run_instance') self.mox.StubOutWithMock(self.compute, '_set_instance_error_state') From b826fba1cd61f7ddbee091c220eaef7ea5317c1b Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Thu, 21 Aug 2014 14:24:32 +0900 Subject: [PATCH 442/486] Backport some v3 availability zones API UT to v2 API This patch adds below v3 availability zones API unittest into V2 API to improve V2 API unittesting. 
Change-Id: Icf5875b68ecde85c044efb1e2b05416de8b0c9db --- .../compute/contrib/test_availability_zone.py | 140 ++++++++++++++++++ 1 file changed, 140 insertions(+) diff --git a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py index 9fb0cb29fe..1280e066f9 100644 --- a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py +++ b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py @@ -18,16 +18,24 @@ import webob from nova.api.openstack.compute.contrib import availability_zone +from nova.api.openstack.compute import servers +from nova.api.openstack import extensions from nova import availability_zones +from nova.compute import api as compute_api +from nova.compute import flavors from nova import context from nova import db from nova.openstack.common import jsonutils from nova import servicegroup from nova import test from nova.tests.api.openstack import fakes +from nova.tests import fake_instance +from nova.tests.image import fake from nova.tests import matchers from nova.tests.objects import test_service +FAKE_UUID = fakes.FAKE_UUID + def fake_service_get_all(context, disabled=None): def __fake_service(binary, availability_zone, @@ -228,6 +236,138 @@ def test_availability_zone_detail_no_services(self): matchers.DictMatches(expected_response)) +class ServersControllerCreateTest(test.TestCase): + + def setUp(self): + """Shared implementation for tests below that create instance.""" + super(ServersControllerCreateTest, self).setUp() + + self.flags(verbose=True, + enable_instance_password=True) + self.instance_cache_num = 0 + + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + self.controller = servers.Controller(self.ext_mgr) + + def instance_create(context, inst): + inst_type = flavors.get_flavor_by_flavor_id(3) + image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' + def_image_ref = 'http://localhost/images/%s' % image_uuid + 
self.instance_cache_num += 1 + instance = fake_instance.fake_db_instance(**{ + 'id': self.instance_cache_num, + 'display_name': inst['display_name'] or 'test', + 'uuid': FAKE_UUID, + 'instance_type': dict(inst_type), + 'access_ip_v4': '1.2.3.4', + 'access_ip_v6': 'fead::1234', + 'image_ref': inst.get('image_ref', def_image_ref), + 'user_id': 'fake', + 'project_id': 'fake', + 'reservation_id': inst['reservation_id'], + "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), + "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), + "progress": 0, + "fixed_ips": [], + "task_state": "", + "vm_state": "", + "root_device_name": inst.get('root_device_name', 'vda'), + }) + return instance + + fake.stub_out_image_service(self.stubs) + self.stubs.Set(db, 'instance_create', instance_create) + + def _test_create_extra(self, params): + image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' + server = dict(name='server_test', imageRef=image_uuid, flavorRef=2) + server.update(params) + body = dict(server=server) + req = fakes.HTTPRequest.blank('/v2/fake/servers') + req.method = 'POST' + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + server = self.controller.create(req, body=body).obj['server'] + + def test_create_instance_with_availability_zone_disabled(self): + availability_zone = [{'availability_zone': 'foo'}] + params = {'availability_zone': availability_zone} + old_create = compute_api.API.create + + def create(*args, **kwargs): + self.assertIsNone(kwargs['availability_zone']) + return old_create(*args, **kwargs) + + self.stubs.Set(compute_api.API, 'create', create) + self._test_create_extra(params) + + def test_create_instance_with_availability_zone(self): + self.ext_mgr.extensions = {'os-availability-zone': 'fake'} + + def create(*args, **kwargs): + self.assertIn('availability_zone', kwargs) + self.assertEqual('nova', kwargs['availability_zone']) + return old_create(*args, **kwargs) + + old_create = compute_api.API.create + 
self.stubs.Set(compute_api.API, 'create', create) + image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' + flavor_ref = 'http://localhost/v2/fake/flavors/3' + body = { + 'server': { + 'name': 'config_drive_test', + 'imageRef': image_href, + 'flavorRef': flavor_ref, + 'metadata': { + 'hello': 'world', + 'open': 'stack', + }, + 'availability_zone': 'nova', + }, + } + + req = fakes.HTTPRequest.blank('/v2/fake/servers') + req.method = 'POST' + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + admin_context = context.get_admin_context() + db.service_create(admin_context, {'host': 'host1_zones', + 'binary': "nova-compute", + 'topic': 'compute', + 'report_count': 0}) + agg = db.aggregate_create(admin_context, + {'name': 'agg1'}, {'availability_zone': 'nova'}) + db.aggregate_host_add(admin_context, agg['id'], 'host1_zones') + res = self.controller.create(req, body=body).obj + server = res['server'] + self.assertEqual(fakes.FAKE_UUID, server['id']) + + def test_create_instance_without_availability_zone(self): + self.ext_mgr.extensions = {'os-availability-zone': 'fake'} + image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' + flavor_ref = 'http://localhost/v2/fake/flavors/3' + body = { + 'server': { + 'name': 'config_drive_test', + 'imageRef': image_href, + 'flavorRef': flavor_ref, + 'metadata': { + 'hello': 'world', + 'open': 'stack', + }, + }, + } + + req = fakes.HTTPRequest.blank('/v2/fake/servers') + req.method = 'POST' + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + res = self.controller.create(req, body=body).obj + server = res['server'] + self.assertEqual(fakes.FAKE_UUID, server['id']) + + class AvailabilityZoneSerializerTest(test.NoDBTestCase): def test_availability_zone_index_detail_serializer(self): def _verify_zone(zone_dict, tree): From 6c94ed023eff02c76dc81346f260591e8abf6c4f Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Thu, 21 Aug 2014 15:47:36 +0900 Subject: [PATCH 443/486] Backport v3 
config_drive API unittest to v2 API This patch adds some v3 config_drive API unittest into V2 API to improve V2 API unittesting. Change-Id: Idbe85edd44d99cfa05e16faf10d4ca11f367c892 --- .../compute/contrib/test_config_drive.py | 138 +++++++++++++++++- 1 file changed, 136 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/compute/contrib/test_config_drive.py b/nova/tests/api/openstack/compute/contrib/test_config_drive.py index 1b68e724b4..eec422c2ce 100644 --- a/nova/tests/api/openstack/compute/contrib/test_config_drive.py +++ b/nova/tests/api/openstack/compute/contrib/test_config_drive.py @@ -13,14 +13,21 @@ # License for the specific language governing permissions and limitations # under the License. +import datetime + import webob from nova.api.openstack.compute.contrib import config_drive +from nova.api.openstack.compute import servers +from nova.api.openstack import extensions +from nova.compute import api as compute_api +from nova.compute import flavors from nova import db from nova.openstack.common import jsonutils from nova import test from nova.tests.api.openstack import fakes -import nova.tests.image.fake +from nova.tests import fake_instance +from nova.tests.image import fake class ConfigDriveTest(test.TestCase): @@ -30,7 +37,7 @@ def setUp(self): self.Controller = config_drive.Controller() fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) - nova.tests.image.fake.stub_out_image_service(self.stubs) + fake.stub_out_image_service(self.stubs) self.flags( osapi_compute_extension=[ 'nova.api.openstack.compute.contrib.select_extensions'], @@ -57,3 +64,130 @@ def test_detail_servers(self): self.assertNotEqual(len(server_dicts), 0) for server_dict in server_dicts: self.assertIn('config_drive', server_dict) + + +class ServersControllerCreateTest(test.TestCase): + + def setUp(self): + """Shared implementation for tests below that create instance.""" + super(ServersControllerCreateTest, self).setUp() + + 
self.instance_cache_num = 0 + + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + self.controller = servers.Controller(self.ext_mgr) + + def instance_create(context, inst): + inst_type = flavors.get_flavor_by_flavor_id(3) + image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' + def_image_ref = 'http://localhost/images/%s' % image_uuid + self.instance_cache_num += 1 + instance = fake_instance.fake_db_instance(**{ + 'id': self.instance_cache_num, + 'display_name': inst['display_name'] or 'test', + 'uuid': fakes.FAKE_UUID, + 'instance_type': dict(inst_type), + 'access_ip_v4': '1.2.3.4', + 'access_ip_v6': 'fead::1234', + 'image_ref': inst.get('image_ref', def_image_ref), + 'user_id': 'fake', + 'project_id': 'fake', + 'reservation_id': inst['reservation_id'], + "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), + "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), + "progress": 0, + "fixed_ips": [], + "task_state": "", + "vm_state": "", + "root_device_name": inst.get('root_device_name', 'vda'), + }) + + return instance + + fake.stub_out_image_service(self.stubs) + self.stubs.Set(db, 'instance_create', instance_create) + + def _test_create_extra(self, params): + image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' + server = dict(name='server_test', imageRef=image_uuid, flavorRef=2) + server.update(params) + body = dict(server=server) + req = fakes.HTTPRequest.blank('/v2/fake/servers') + req.method = 'POST' + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + server = self.controller.create(req, body=body).obj['server'] + + def test_create_instance_with_config_drive_disabled(self): + params = {'config_drive': "False"} + old_create = compute_api.API.create + + def create(*args, **kwargs): + self.assertIsNone(kwargs['config_drive']) + return old_create(*args, **kwargs) + + self.stubs.Set(compute_api.API, 'create', create) + self._test_create_extra(params) + + def 
_create_instance_body_of_config_drive(self, param): + self.ext_mgr.extensions = {'os-config-drive': 'fake'} + + def create(*args, **kwargs): + self.assertIn('config_drive', kwargs) + return old_create(*args, **kwargs) + + old_create = compute_api.API.create + self.stubs.Set(compute_api.API, 'create', create) + image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' + flavor_ref = 'http://localhost/v2/fake/flavors/3' + body = { + 'server': { + 'name': 'config_drive_test', + 'imageRef': image_href, + 'flavorRef': flavor_ref, + 'config_drive': param, + }, + } + + req = fakes.HTTPRequest.blank('/v2/fake/servers') + req.method = 'POST' + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + return req, body + + def test_create_instance_with_config_drive(self): + param = True + req, body = self._create_instance_body_of_config_drive(param) + res = self.controller.create(req, body=body).obj + server = res['server'] + self.assertEqual(fakes.FAKE_UUID, server['id']) + + def test_create_instance_with_config_drive_as_boolean_string(self): + param = 'false' + req, body = self._create_instance_body_of_config_drive(param) + res = self.controller.create(req, body=body).obj + server = res['server'] + self.assertEqual(fakes.FAKE_UUID, server['id']) + + def test_create_instance_with_bad_config_drive(self): + param = 12345 + req, body = self._create_instance_body_of_config_drive(param) + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.create, req, body=body) + + def test_create_instance_without_config_drive(self): + param = True + req, body = self._create_instance_body_of_config_drive(param) + del body['server']['config_drive'] + res = self.controller.create(req, body=body).obj + server = res['server'] + self.assertEqual(fakes.FAKE_UUID, server['id']) + + def test_create_instance_with_empty_config_drive(self): + param = '' + req, body = self._create_instance_body_of_config_drive(param) + res = self.controller.create(req, body=body).obj + 
server = res['server'] + self.assertEqual(fakes.FAKE_UUID, server['id']) From 5482e388fae172a84ef656602422f96e2cfa1cfe Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Thu, 21 Aug 2014 16:55:43 +0900 Subject: [PATCH 444/486] Convert v3 config drive plugin to v2.1 This patch changes v3 config drive plugin to v2.1 and makes v2 unit tests share between v2 and v2.1. The differences between v2 and v3 are described on the wiki page https://wiki.openstack.org/wiki/NovaAPIv2tov3. Partially implements blueprint v2-on-v3-api Change-Id: Ieed7619f3cb310b25d157ebb07f24468aea1af9d --- .../all_extensions/server-get-resp.json | 2 +- .../all_extensions/servers-details-resp.json | 2 +- .../server-config-drive-get-resp.json | 4 +- .../servers-config-drive-details-resp.json | 4 +- .../compute/plugins/v3/config_drive.py | 2 +- .../compute/schemas/v3/config_drive.py | 2 +- .../compute/contrib/test_config_drive.py | 125 ++++++-- .../compute/plugins/v3/test_config_drive.py | 267 ------------------ .../all_extensions/server-get-resp.json.tpl | 2 +- .../servers-details-resp.json.tpl | 2 +- .../server-config-drive-get-resp.json.tpl | 2 +- ...servers-config-drive-details-resp.json.tpl | 2 +- 12 files changed, 109 insertions(+), 307 deletions(-) delete mode 100644 nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py diff --git a/doc/v3/api_samples/all_extensions/server-get-resp.json b/doc/v3/api_samples/all_extensions/server-get-resp.json index 8826f360dc..f9112ec143 100644 --- a/doc/v3/api_samples/all_extensions/server-get-resp.json +++ b/doc/v3/api_samples/all_extensions/server-get-resp.json @@ -48,7 +48,7 @@ "name": "new-server-test", "os-access-ips:access_ip_v4": "", "os-access-ips:access_ip_v6": "", - "os-config-drive:config_drive": "", + "config_drive": "", "os-extended-availability-zone:availability_zone": "nova", "os-extended-server-attributes:host": "b8b357f7100d4391828f2177c922ef93", "os-extended-server-attributes:hypervisor_hostname": "fake-mini", diff --git 
a/doc/v3/api_samples/all_extensions/servers-details-resp.json b/doc/v3/api_samples/all_extensions/servers-details-resp.json index 9467fb4a40..f200c99d0e 100644 --- a/doc/v3/api_samples/all_extensions/servers-details-resp.json +++ b/doc/v3/api_samples/all_extensions/servers-details-resp.json @@ -49,7 +49,7 @@ "name": "new-server-test", "os-access-ips:access_ip_v4": "", "os-access-ips:access_ip_v6": "", - "os-config-drive:config_drive": "", + "config_drive": "", "os-extended-availability-zone:availability_zone": "nova", "os-extended-server-attributes:host": "c3f14e9812ad496baf92ccfb3c61e15f", "os-extended-server-attributes:hypervisor_hostname": "fake-mini", diff --git a/doc/v3/api_samples/os-config-drive/server-config-drive-get-resp.json b/doc/v3/api_samples/os-config-drive/server-config-drive-get-resp.json index d31c241f1a..f6aa279e22 100644 --- a/doc/v3/api_samples/os-config-drive/server-config-drive-get-resp.json +++ b/doc/v3/api_samples/os-config-drive/server-config-drive-get-resp.json @@ -46,11 +46,11 @@ "My Server Name": "Apache1" }, "name": "new-server-test", - "os-config-drive:config_drive": "", + "config_drive": "", "progress": 0, "status": "ACTIVE", "tenant_id": "openstack", "updated": "2013-09-22T02:33:25Z", "user_id": "fake" } -} \ No newline at end of file +} diff --git a/doc/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json b/doc/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json index 97b96e7a7d..4cd5efe43b 100644 --- a/doc/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json +++ b/doc/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json @@ -47,7 +47,7 @@ "My Server Name": "Apache1" }, "name": "new-server-test", - "os-config-drive:config_drive": "", + "config_drive": "", "progress": 0, "status": "ACTIVE", "tenant_id": "openstack", @@ -55,4 +55,4 @@ "user_id": "fake" } ] -} \ No newline at end of file +} diff --git a/nova/api/openstack/compute/plugins/v3/config_drive.py 
b/nova/api/openstack/compute/plugins/v3/config_drive.py index 345c30f2d9..ea27b1834a 100644 --- a/nova/api/openstack/compute/plugins/v3/config_drive.py +++ b/nova/api/openstack/compute/plugins/v3/config_drive.py @@ -21,7 +21,7 @@ from nova.api.openstack import wsgi ALIAS = "os-config-drive" -ATTRIBUTE_NAME = "%s:config_drive" % ALIAS +ATTRIBUTE_NAME = "config_drive" authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS) diff --git a/nova/api/openstack/compute/schemas/v3/config_drive.py b/nova/api/openstack/compute/schemas/v3/config_drive.py index b67a9ead0d..659423ea24 100644 --- a/nova/api/openstack/compute/schemas/v3/config_drive.py +++ b/nova/api/openstack/compute/schemas/v3/config_drive.py @@ -15,5 +15,5 @@ from nova.api.validation import parameter_types server_create = { - 'os-config-drive:config_drive': parameter_types.boolean, + 'config_drive': parameter_types.boolean, } diff --git a/nova/tests/api/openstack/compute/contrib/test_config_drive.py b/nova/tests/api/openstack/compute/contrib/test_config_drive.py index eec422c2ce..a0242d7544 100644 --- a/nova/tests/api/openstack/compute/contrib/test_config_drive.py +++ b/nova/tests/api/openstack/compute/contrib/test_config_drive.py @@ -15,14 +15,20 @@ import datetime +from oslo.config import cfg import webob -from nova.api.openstack.compute.contrib import config_drive -from nova.api.openstack.compute import servers +from nova.api.openstack.compute.contrib import config_drive as config_drive_v2 +from nova.api.openstack.compute import plugins +from nova.api.openstack.compute.plugins.v3 import config_drive \ + as config_drive_v21 +from nova.api.openstack.compute.plugins.v3 import servers as servers_v21 +from nova.api.openstack.compute import servers as servers_v2 from nova.api.openstack import extensions from nova.compute import api as compute_api from nova.compute import flavors from nova import db +from nova import exception from nova.openstack.common import jsonutils from nova import test from 
nova.tests.api.openstack import fakes @@ -30,27 +36,34 @@ from nova.tests.image import fake -class ConfigDriveTest(test.TestCase): +CONF = cfg.CONF + + +class ConfigDriveTestV21(test.TestCase): + base_url = '/v3/servers/' + + def _setup_wsgi(self): + self.app = fakes.wsgi_app_v3(init_only=('servers', 'os-config-drive')) + + def _get_config_drive_controller(self): + return config_drive_v21.ConfigDriveController() def setUp(self): - super(ConfigDriveTest, self).setUp() - self.Controller = config_drive.Controller() + super(ConfigDriveTestV21, self).setUp() + self.Controller = self._get_config_drive_controller() fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) fake.stub_out_image_service(self.stubs) - self.flags( - osapi_compute_extension=[ - 'nova.api.openstack.compute.contrib.select_extensions'], - osapi_compute_ext_list=['Config_drive']) + self._setup_wsgi() def test_show(self): self.stubs.Set(db, 'instance_get', fakes.fake_instance_get()) self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get()) - req = webob.Request.blank('/v2/fake/servers/1') + req = webob.Request.blank(self.base_url + '1') req.headers['Content-Type'] = 'application/json' - response = req.get_response(fakes.wsgi_app(init_only=('servers',))) + response = req.get_response(self.app) self.assertEqual(response.status_int, 200) res_dict = jsonutils.loads(response.body) self.assertIn('config_drive', res_dict['server']) @@ -58,25 +71,54 @@ def test_show(self): def test_detail_servers(self): self.stubs.Set(db, 'instance_get_all_by_filters', fakes.fake_instance_get_all_by_filters()) - req = fakes.HTTPRequest.blank('/v2/fake/servers/detail') - res = req.get_response(fakes.wsgi_app(init_only=('servers,'))) + req = fakes.HTTPRequest.blank(self.base_url + 'detail') + res = req.get_response(self.app) server_dicts = jsonutils.loads(res.body)['servers'] self.assertNotEqual(len(server_dicts), 0) for server_dict in server_dicts: self.assertIn('config_drive', server_dict) 
-class ServersControllerCreateTest(test.TestCase): +class ConfigDriveTestV2(ConfigDriveTestV21): + base_url = '/v2/fake/servers/' + + def _get_config_drive_controller(self): + return config_drive_v2.Controller() + + def _setup_wsgi(self): + self.flags( + osapi_compute_extension=[ + 'nova.api.openstack.compute.contrib.select_extensions'], + osapi_compute_ext_list=['Config_drive']) + self.app = fakes.wsgi_app(init_only=('servers',)) + + +class ServersControllerCreateTestV21(test.TestCase): + base_url = '/v3/' + bad_request = exception.ValidationError + + def _set_up_controller(self): + ext_info = plugins.LoadedExtensionInfo() + self.controller = servers_v21.ServersController( + extension_info=ext_info) + CONF.set_override('extensions_blacklist', + 'os-config-drive', + 'osapi_v3') + self.no_config_drive_controller = servers_v21.ServersController( + extension_info=ext_info) + + def _verfiy_config_drive(self, **kwargs): + self.assertNotIn('config_drive', kwargs) + + def _initialize_extension(self): + pass def setUp(self): """Shared implementation for tests below that create instance.""" - super(ServersControllerCreateTest, self).setUp() + super(ServersControllerCreateTestV21, self).setUp() self.instance_cache_num = 0 - - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - self.controller = servers.Controller(self.ext_mgr) + self._set_up_controller() def instance_create(context, inst): inst_type = flavors.get_flavor_by_flavor_id(3) @@ -108,30 +150,34 @@ def instance_create(context, inst): fake.stub_out_image_service(self.stubs) self.stubs.Set(db, 'instance_create', instance_create) - def _test_create_extra(self, params): + def _test_create_extra(self, params, override_controller): image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' server = dict(name='server_test', imageRef=image_uuid, flavorRef=2) server.update(params) body = dict(server=server) - req = fakes.HTTPRequest.blank('/v2/fake/servers') + req = fakes.HTTPRequest.blank(self.base_url + 
'servers') req.method = 'POST' req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" - server = self.controller.create(req, body=body).obj['server'] + if override_controller is not None: + server = override_controller.create(req, body=body).obj['server'] + else: + server = self.controller.create(req, body=body).obj['server'] def test_create_instance_with_config_drive_disabled(self): params = {'config_drive': "False"} old_create = compute_api.API.create def create(*args, **kwargs): - self.assertIsNone(kwargs['config_drive']) + self._verfiy_config_drive(**kwargs) return old_create(*args, **kwargs) self.stubs.Set(compute_api.API, 'create', create) - self._test_create_extra(params) + self._test_create_extra(params, + override_controller=self.no_config_drive_controller) def _create_instance_body_of_config_drive(self, param): - self.ext_mgr.extensions = {'os-config-drive': 'fake'} + self._initialize_extension() def create(*args, **kwargs): self.assertIn('config_drive', kwargs) @@ -140,7 +186,7 @@ def create(*args, **kwargs): old_create = compute_api.API.create self.stubs.Set(compute_api.API, 'create', create) image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' - flavor_ref = 'http://localhost/v2/fake/flavors/3' + flavor_ref = ('http://localhost' + self.base_url + 'flavors/3') body = { 'server': { 'name': 'config_drive_test', @@ -150,7 +196,7 @@ def create(*args, **kwargs): }, } - req = fakes.HTTPRequest.blank('/v2/fake/servers') + req = fakes.HTTPRequest.blank(self.base_url + 'servers') req.method = 'POST' req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" @@ -174,7 +220,7 @@ def test_create_instance_with_config_drive_as_boolean_string(self): def test_create_instance_with_bad_config_drive(self): param = 12345 req, body = self._create_instance_body_of_config_drive(param) - self.assertRaises(webob.exc.HTTPBadRequest, + self.assertRaises(self.bad_request, self.controller.create, req, body=body) def 
test_create_instance_without_config_drive(self): @@ -185,6 +231,29 @@ def test_create_instance_without_config_drive(self): server = res['server'] self.assertEqual(fakes.FAKE_UUID, server['id']) + def test_create_instance_with_empty_config_drive(self): + param = '' + req, body = self._create_instance_body_of_config_drive(param) + self.assertRaises(exception.ValidationError, + self.controller.create, req, body=body) + + +class ServersControllerCreateTestV2(ServersControllerCreateTestV21): + base_url = '/v2/fake/' + bad_request = webob.exc.HTTPBadRequest + + def _set_up_controller(self): + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + self.controller = servers_v2.Controller(self.ext_mgr) + self.no_config_drive_controller = None + + def _verfiy_config_drive(self, **kwargs): + self.assertIsNone(kwargs['config_drive']) + + def _initialize_extension(self): + self.ext_mgr.extensions = {'os-config-drive': 'fake'} + def test_create_instance_with_empty_config_drive(self): param = '' req, body = self._create_instance_body_of_config_drive(param) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py b/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py deleted file mode 100644 index a0adb97e65..0000000000 --- a/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py +++ /dev/null @@ -1,267 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import uuid - -from oslo.config import cfg -import webob - -from nova.api.openstack.compute import plugins -from nova.api.openstack.compute.plugins.v3 import config_drive -from nova.api.openstack.compute.plugins.v3 import servers -from nova.compute import api as compute_api -from nova.compute import flavors -from nova import db -from nova import exception -from nova.network import manager -from nova.openstack.common import jsonutils -from nova import test -from nova.tests.api.openstack import fakes -from nova.tests import fake_instance -from nova.tests.image import fake - - -CONF = cfg.CONF -FAKE_UUID = fakes.FAKE_UUID - - -def fake_gen_uuid(): - return FAKE_UUID - - -def return_security_group(context, instance_id, security_group_id): - pass - - -class ConfigDriveTest(test.TestCase): - - def setUp(self): - super(ConfigDriveTest, self).setUp() - fakes.stub_out_networking(self.stubs) - fakes.stub_out_rate_limiting(self.stubs) - fake.stub_out_image_service(self.stubs) - - def test_show(self): - self.stubs.Set(db, 'instance_get', - fakes.fake_instance_get()) - self.stubs.Set(db, 'instance_get_by_uuid', - fakes.fake_instance_get()) - req = webob.Request.blank('/v3/servers/1') - req.headers['Content-Type'] = 'application/json' - response = req.get_response(fakes.wsgi_app_v3( - init_only=('servers', 'os-config-drive'))) - self.assertEqual(response.status_int, 200) - res_dict = jsonutils.loads(response.body) - self.assertIn(config_drive.ATTRIBUTE_NAME, res_dict['server']) - - def test_detail_servers(self): - self.stubs.Set(db, 'instance_get_all_by_filters', - fakes.fake_instance_get_all_by_filters()) - self.stubs.Set(db, 'instance_get_by_uuid', - fakes.fake_instance_get()) - req = fakes.HTTPRequestV3.blank('/v3/servers/detail') - res = req.get_response(fakes.wsgi_app_v3( - init_only=('servers', 'os-config-drive'))) - server_dicts = jsonutils.loads(res.body)['servers'] - self.assertNotEqual(len(server_dicts), 0) - for server_dict in server_dicts: - 
self.assertIn(config_drive.ATTRIBUTE_NAME, server_dict) - - -class ServersControllerCreateTest(test.TestCase): - - def setUp(self): - """Shared implementation for tests below that create instance.""" - super(ServersControllerCreateTest, self).setUp() - - self.flags(verbose=True, - enable_instance_password=True) - self.instance_cache_num = 0 - self.instance_cache_by_id = {} - self.instance_cache_by_uuid = {} - - ext_info = plugins.LoadedExtensionInfo() - self.controller = servers.ServersController(extension_info=ext_info) - CONF.set_override('extensions_blacklist', 'os-config-drive', - 'osapi_v3') - self.no_config_drive_controller = servers.ServersController( - extension_info=ext_info) - - def instance_create(context, inst): - inst_type = flavors.get_flavor_by_flavor_id(3) - image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' - def_image_ref = 'http://localhost/images/%s' % image_uuid - self.instance_cache_num += 1 - instance = fake_instance.fake_db_instance(**{ - 'id': self.instance_cache_num, - 'display_name': inst['display_name'] or 'test', - 'uuid': FAKE_UUID, - 'instance_type': dict(inst_type), - 'access_ip_v4': '1.2.3.4', - 'access_ip_v6': 'fead::1234', - 'image_ref': inst.get('image_ref', def_image_ref), - 'user_id': 'fake', - 'project_id': 'fake', - 'reservation_id': inst['reservation_id'], - "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), - "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), - "config_drive": None, - "progress": 0, - "fixed_ips": [], - "task_state": "", - "vm_state": "", - "root_device_name": inst.get('root_device_name', 'vda'), - }) - - self.instance_cache_by_id[instance['id']] = instance - self.instance_cache_by_uuid[instance['uuid']] = instance - return instance - - def instance_get(context, instance_id): - """Stub for compute/api create() pulling in instance after - scheduling - """ - return self.instance_cache_by_id[instance_id] - - def instance_update(context, uuid, values): - instance = self.instance_cache_by_uuid[uuid] 
- instance.update(values) - return instance - - def server_update(context, instance_uuid, params): - inst = self.instance_cache_by_uuid[instance_uuid] - inst.update(params) - return (inst, inst) - - def fake_method(*args, **kwargs): - pass - - def project_get_networks(context, user_id): - return dict(id='1', host='localhost') - - def queue_get_for(context, *args): - return 'network_topic' - - fakes.stub_out_rate_limiting(self.stubs) - fakes.stub_out_key_pair_funcs(self.stubs) - fake.stub_out_image_service(self.stubs) - fakes.stub_out_nw_api(self.stubs) - self.stubs.Set(uuid, 'uuid4', fake_gen_uuid) - self.stubs.Set(db, 'instance_add_security_group', - return_security_group) - self.stubs.Set(db, 'project_get_networks', - project_get_networks) - self.stubs.Set(db, 'instance_create', instance_create) - self.stubs.Set(db, 'instance_system_metadata_update', - fake_method) - self.stubs.Set(db, 'instance_get', instance_get) - self.stubs.Set(db, 'instance_update', instance_update) - self.stubs.Set(db, 'instance_update_and_get_original', - server_update) - self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip', - fake_method) - - def _test_create_extra(self, params, no_image=False, - override_controller=None): - image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - server = dict(name='server_test', imageRef=image_uuid, flavorRef=2) - if no_image: - server.pop('imageRef', None) - server.update(params) - body = dict(server=server) - req = fakes.HTTPRequestV3.blank('/servers') - req.method = 'POST' - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" - if override_controller: - server = override_controller.create(req, body=body).obj['server'] - else: - server = self.controller.create(req, body=body).obj['server'] - - def test_create_instance_with_config_drive_disabled(self): - params = {config_drive.ATTRIBUTE_NAME: "False"} - old_create = compute_api.API.create - - def create(*args, **kwargs): - self.assertNotIn('config_drive', kwargs) - 
return old_create(*args, **kwargs) - - self.stubs.Set(compute_api.API, 'create', create) - self._test_create_extra(params, - override_controller=self.no_config_drive_controller) - - def _create_instance_body_of_config_drive(self, param): - def create(*args, **kwargs): - self.assertIn('config_drive', kwargs) - return old_create(*args, **kwargs) - - old_create = compute_api.API.create - self.stubs.Set(compute_api.API, 'create', create) - image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' - flavor_ref = 'http://localhost/v3/flavors/3' - body = { - 'server': { - 'name': 'config_drive_test', - 'imageRef': image_href, - 'flavorRef': flavor_ref, - 'metadata': { - 'hello': 'world', - 'open': 'stack', - }, - config_drive.ATTRIBUTE_NAME: param, - }, - } - - req = fakes.HTTPRequestV3.blank('/servers') - req.method = 'POST' - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" - - return req, body - - def test_create_instance_with_config_drive(self): - param = True - req, body = self._create_instance_body_of_config_drive(param) - res = self.controller.create(req, body=body).obj - server = res['server'] - self.assertEqual(FAKE_UUID, server['id']) - - def test_create_instance_with_config_drive_as_boolean_string(self): - param = 'false' - req, body = self._create_instance_body_of_config_drive(param) - res = self.controller.create(req, body=body).obj - server = res['server'] - self.assertEqual(FAKE_UUID, server['id']) - - def test_create_instance_with_bad_config_drive(self): - param = 12345 - req, body = self._create_instance_body_of_config_drive(param) - self.assertRaises(exception.ValidationError, - self.controller.create, req, body=body) - - def test_create_instance_without_config_drive(self): - param = True - req, body = self._create_instance_body_of_config_drive(param) - del body['server'][config_drive.ATTRIBUTE_NAME] - res = self.controller.create(req, body=body).obj - server = res['server'] - self.assertEqual(FAKE_UUID, server['id']) - - def 
test_create_instance_with_empty_config_drive(self): - param = '' - req, body = self._create_instance_body_of_config_drive(param) - self.assertRaises(exception.ValidationError, - self.controller.create, req, body=body) diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl index e000296910..d33b6b99a5 100644 --- a/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl @@ -48,7 +48,7 @@ "My Server Name": "Apache1" }, "name": "new-server-test", - "os-config-drive:config_drive": "", + "config_drive": "", "os-extended-availability-zone:availability_zone": "nova", "os-extended-server-attributes:host": "%(compute_host)s", "os-extended-server-attributes:hypervisor_hostname": "%(hypervisor_hostname)s", diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl index 652714cf0f..1d37aca1c1 100644 --- a/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl @@ -49,7 +49,7 @@ "My Server Name": "Apache1" }, "name": "new-server-test", - "os-config-drive:config_drive": "", + "config_drive": "", "os-extended-availability-zone:availability_zone": "nova", "os-extended-server-attributes:host": "%(compute_host)s", "os-extended-server-attributes:hypervisor_hostname": "%(hypervisor_hostname)s", diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl index 13f51f5875..784d494167 100644 --- a/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl +++ 
b/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl @@ -1,6 +1,6 @@ { "server": { - "os-config-drive:config_drive": "%(cdrive)s", + "config_drive": "%(cdrive)s", "addresses": { "private": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl index cc7fe80d46..31b8a62b4d 100644 --- a/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl @@ -1,7 +1,7 @@ { "servers": [ { - "os-config-drive:config_drive": "%(cdrive)s", + "config_drive": "%(cdrive)s", "addresses": { "private": [ { From e9a5463bf71cb2f2e7185dea3ed8ee8b973a3208 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Wed, 20 Aug 2014 10:27:56 -0400 Subject: [PATCH 445/486] Restore backward compat for int/float in extra_specs In icehouse, we were able to store ints, floats in extra_specs, with I195bd5d45a896e9b26dd81dab1e49c9f939b4805 we forced the value(s) to be just strings. Let's loosen up this restriction and still enforce a length check. 
Closes-Bug: #1358818 Change-Id: I7687d0214e44d4af1b595c6c6c7ce685d4083556 --- nova/api/openstack/compute/contrib/flavorextraspecs.py | 6 ++++++ .../openstack/compute/contrib/test_flavors_extra_specs.py | 8 +++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/compute/contrib/flavorextraspecs.py b/nova/api/openstack/compute/contrib/flavorextraspecs.py index 2f6f06f75c..f22284d299 100644 --- a/nova/api/openstack/compute/contrib/flavorextraspecs.py +++ b/nova/api/openstack/compute/contrib/flavorextraspecs.py @@ -15,6 +15,7 @@ """The instance type extra specs extension.""" +import six from webob import exc from nova.api.openstack import extensions @@ -70,6 +71,11 @@ def _check_extra_specs(self, specs): try: utils.check_string_length(key, 'extra_specs key', min_length=1, max_length=255) + + # NOTE(dims): The following check was added for backwards + # compatibility. + if (isinstance(value, (int, long, float))): + value = six.text_type(value) utils.check_string_length(value, 'extra_specs value', max_length=255) except exception.InvalidInput as error: diff --git a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py b/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py index f9b549a330..1b3c330d15 100644 --- a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py +++ b/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py @@ -151,13 +151,15 @@ def test_create(self): self.stubs.Set(nova.db, 'flavor_extra_specs_update_or_create', return_create_flavor_extra_specs) - body = {"extra_specs": {"key1": "value1"}} + body = {"extra_specs": {"key1": "value1", "key2": 0.5, "key3": 5}} req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs', use_admin_context=True) res_dict = self.controller.create(req, 1, body) self.assertEqual('value1', res_dict['extra_specs']['key1']) + self.assertEqual(0.5, res_dict['extra_specs']['key2']) + self.assertEqual(5, 
res_dict['extra_specs']['key3']) def test_create_no_admin(self): self.stubs.Set(nova.db, @@ -225,6 +227,10 @@ def test_create_long_value(self): value = "a" * 256 self._test_create_bad_request({"extra_specs": {"key1": value}}) + def test_create_really_long_integer_value(self): + value = 10 ** 1000 + self._test_create_bad_request({"extra_specs": {"key1": value}}) + @mock.patch('nova.db.flavor_extra_specs_update_or_create') def test_create_invalid_specs_key(self, mock_flavor_extra_specs): invalid_keys = ("key1/", "", "$$akey$", "!akey", "") From 0f79c9ef0b2703da6389a9186cca6a905cea1a17 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Fri, 15 Aug 2014 13:56:55 +0000 Subject: [PATCH 446/486] objects: Fix InstanceGroup.obj_make_compatible() The obj_make_compatible() method of the InstanceGroup object incorrectly assumed the format of the version argument was the version broken down into int() parts. It comes in as a string and must be converted. Change-Id: Id2724c5029202f001e5a2b4371f030b65c72fdfd --- nova/objects/instance_group.py | 2 ++ nova/tests/objects/test_instance_group.py | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/nova/objects/instance_group.py b/nova/objects/instance_group.py index 1e1691701c..842c0ac4a7 100644 --- a/nova/objects/instance_group.py +++ b/nova/objects/instance_group.py @@ -18,6 +18,7 @@ from nova.objects import base from nova.objects import fields from nova.openstack.common import uuidutils +from nova import utils class InstanceGroup(base.NovaPersistentObject, base.NovaObject): @@ -45,6 +46,7 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject): } def obj_make_compatible(self, primitive, target_version): + target_version = utils.convert_version_to_tuple(target_version) if target_version < (1, 7): # NOTE(danms): Before 1.7, we had an always-empty # metadetails property diff --git a/nova/tests/objects/test_instance_group.py b/nova/tests/objects/test_instance_group.py index 113e0dd688..3099059ff8 100644 --- 
a/nova/tests/objects/test_instance_group.py +++ b/nova/tests/objects/test_instance_group.py @@ -269,6 +269,14 @@ def test_get_hosts_with_some_none(self): self.assertEqual(1, len(hosts)) self.assertIn('hostB', hosts) + def test_obj_make_compatible(self): + group = instance_group.InstanceGroup(uuid='fake-uuid', + name='fake-name') + group.create(self.context) + group_primitive = group.obj_to_primitive() + group.obj_make_compatible(group_primitive, '1.6') + self.assertEqual({}, group_primitive['metadetails']) + class TestInstanceGroupObject(test_objects._LocalTest, _TestInstanceGroupObjects): From 10c51055c1bcc2acd307903bef3622df65a8ed43 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Fri, 15 Aug 2014 15:05:44 +0000 Subject: [PATCH 447/486] objects: Add base test for obj_make_compatible() A previous commit fixed a bug in obj_make_compatible() of the InstanceGroup object. While fixing that, I added a test targeted at that object. This patch adds a test that calls obj_make_compatible() on every past version of every object. It doesn't actually test that the data is converted, but it makes sure that calling the method doesn't blow up on something basic like expecting the wrong format of the version parameter. Only one object had to change for this test to work, and that was a test object. It assumed a field was always set, and it's not in the case of this test. However, the field in question is not actually set as nullable, so it *should* be set. However, the way these methods are implemented elsewhere, they never assume the field to be set and present in the primitive version of the object. 
Change-Id: Id56b2f9763cfcb99a3c4a2d56329485c5e9c3b5c --- nova/tests/objects/test_objects.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index 05d20f9fce..25c2780337 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -30,10 +30,15 @@ from nova.objects import base from nova.objects import fields from nova.openstack.common import jsonutils +from nova.openstack.common import log from nova.openstack.common import timeutils from nova import rpc from nova import test from nova.tests import fake_notifier +from nova import utils + + +LOG = log.getLogger(__name__) class MyOwnedObject(base.NovaPersistentObject, base.NovaObject): @@ -99,7 +104,7 @@ def modify_save_modify(self, context): def obj_make_compatible(self, primitive, target_version): # NOTE(danms): Simulate an older version that had a different # format for the 'bar' attribute - if target_version == '1.1': + if target_version == '1.1' and 'bar' in primitive: primitive['bar'] = 'old%s' % primitive['bar'] @@ -1080,3 +1085,18 @@ def test_relationships(self): 'parent objects and provide a rule in their ' 'obj_make_compatible() routines to backlevel ' 'the child object.') + + def test_obj_make_compatible(self): + # Iterate all object classes and verify that we can run + # obj_make_compatible with every older version than current. + # This doesn't actually test the data conversions, but it at least + # makes sure the method doesn't blow up on something basic like + # expecting the wrong version format. 
+ for obj_name in base.NovaObject._obj_classes: + obj_class = base.NovaObject._obj_classes[obj_name][0] + version = utils.convert_version_to_tuple(obj_class.VERSION) + for n in range(version[1]): + test_version = '%d.%d' % (version[0], n) + LOG.info('testing obj: %s version: %s' % + (obj_name, test_version)) + obj_class().obj_to_primitive(target_version=test_version) From 926f94534da7b3cd868eb6495a2eaadd5faff6b9 Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Thu, 21 Aug 2014 16:58:13 +0100 Subject: [PATCH 448/486] scheduler sends select_destinations notifications scheduler_run_instance sends start/end notifications so you can tell how long it takes for the scheduler to do its work. The new select_destinations does not send any such notifications. To make the new code patch more consistent with the notifications in the old code patch, this change adds in start/end notifications. The notification docs should be updated to reflect this change. DocImpact Closes-Bug: #1359835 Change-Id: I9b3f6619823c57cbc21d714edd78527c0015b4f6 --- nova/scheduler/filter_scheduler.py | 6 ++++++ nova/tests/scheduler/test_filter_scheduler.py | 16 ++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py index cd0e5654f9..b71a257141 100644 --- a/nova/scheduler/filter_scheduler.py +++ b/nova/scheduler/filter_scheduler.py @@ -140,6 +140,9 @@ def schedule_run_instance(self, context, request_spec, def select_destinations(self, context, request_spec, filter_properties): """Selects a filtered set of hosts and nodes.""" + self.notifier.info(context, 'scheduler.select_destinations.start', + dict(request_spec=request_spec)) + num_instances = request_spec['num_instances'] selected_hosts = self._schedule(context, request_spec, filter_properties) @@ -150,6 +153,9 @@ def select_destinations(self, context, request_spec, filter_properties): dests = [dict(host=host.obj.host, nodename=host.obj.nodename, limits=host.obj.limits) 
for host in selected_hosts] + + self.notifier.info(context, 'scheduler.select_destinations.end', + dict(request_spec=request_spec)) return dests def _provision_resource(self, context, weighed_host, request_spec, diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py index aff3354a6b..064eb4d50c 100644 --- a/nova/tests/scheduler/test_filter_scheduler.py +++ b/nova/tests/scheduler/test_filter_scheduler.py @@ -606,6 +606,22 @@ def _fake_weigh_objects(_self, functions, hosts, options): self.assertEqual(host, selected_hosts[0]) self.assertEqual(node, selected_nodes[0]) + @mock.patch.object(filter_scheduler.FilterScheduler, '_schedule') + def test_select_destinations_notifications(self, mock_schedule): + mock_schedule.return_value = [mock.Mock()] + + with mock.patch.object(self.driver.notifier, 'info') as mock_info: + request_spec = {'num_instances': 1} + + self.driver.select_destinations(self.context, request_spec, {}) + + expected = [ + mock.call(self.context, 'scheduler.select_destinations.start', + dict(request_spec=request_spec)), + mock.call(self.context, 'scheduler.select_destinations.end', + dict(request_spec=request_spec))] + self.assertEqual(expected, mock_info.call_args_list) + def test_select_destinations_no_valid_host(self): def _return_no_host(*args, **kwargs): From c7465dcf8b66a6b8756fb9525134502177057eec Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Thu, 21 Aug 2014 13:41:39 -0600 Subject: [PATCH 449/486] Fix comment typo I noticed a typo while getting familiar with the code. 
s/patially/partially/ Change-Id: Ife5ef787051016b4f4d21df402c1942304a5b7c3 --- nova/virt/libvirt/driver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index a9c3c72135..5484f37edf 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -1076,7 +1076,7 @@ def cleanup(self, context, instance, network_info, block_device_info=None, with excutils.save_and_reraise_exception() as ctxt: if destroy_disks: # Don't block on Volume errors if we're trying to - # delete the instance as we may be patially created + # delete the instance as we may be partially created # or deleted ctxt.reraise = False LOG.warn(_LW("Ignoring Volume Error on vol %(vol_id)s " From 21aeb80a83e157dbc2ba27f16a83c98a37b7067d Mon Sep 17 00:00:00 2001 From: Yaguang Tang Date: Fri, 15 Aug 2014 10:22:28 +0800 Subject: [PATCH 450/486] Correct seconds of a day from 84400 to 86400 Correct a day to 86400 seconds which is used by API rate limit config Closes-bug: #1357152 Change-Id: Ieceacd00f02b6ed27032cc0ac2f55be8b976a070 --- nova/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/utils.py b/nova/utils.py index 4533ed2201..6aea79149e 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -89,7 +89,7 @@ 'SECOND': 1, 'MINUTE': 60, 'HOUR': 3600, - 'DAY': 84400 + 'DAY': 86400 } From 007ed9b6c825da482275b30f982eac46690337a3 Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Tue, 24 Jun 2014 14:01:04 +0930 Subject: [PATCH 451/486] Change error status code for out of quota to be 403 instead of 413 Instead of returning 413 as the error status code when the request fails due to lack of quota we should return 403 DocImpact: Return 403 Forbidden instead of 413 RequestEntityTooLarge when a request fails due to lack of quota Change-Id: I7c179748769b8a0e2cc2e476ebefe33449f6f304 Closes-Bug: 1298131 --- nova/api/openstack/common.py | 3 +-- nova/api/openstack/compute/contrib/admin_actions.py | 4 +--- 
nova/api/openstack/compute/contrib/deferred_delete.py | 4 +--- nova/api/openstack/compute/contrib/keypairs.py | 4 +--- nova/api/openstack/compute/contrib/security_groups.py | 2 +- nova/api/openstack/compute/plugins/v3/create_backup.py | 2 +- .../openstack/compute/plugins/v3/deferred_delete.py | 6 ++---- nova/api/openstack/compute/plugins/v3/keypairs.py | 6 ++---- .../api/openstack/compute/plugins/v3/migrate_server.py | 10 +++------- .../openstack/compute/plugins/v3/server_metadata.py | 10 ++++------ nova/api/openstack/compute/plugins/v3/servers.py | 2 +- nova/api/openstack/compute/server_metadata.py | 4 +--- nova/exception.py | 3 +++ .../api/openstack/compute/contrib/test_keypairs.py | 8 ++++---- .../openstack/compute/contrib/test_security_groups.py | 6 ++---- .../api/openstack/compute/plugins/v3/test_keypairs.py | 8 ++++---- .../compute/plugins/v3/test_migrate_server.py | 2 +- .../compute/plugins/v3/test_server_actions.py | 2 +- .../compute/plugins/v3/test_server_metadata.py | 8 ++++---- .../tests/api/openstack/compute/test_image_metadata.py | 8 ++++---- .../tests/api/openstack/compute/test_server_actions.py | 2 +- .../api/openstack/compute/test_server_metadata.py | 8 ++++---- 22 files changed, 47 insertions(+), 65 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 5ac796ac45..91fcbffc55 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -291,8 +291,7 @@ def check_img_metadata_properties_quota(context, metadata): QUOTAS.limit_check(context, metadata_items=len(metadata)) except exception.OverQuota: expl = _("Image metadata limit exceeded") - raise webob.exc.HTTPRequestEntityTooLarge(explanation=expl, - headers={'Retry-After': 0}) + raise webob.exc.HTTPForbidden(explanation=expl) # check the key length. 
if isinstance(metadata, dict): diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py index 8921aeb944..361ab482a6 100644 --- a/nova/api/openstack/compute/contrib/admin_actions.py +++ b/nova/api/openstack/compute/contrib/admin_actions.py @@ -144,9 +144,7 @@ def _migrate(self, req, id, body): instance = self.compute_api.get(context, id, want_objects=True) self.compute_api.resize(req.environ['nova.context'], instance) except exception.QuotaError as error: - raise exc.HTTPRequestEntityTooLarge( - explanation=error.format_message(), - headers={'Retry-After': 0}) + raise exc.HTTPForbidden(explanation=error.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: diff --git a/nova/api/openstack/compute/contrib/deferred_delete.py b/nova/api/openstack/compute/contrib/deferred_delete.py index cb895f625a..f457111fad 100644 --- a/nova/api/openstack/compute/contrib/deferred_delete.py +++ b/nova/api/openstack/compute/contrib/deferred_delete.py @@ -43,9 +43,7 @@ def _restore(self, req, id, body): try: self.compute_api.restore(context, instance) except exception.QuotaError as error: - raise webob.exc.HTTPRequestEntityTooLarge( - explanation=error.format_message(), - headers={'Retry-After': 0}) + raise webob.exc.HTTPForbidden(explanation=error.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'restore') diff --git a/nova/api/openstack/compute/contrib/keypairs.py b/nova/api/openstack/compute/contrib/keypairs.py index cd023e8eae..dd9c47873a 100644 --- a/nova/api/openstack/compute/contrib/keypairs.py +++ b/nova/api/openstack/compute/contrib/keypairs.py @@ -102,9 +102,7 @@ def create(self, req, body): except exception.KeypairLimitExceeded: msg = _("Quota exceeded, too many key pairs.") - raise 
webob.exc.HTTPRequestEntityTooLarge( - explanation=msg, - headers={'Retry-After': 0}) + raise webob.exc.HTTPForbidden(explanation=msg) except exception.InvalidKeypair as exc: raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) except exception.KeyPairExists as exc: diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py index 415fcfed9e..102d2fad2b 100644 --- a/nova/api/openstack/compute/contrib/security_groups.py +++ b/nova/api/openstack/compute/contrib/security_groups.py @@ -194,7 +194,7 @@ def translate_exceptions(): raise exc.HTTPNotFound(explanation=msg) except exception.SecurityGroupLimitExceeded as exp: msg = exp.format_message() - raise exc.HTTPRequestEntityTooLarge(explanation=msg) + raise exc.HTTPForbidden(explanation=msg) except exception.NoUniqueMatch as exp: msg = exp.format_message() raise exc.HTTPConflict(explanation=msg) diff --git a/nova/api/openstack/compute/plugins/v3/create_backup.py b/nova/api/openstack/compute/plugins/v3/create_backup.py index 2ca40eb11d..61dc1f1f3a 100644 --- a/nova/api/openstack/compute/plugins/v3/create_backup.py +++ b/nova/api/openstack/compute/plugins/v3/create_backup.py @@ -34,7 +34,7 @@ def __init__(self, *args, **kwargs): super(CreateBackupController, self).__init__(*args, **kwargs) self.compute_api = compute.API() - @extensions.expected_errors((400, 404, 409, 413)) + @extensions.expected_errors((400, 403, 404, 409)) @wsgi.action('create_backup') @validation.schema(create_backup.create_backup) def _create_backup(self, req, id, body): diff --git a/nova/api/openstack/compute/plugins/v3/deferred_delete.py b/nova/api/openstack/compute/plugins/v3/deferred_delete.py index f3e30afedc..416ebd1625 100644 --- a/nova/api/openstack/compute/plugins/v3/deferred_delete.py +++ b/nova/api/openstack/compute/plugins/v3/deferred_delete.py @@ -33,7 +33,7 @@ def __init__(self, *args, **kwargs): super(DeferredDeleteController, self).__init__(*args, **kwargs) 
self.compute_api = compute.API() - @extensions.expected_errors((404, 409, 413)) + @extensions.expected_errors((404, 409, 403)) @wsgi.action('restore') def _restore(self, req, id, body): """Restore a previously deleted instance.""" @@ -44,9 +44,7 @@ def _restore(self, req, id, body): try: self.compute_api.restore(context, instance) except exception.QuotaError as error: - raise webob.exc.HTTPRequestEntityTooLarge( - explanation=error.format_message(), - headers={'Retry-After': 0}) + raise webob.exc.HTTPForbidden(explanation=error.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'restore') diff --git a/nova/api/openstack/compute/plugins/v3/keypairs.py b/nova/api/openstack/compute/plugins/v3/keypairs.py index 2f8e450108..5ce288bd62 100644 --- a/nova/api/openstack/compute/plugins/v3/keypairs.py +++ b/nova/api/openstack/compute/plugins/v3/keypairs.py @@ -48,7 +48,7 @@ def _filter_keypair(self, keypair, **attrs): clean[attr] = keypair[attr] return clean - @extensions.expected_errors((400, 409, 413)) + @extensions.expected_errors((400, 403, 409)) @wsgi.response(201) @validation.schema(keypairs.create) def create(self, req, body): @@ -86,9 +86,7 @@ def create(self, req, body): except exception.KeypairLimitExceeded: msg = _("Quota exceeded, too many key pairs.") - raise webob.exc.HTTPRequestEntityTooLarge( - explanation=msg, - headers={'Retry-After': 0}) + raise webob.exc.HTTPForbidden(explanation=msg) except exception.InvalidKeypair as exc: raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) except exception.KeyPairExists as exc: diff --git a/nova/api/openstack/compute/plugins/v3/migrate_server.py b/nova/api/openstack/compute/plugins/v3/migrate_server.py index 87e9a939e4..fb6bfd45f9 100644 --- a/nova/api/openstack/compute/plugins/v3/migrate_server.py +++ b/nova/api/openstack/compute/plugins/v3/migrate_server.py @@ -38,7 +38,7 @@ def __init__(self, *args, **kwargs): 
super(MigrateServerController, self).__init__(*args, **kwargs) self.compute_api = compute.API() - @extensions.expected_errors((400, 404, 409, 413)) + @extensions.expected_errors((400, 403, 404, 409)) @wsgi.action('migrate') def _migrate(self, req, id, body): """Permit admins to migrate a server to a new host.""" @@ -49,12 +49,8 @@ def _migrate(self, req, id, body): want_objects=True) try: self.compute_api.resize(req.environ['nova.context'], instance) - except exception.TooManyInstances as e: - raise exc.HTTPRequestEntityTooLarge(explanation=e.format_message()) - except exception.QuotaError as error: - raise exc.HTTPRequestEntityTooLarge( - explanation=error.format_message(), - headers={'Retry-After': 0}) + except (exception.TooManyInstances, exception.QuotaError) as e: + raise exc.HTTPForbidden(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: diff --git a/nova/api/openstack/compute/plugins/v3/server_metadata.py b/nova/api/openstack/compute/plugins/v3/server_metadata.py index 89709e1e72..44ef229969 100644 --- a/nova/api/openstack/compute/plugins/v3/server_metadata.py +++ b/nova/api/openstack/compute/plugins/v3/server_metadata.py @@ -53,7 +53,7 @@ def index(self, req, server_id): context = req.environ['nova.context'] return {'metadata': self._get_metadata(context, server_id)} - @extensions.expected_errors((400, 404, 409, 413)) + @extensions.expected_errors((400, 403, 404, 409, 413)) @wsgi.response(201) def create(self, req, server_id, body): if not self.is_valid_body(body, 'metadata'): @@ -69,7 +69,7 @@ def create(self, req, server_id, body): return {'metadata': new_metadata} - @extensions.expected_errors((400, 404, 409, 413)) + @extensions.expected_errors((400, 403, 404, 409, 413)) def update(self, req, server_id, id, body): if not self.is_valid_body(body, 'metadata'): msg = _("Malformed request body") @@ -91,7 +91,7 @@ def update(self, 
req, server_id, id, body): return {'metadata': meta_item} - @extensions.expected_errors((400, 404, 409, 413)) + @extensions.expected_errors((400, 403, 404, 409, 413)) def update_all(self, req, server_id, body): if not self.is_valid_body(body, 'metadata'): msg = _("Malformed request body") @@ -123,9 +123,7 @@ def _update_instance_metadata(self, context, server_id, metadata, explanation=error.format_message()) except exception.QuotaError as error: - raise exc.HTTPRequestEntityTooLarge( - explanation=error.format_message(), - headers={'Retry-After': 0}) + raise exc.HTTPForbidden(explanation=error.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index 1009556941..2012b8e176 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -912,7 +912,7 @@ def _action_rebuild(self, req, id, body): robj = wsgi.ResponseObject(view) return self._add_location(robj) - @extensions.expected_errors((400, 404, 409, 413)) + @extensions.expected_errors((400, 403, 404, 409)) @wsgi.response(202) @wsgi.action('create_image') @common.check_snapshots_enabled diff --git a/nova/api/openstack/compute/server_metadata.py b/nova/api/openstack/compute/server_metadata.py index a0340bcb60..68cc188e0d 100644 --- a/nova/api/openstack/compute/server_metadata.py +++ b/nova/api/openstack/compute/server_metadata.py @@ -134,9 +134,7 @@ def _update_instance_metadata(self, context, server_id, metadata, explanation=error.format_message()) except exception.QuotaError as error: - raise exc.HTTPRequestEntityTooLarge( - explanation=error.format_message(), - headers={'Retry-After': 0}) + raise exc.HTTPForbidden(explanation=error.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) diff --git a/nova/exception.py 
b/nova/exception.py index 52302469a4..322a2a43e5 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -1152,6 +1152,9 @@ class NoValidHost(NovaException): class QuotaError(NovaException): ec2_code = 'ResourceLimitExceeded' msg_fmt = _("Quota exceeded: code=%(code)s") + # NOTE(cyeoh): 413 should only be used for the ec2 API + # The error status code for out of quota for the nova api should be + # 403 Forbidden. code = 413 headers = {'Retry-After': 0} safe = True diff --git a/nova/tests/api/openstack/compute/contrib/test_keypairs.py b/nova/tests/api/openstack/compute/contrib/test_keypairs.py index 6ca588588f..fb53eefc98 100644 --- a/nova/tests/api/openstack/compute/contrib/test_keypairs.py +++ b/nova/tests/api/openstack/compute/contrib/test_keypairs.py @@ -195,11 +195,11 @@ def fake_quotas_count(self, context, resource, *args, **kwargs): req.body = jsonutils.dumps(body) req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) - self.assertEqual(res.status_int, 413) + self.assertEqual(res.status_int, 403) res_dict = jsonutils.loads(res.body) self.assertEqual( "Quota exceeded, too many key pairs.", - res_dict['overLimit']['message']) + res_dict['forbidden']['message']) def test_keypair_create_quota_limit(self): @@ -219,11 +219,11 @@ def fake_quotas_count(self, context, resource, *args, **kwargs): req.body = jsonutils.dumps(body) req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) - self.assertEqual(res.status_int, 413) + self.assertEqual(res.status_int, 403) res_dict = jsonutils.loads(res.body) self.assertEqual( "Quota exceeded, too many key pairs.", - res_dict['overLimit']['message']) + res_dict['forbidden']['message']) def test_keypair_create_duplicate(self): self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate) diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py index 492dc44bf3..b3ef7ebeb6 100644 --- 
a/nova/tests/api/openstack/compute/contrib/test_security_groups.py +++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py @@ -287,8 +287,7 @@ def test_create_security_group_quota_limit(self): self.assertEqual(res_dict['security_group']['name'], name) sg = security_group_template() - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.create, + self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, req, {'security_group': sg}) def test_get_security_group_list(self): @@ -1243,8 +1242,7 @@ def test_create_rule_quota_limit(self): 'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121', 'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id'] } - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.create, + self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, req, {'security_group_rule': rule}) def test_create_rule_cidr_allow_all(self): diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py b/nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py index 91cadd694d..76fb432e73 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py @@ -226,11 +226,11 @@ def fake_quotas_count(self, context, resource, *args, **kwargs): req.body = jsonutils.dumps(body) req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) - self.assertEqual(res.status_int, 413) + self.assertEqual(res.status_int, 403) res_dict = jsonutils.loads(res.body) self.assertEqual( "Quota exceeded, too many key pairs.", - res_dict['overLimit']['message']) + res_dict['forbidden']['message']) def test_keypair_create_quota_limit(self): @@ -250,11 +250,11 @@ def fake_quotas_count(self, context, resource, *args, **kwargs): req.body = jsonutils.dumps(body) req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) - self.assertEqual(res.status_int, 413) + 
self.assertEqual(res.status_int, 403) res_dict = jsonutils.loads(res.body) self.assertEqual( "Quota exceeded, too many key pairs.", - res_dict['overLimit']['message']) + res_dict['forbidden']['message']) def test_keypair_create_duplicate(self): self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_migrate_server.py b/nova/tests/api/openstack/compute/plugins/v3/test_migrate_server.py index d6167c1786..7c2a173fbf 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_migrate_server.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_migrate_server.py @@ -91,7 +91,7 @@ def _test_migrate_exception(self, exc_info, expected_result): def test_migrate_too_many_instances(self): exc_info = exception.TooManyInstances(overs='', req='', used=0, allowed=0, resource='') - self._test_migrate_exception(exc_info, 413) + self._test_migrate_exception(exc_info, 403) def _test_migrate_live_succeeded(self, param): self.mox.StubOutWithMock(self.compute_api, 'live_migrate') diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py index 7b1e98638c..5af71d14b7 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py @@ -1045,7 +1045,7 @@ def test_create_image_with_too_much_metadata(self): body['create_image']['metadata']['foo%i' % num] = "bar" req = fakes.HTTPRequestV3.blank(self.url) - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.assertRaises(webob.exc.HTTPForbidden, self.controller._action_create_image, req, FAKE_UUID, body) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py index ce1f343b31..cb4d1a3f1c 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py +++ 
b/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py @@ -479,8 +479,8 @@ def test_too_many_metadata_items_on_create(self): req.body = jsonutils.dumps(data) req.headers["content-type"] = "application/json" - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.create, req, self.uuid, data) + self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, req, + self.uuid, data) def test_create_item_value_too_long(self): self.stubs.Set(nova.db, 'instance_metadata_update', @@ -505,8 +505,8 @@ def test_too_many_metadata_items_on_update_item(self): req.body = jsonutils.dumps(data) req.headers["content-type"] = "application/json" - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.update_all, req, self.uuid, data) + self.assertRaises(webob.exc.HTTPForbidden, self.controller.update_all, + req, self.uuid, data) def test_invalid_metadata_items_on_update_item(self): self.stubs.Set(nova.db, 'instance_metadata_update', diff --git a/nova/tests/api/openstack/compute/test_image_metadata.py b/nova/tests/api/openstack/compute/test_image_metadata.py index 822157ea79..ac43c7768c 100644 --- a/nova/tests/api/openstack/compute/test_image_metadata.py +++ b/nova/tests/api/openstack/compute/test_image_metadata.py @@ -265,7 +265,7 @@ def test_delete_image_not_found(self, _get_mocked): self.controller.delete, req, '100', 'key1') @mock.patch(CHK_QUOTA_STR, - side_effect=webob.exc.HTTPRequestEntityTooLarge( + side_effect=webob.exc.HTTPForbidden( explanation='', headers={'Retry-After': 0})) @mock.patch('nova.image.api.API.update') @mock.patch('nova.image.api.API.get', return_value=get_image_123()) @@ -277,12 +277,12 @@ def test_too_many_metadata_items_on_create(self, _get_mocked, req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, req, '123', body) 
self.assertFalse(update_mocked.called) @mock.patch(CHK_QUOTA_STR, - side_effect=webob.exc.HTTPRequestEntityTooLarge( + side_effect=webob.exc.HTTPForbidden( explanation='', headers={'Retry-After': 0})) @mock.patch('nova.image.api.API.update') @mock.patch('nova.image.api.API.get', return_value=get_image_123()) @@ -295,7 +295,7 @@ def test_too_many_metadata_items_on_put(self, _get_mocked, req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, req, '123', 'blah', body) self.assertFalse(update_mocked.called) diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py index 848f897668..6d03436708 100644 --- a/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/tests/api/openstack/compute/test_server_actions.py @@ -1263,7 +1263,7 @@ def test_create_image_with_too_much_metadata(self): body['createImage']['metadata']['foo%i' % num] = "bar" req = fakes.HTTPRequest.blank(self.url) - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, + self.assertRaises(webob.exc.HTTPForbidden, self.controller._action_create_image, req, FAKE_UUID, body) diff --git a/nova/tests/api/openstack/compute/test_server_metadata.py b/nova/tests/api/openstack/compute/test_server_metadata.py index 2f8659fde0..71974a95bf 100644 --- a/nova/tests/api/openstack/compute/test_server_metadata.py +++ b/nova/tests/api/openstack/compute/test_server_metadata.py @@ -453,8 +453,8 @@ def test_too_many_metadata_items_on_create(self): req.body = jsonutils.dumps(data) req.headers["content-type"] = "application/json" - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.create, req, self.uuid, data) + self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, + req, self.uuid, data) def test_invalid_metadata_items_on_create(self): self.stubs.Set(nova.db, 
'instance_metadata_update', @@ -492,8 +492,8 @@ def test_too_many_metadata_items_on_update_item(self): req.body = jsonutils.dumps(data) req.headers["content-type"] = "application/json" - self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, - self.controller.update_all, req, self.uuid, data) + self.assertRaises(webob.exc.HTTPForbidden, self.controller.update_all, + req, self.uuid, data) def test_invalid_metadata_items_on_update_item(self): self.stubs.Set(nova.db, 'instance_metadata_update', From d05cc3f78f74acd8db688867ddb262c14e50e1d0 Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Thu, 21 Aug 2014 18:03:25 +0900 Subject: [PATCH 452/486] Backport some v3 scheduler hints API UT to v2 API This patch adds below v3 scheduler hints API unittest into V2 API to improve V2 API unittesting. Change-Id: I5352906567c1c90c33d0709addaeb445fe1a7371 --- .../compute/contrib/test_scheduler_hints.py | 73 +++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py b/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py index 0d867b8645..c386fcc2fb 100644 --- a/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py +++ b/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py @@ -13,11 +13,19 @@ # License for the specific language governing permissions and limitations # under the License. 
+import datetime + from nova.api.openstack import compute +from nova.api.openstack.compute import servers +from nova.api.openstack import extensions import nova.compute.api +from nova.compute import flavors +from nova import db from nova.openstack.common import jsonutils from nova import test from nova.tests.api.openstack import fakes +from nova.tests import fake_instance +from nova.tests.image import fake UUID = fakes.FAKE_UUID @@ -95,3 +103,68 @@ def test_create_server_bad_hints(self): req.body = jsonutils.dumps(body) res = req.get_response(self.app) self.assertEqual(400, res.status_int) + + +class ServersControllerCreateTest(test.TestCase): + + def setUp(self): + """Shared implementation for tests below that create instance.""" + super(ServersControllerCreateTest, self).setUp() + + self.instance_cache_num = 0 + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + self.controller = servers.Controller(self.ext_mgr) + + def instance_create(context, inst): + inst_type = flavors.get_flavor_by_flavor_id(3) + image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' + def_image_ref = 'http://localhost/images/%s' % image_uuid + self.instance_cache_num += 1 + instance = fake_instance.fake_db_instance(**{ + 'id': self.instance_cache_num, + 'display_name': inst['display_name'] or 'test', + 'uuid': fakes.FAKE_UUID, + 'instance_type': dict(inst_type), + 'access_ip_v4': '1.2.3.4', + 'access_ip_v6': 'fead::1234', + 'image_ref': inst.get('image_ref', def_image_ref), + 'user_id': 'fake', + 'project_id': 'fake', + 'reservation_id': inst['reservation_id'], + "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), + "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), + "progress": 0, + "fixed_ips": [], + "task_state": "", + "vm_state": "", + "root_device_name": inst.get('root_device_name', 'vda'), + }) + + return instance + + fake.stub_out_image_service(self.stubs) + self.stubs.Set(db, 'instance_create', instance_create) + + def _test_create_extra(self, 
params): + image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' + server = dict(name='server_test', imageRef=image_uuid, flavorRef=2) + body = dict(server=server) + body.update(params) + req = fakes.HTTPRequest.blank('/fake//servers') + req.method = 'POST' + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + server = self.controller.create(req, body=body).obj['server'] + + def test_create_instance_with_scheduler_hints_disabled(self): + hints = {'same_host': '48e6a9f6-30af-47e0-bc04-acaed113bb4e'} + params = {'OS-SCH-HNT:scheduler_hints': hints} + old_create = nova.compute.api.API.create + + def create(*args, **kwargs): + self.assertEqual(kwargs['scheduler_hints'], {}) + return old_create(*args, **kwargs) + + self.stubs.Set(nova.compute.api.API, 'create', create) + self._test_create_extra(params) From 3792bd0e4355c0e2e487c0977e0cdd92b05b3471 Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Fri, 22 Aug 2014 14:15:28 +0900 Subject: [PATCH 453/486] Convert v3 server SchedulerHints plugin to v2.1 This patch converts v3 server SchedulerHints plugin to v2.1 and makes v2 unit tests share between v2 and v2.1. The differences between v2 and v3 are described on the wiki page https://wiki.openstack.org/wiki/NovaAPIv2tov3. Note- to make V2.1 SchedulerHints extension work in V3 framework, an extra parameter (request body) is being passed in server_create function of each extensions. 
Partially implements blueprint v2-on-v3-api Change-Id: I8a0d50efcda6fcd12971a41505127de5987eec18 --- .../scheduler-hints-post-req.json | 8 +- .../compute/plugins/v3/access_ips.py | 7 +- .../compute/plugins/v3/availability_zone.py | 4 +- .../plugins/v3/block_device_mapping.py | 4 +- .../compute/plugins/v3/config_drive.py | 4 +- .../openstack/compute/plugins/v3/keypairs.py | 4 +- .../compute/plugins/v3/multiple_create.py | 4 +- .../compute/plugins/v3/scheduler_hints.py | 13 +- .../compute/plugins/v3/security_groups.py | 4 +- .../openstack/compute/plugins/v3/servers.py | 10 +- .../openstack/compute/plugins/v3/user_data.py | 4 +- .../compute/contrib/test_scheduler_hints.py | 88 ++++-- .../plugins/v3/test_scheduler_hints.py | 254 ------------------ .../scheduler-hints-post-req.json.tpl | 8 +- 14 files changed, 122 insertions(+), 294 deletions(-) delete mode 100644 nova/tests/api/openstack/compute/plugins/v3/test_scheduler_hints.py diff --git a/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json b/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json index 0ee3de0871..0466cecb18 100644 --- a/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json +++ b/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json @@ -2,9 +2,9 @@ "server" : { "name" : "new-server-test", "imageRef" : "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", - "flavorRef" : "http://openstack.example.com/openstack/flavors/1", - "os-scheduler-hints:scheduler_hints": { - "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e" - } + "flavorRef" : "http://openstack.example.com/openstack/flavors/1" + }, + "OS-SCH-HNT:scheduler_hints": { + "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e" } } diff --git a/nova/api/openstack/compute/plugins/v3/access_ips.py b/nova/api/openstack/compute/plugins/v3/access_ips.py index d7378d3a2a..bf9f97452c 100644 --- a/nova/api/openstack/compute/plugins/v3/access_ips.py +++ 
b/nova/api/openstack/compute/plugins/v3/access_ips.py @@ -90,7 +90,12 @@ def get_controller_extensions(self): def get_resources(self): return [] - def server_create(self, server_dict, create_kwargs): + # NOTE(gmann): This function is not supposed to use 'body_deprecated_param' + # parameter as this is placed to handle scheduler_hint extension for V2.1. + # making 'body_deprecated_param' as optional to avoid changes for + # server_update & server_rebuild + def server_create(self, server_dict, create_kwargs, + body_deprecated_param=None): if AccessIPs.v4_key in server_dict: access_ip_v4 = server_dict.get(AccessIPs.v4_key) if access_ip_v4: diff --git a/nova/api/openstack/compute/plugins/v3/availability_zone.py b/nova/api/openstack/compute/plugins/v3/availability_zone.py index dcba03beea..d483d89952 100644 --- a/nova/api/openstack/compute/plugins/v3/availability_zone.py +++ b/nova/api/openstack/compute/plugins/v3/availability_zone.py @@ -139,5 +139,7 @@ def get_controller_extensions(self): """ return [] - def server_create(self, server_dict, create_kwargs): + # NOTE(gmann): This function is not supposed to use 'body_deprecated_param' + # parameter as this is placed to handle scheduler_hint extension for V2.1. 
+ def server_create(self, server_dict, create_kwargs, body_deprecated_param): create_kwargs['availability_zone'] = server_dict.get(ATTRIBUTE_NAME) diff --git a/nova/api/openstack/compute/plugins/v3/block_device_mapping.py b/nova/api/openstack/compute/plugins/v3/block_device_mapping.py index 76d723bd37..0ee6c8340b 100644 --- a/nova/api/openstack/compute/plugins/v3/block_device_mapping.py +++ b/nova/api/openstack/compute/plugins/v3/block_device_mapping.py @@ -40,7 +40,9 @@ def get_controller_extensions(self): # use nova.api.extensions.server.extensions entry point to modify # server create kwargs - def server_create(self, server_dict, create_kwargs): + # NOTE(gmann): This function is not supposed to use 'body_deprecated_param' + # parameter as this is placed to handle scheduler_hint extension for V2.1. + def server_create(self, server_dict, create_kwargs, body_deprecated_param): block_device_mapping = server_dict.get(ATTRIBUTE_NAME, []) try: diff --git a/nova/api/openstack/compute/plugins/v3/config_drive.py b/nova/api/openstack/compute/plugins/v3/config_drive.py index 345c30f2d9..f30b066cbf 100644 --- a/nova/api/openstack/compute/plugins/v3/config_drive.py +++ b/nova/api/openstack/compute/plugins/v3/config_drive.py @@ -68,7 +68,9 @@ def get_controller_extensions(self): def get_resources(self): return [] - def server_create(self, server_dict, create_kwargs): + # NOTE(gmann): This function is not supposed to use 'body_deprecated_param' + # parameter as this is placed to handle scheduler_hint extension for V2.1. 
+ def server_create(self, server_dict, create_kwargs, body_deprecated_param): create_kwargs['config_drive'] = server_dict.get(ATTRIBUTE_NAME) def get_server_create_schema(self): diff --git a/nova/api/openstack/compute/plugins/v3/keypairs.py b/nova/api/openstack/compute/plugins/v3/keypairs.py index 2f8e450108..81c8b7424f 100644 --- a/nova/api/openstack/compute/plugins/v3/keypairs.py +++ b/nova/api/openstack/compute/plugins/v3/keypairs.py @@ -178,7 +178,9 @@ def get_controller_extensions(self): # use nova.api.extensions.server.extensions entry point to modify # server create kwargs - def server_create(self, server_dict, create_kwargs): + # NOTE(gmann): This function is not supposed to use 'body_deprecated_param' + # parameter as this is placed to handle scheduler_hint extension for V2.1. + def server_create(self, server_dict, create_kwargs, body_deprecated_param): create_kwargs['key_name'] = server_dict.get('key_name') def get_server_create_schema(self): diff --git a/nova/api/openstack/compute/plugins/v3/multiple_create.py b/nova/api/openstack/compute/plugins/v3/multiple_create.py index 5f60f702fe..4dc1cfa64f 100644 --- a/nova/api/openstack/compute/plugins/v3/multiple_create.py +++ b/nova/api/openstack/compute/plugins/v3/multiple_create.py @@ -42,7 +42,9 @@ def get_controller_extensions(self): # use nova.api.extensions.server.extensions entry point to modify # server create kwargs - def server_create(self, server_dict, create_kwargs): + # NOTE(gmann): This function is not supposed to use 'body_deprecated_param' + # parameter as this is placed to handle scheduler_hint extension for V2.1. + def server_create(self, server_dict, create_kwargs, body_deprecated_param): # min_count and max_count are optional. If they exist, they may come # in as strings. Verify that they are valid integers and > 0. 
# Also, we want to default 'min_count' to 1, and default diff --git a/nova/api/openstack/compute/plugins/v3/scheduler_hints.py b/nova/api/openstack/compute/plugins/v3/scheduler_hints.py index 35972fdf40..c556cf5f38 100644 --- a/nova/api/openstack/compute/plugins/v3/scheduler_hints.py +++ b/nova/api/openstack/compute/plugins/v3/scheduler_hints.py @@ -33,8 +33,17 @@ def get_controller_extensions(self): def get_resources(self): return [] - def server_create(self, server_dict, create_kwargs): - scheduler_hints = server_dict.get(ALIAS + ':scheduler_hints', {}) + # NOTE(gmann): Accepting request body in this function to fetch "scheduler + # hint". This is a workaround to allow OS_SCH-HNT at the top level + # of the body request, but that it will be changed in the future to be a + # subset of the servers dict. + def server_create(self, server_dict, create_kwargs, req_body): + scheduler_hints = {} + if 'os:scheduler_hints' in req_body: + scheduler_hints = req_body['os:scheduler_hints'] + elif 'OS-SCH-HNT:scheduler_hints' in req_body: + scheduler_hints = req_body['OS-SCH-HNT:scheduler_hints'] + if not isinstance(scheduler_hints, dict): msg = _("Malformed scheduler_hints attribute") raise webob.exc.HTTPBadRequest(explanation=msg) diff --git a/nova/api/openstack/compute/plugins/v3/security_groups.py b/nova/api/openstack/compute/plugins/v3/security_groups.py index 19bc5fa385..9340670bfb 100644 --- a/nova/api/openstack/compute/plugins/v3/security_groups.py +++ b/nova/api/openstack/compute/plugins/v3/security_groups.py @@ -121,7 +121,9 @@ def get_controller_extensions(self): def get_resources(self): return [] - def server_create(self, server_dict, create_kwargs): + # NOTE(gmann): This function is not supposed to use 'body_deprecated_param' + # parameter as this is placed to handle scheduler_hint extension for V2.1. 
+ def server_create(self, server_dict, create_kwargs, body_deprecated_param): security_groups = server_dict.get(ATTRIBUTE_NAME) if security_groups is not None: create_kwargs['security_group'] = [ diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index f0763acafb..0a18d956ae 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -459,7 +459,7 @@ def create(self, req, body): # moved to the extension if list(self.create_extension_manager): self.create_extension_manager.map(self._create_extension_point, - server_dict, create_kwargs) + server_dict, create_kwargs, body) image_uuid = self._image_from_req_data(server_dict, create_kwargs) @@ -570,11 +570,15 @@ def create(self, req, body): return self._add_location(robj) - def _create_extension_point(self, ext, server_dict, create_kwargs): + # NOTE(gmann): Parameter 'req_body' is placed to handle scheduler_hint + # extension for V2.1. No other extension supposed to use this as + # it will be removed soon. 
+ def _create_extension_point(self, ext, server_dict, + create_kwargs, req_body): handler = ext.obj LOG.debug("Running _create_extension_point for %s", ext.obj) - handler.server_create(server_dict, create_kwargs) + handler.server_create(server_dict, create_kwargs, req_body) def _rebuild_extension_point(self, ext, rebuild_dict, rebuild_kwargs): handler = ext.obj diff --git a/nova/api/openstack/compute/plugins/v3/user_data.py b/nova/api/openstack/compute/plugins/v3/user_data.py index ab7874c04a..dda968eace 100644 --- a/nova/api/openstack/compute/plugins/v3/user_data.py +++ b/nova/api/openstack/compute/plugins/v3/user_data.py @@ -32,5 +32,7 @@ def get_controller_extensions(self): def get_resources(self): return [] - def server_create(self, server_dict, create_kwargs): + # NOTE(gmann): This function is not supposed to use 'body_deprecated_param' + # parameter as this is placed to handle scheduler_hint extension for V2.1. + def server_create(self, server_dict, create_kwargs, body_deprecated_param): create_kwargs['user_data'] = server_dict.get(ATTRIBUTE_NAME) diff --git a/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py b/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py index c386fcc2fb..7177ef7cbd 100644 --- a/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py +++ b/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py @@ -15,8 +15,12 @@ import datetime +from oslo.config import cfg + from nova.api.openstack import compute -from nova.api.openstack.compute import servers +from nova.api.openstack.compute import plugins +from nova.api.openstack.compute.plugins.v3 import servers as servers_v21 +from nova.api.openstack.compute import servers as servers_v2 from nova.api.openstack import extensions import nova.compute.api from nova.compute import flavors @@ -31,16 +35,22 @@ UUID = fakes.FAKE_UUID -class SchedulerHintsTestCase(test.TestCase): +CONF = cfg.CONF + + +class SchedulerHintsTestCaseV21(test.TestCase): def 
setUp(self): - super(SchedulerHintsTestCase, self).setUp() + super(SchedulerHintsTestCaseV21, self).setUp() self.fake_instance = fakes.stub_instance(1, uuid=UUID) - self.flags( - osapi_compute_extension=[ - 'nova.api.openstack.compute.contrib.select_extensions'], - osapi_compute_ext_list=['Scheduler_hints']) - self.app = compute.APIRouter(init_only=('servers',)) + self._set_up_router() + + def _set_up_router(self): + self.app = compute.APIRouterV3(init_only=('servers', + 'os-scheduler-hints')) + + def _get_request(self): + return fakes.HTTPRequestV3.blank('/servers') def test_create_server_without_hints(self): @@ -50,7 +60,7 @@ def fake_create(*args, **kwargs): self.stubs.Set(nova.compute.api.API, 'create', fake_create) - req = fakes.HTTPRequest.blank('/fake/servers') + req = self._get_request() req.method = 'POST' req.content_type = 'application/json' body = {'server': { @@ -71,7 +81,7 @@ def fake_create(*args, **kwargs): self.stubs.Set(nova.compute.api.API, 'create', fake_create) - req = fakes.HTTPRequest.blank('/fake/servers') + req = self._get_request() req.method = 'POST' req.content_type = 'application/json' body = { @@ -88,7 +98,7 @@ def fake_create(*args, **kwargs): self.assertEqual(202, res.status_int) def test_create_server_bad_hints(self): - req = fakes.HTTPRequest.blank('/fake/servers') + req = self._get_request() req.method = 'POST' req.content_type = 'application/json' body = { @@ -105,16 +115,27 @@ def test_create_server_bad_hints(self): self.assertEqual(400, res.status_int) -class ServersControllerCreateTest(test.TestCase): +class SchedulerHintsTestCaseV2(SchedulerHintsTestCaseV21): + + def _set_up_router(self): + self.flags( + osapi_compute_extension=[ + 'nova.api.openstack.compute.contrib.select_extensions'], + osapi_compute_ext_list=['Scheduler_hints']) + self.app = compute.APIRouter(init_only=('servers',)) + + def _get_request(self): + return fakes.HTTPRequest.blank('/fake/servers') + + +class ServersControllerCreateTestV21(test.TestCase): def 
setUp(self): """Shared implementation for tests below that create instance.""" - super(ServersControllerCreateTest, self).setUp() + super(ServersControllerCreateTestV21, self).setUp() self.instance_cache_num = 0 - self.ext_mgr = extensions.ExtensionManager() - self.ext_mgr.extensions = {} - self.controller = servers.Controller(self.ext_mgr) + self._set_up_controller() def instance_create(context, inst): inst_type = flavors.get_flavor_by_flavor_id(3) @@ -146,16 +167,30 @@ def instance_create(context, inst): fake.stub_out_image_service(self.stubs) self.stubs.Set(db, 'instance_create', instance_create) + def _set_up_controller(self): + ext_info = plugins.LoadedExtensionInfo() + CONF.set_override('extensions_blacklist', 'os-scheduler-hints', + 'osapi_v3') + self.no_scheduler_hints_controller = servers_v21.ServersController( + extension_info=ext_info) + + def _verify_availability_zone(self, **kwargs): + self.assertNotIn('scheduler_hints', kwargs) + + def _get_request(self): + return fakes.HTTPRequestV3.blank('/servers') + def _test_create_extra(self, params): image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' server = dict(name='server_test', imageRef=image_uuid, flavorRef=2) body = dict(server=server) body.update(params) - req = fakes.HTTPRequest.blank('/fake//servers') + req = self._get_request() req.method = 'POST' req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" - server = self.controller.create(req, body=body).obj['server'] + server = self.no_scheduler_hints_controller.create( + req, body=body).obj['server'] def test_create_instance_with_scheduler_hints_disabled(self): hints = {'same_host': '48e6a9f6-30af-47e0-bc04-acaed113bb4e'} @@ -163,8 +198,23 @@ def test_create_instance_with_scheduler_hints_disabled(self): old_create = nova.compute.api.API.create def create(*args, **kwargs): - self.assertEqual(kwargs['scheduler_hints'], {}) + self._verify_availability_zone(**kwargs) return old_create(*args, **kwargs) 
self.stubs.Set(nova.compute.api.API, 'create', create) self._test_create_extra(params) + + +class ServersControllerCreateTestV2(ServersControllerCreateTestV21): + + def _set_up_controller(self): + self.ext_mgr = extensions.ExtensionManager() + self.ext_mgr.extensions = {} + self.no_scheduler_hints_controller = servers_v2.Controller( + self.ext_mgr) + + def _verify_availability_zone(self, **kwargs): + self.assertEqual(kwargs['scheduler_hints'], {}) + + def _get_request(self): + return fakes.HTTPRequest.blank('/fake/servers') diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_scheduler_hints.py b/nova/tests/api/openstack/compute/plugins/v3/test_scheduler_hints.py deleted file mode 100644 index a133a20b70..0000000000 --- a/nova/tests/api/openstack/compute/plugins/v3/test_scheduler_hints.py +++ /dev/null @@ -1,254 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import uuid - -from oslo.config import cfg - -from nova.api.openstack import compute -from nova.api.openstack.compute import plugins -from nova.api.openstack.compute.plugins.v3 import servers -from nova.compute import api as compute_api -from nova.compute import flavors -from nova import db -import nova.db.api -from nova.network import manager -from nova.openstack.common import jsonutils -from nova import test -from nova.tests.api.openstack import fakes -from nova.tests import fake_instance -from nova.tests.image import fake - - -CONF = cfg.CONF -FAKE_UUID = fakes.FAKE_UUID - - -def fake_gen_uuid(): - return FAKE_UUID - - -def return_security_group(context, instance_id, security_group_id): - pass - - -class SchedulerHintsTestCase(test.TestCase): - - def setUp(self): - super(SchedulerHintsTestCase, self).setUp() - self.fake_instance = fakes.stub_instance(1, uuid=FAKE_UUID) - self.app = compute.APIRouterV3(init_only=('servers', - 'os-scheduler-hints')) - - def test_create_server_without_hints(self): - - def fake_create(*args, **kwargs): - self.assertEqual(kwargs['scheduler_hints'], {}) - return ([self.fake_instance], '') - - self.stubs.Set(nova.compute.api.API, 'create', fake_create) - - req = fakes.HTTPRequestV3.blank('/servers') - req.method = 'POST' - req.content_type = 'application/json' - body = {'server': { - 'name': 'server_test', - 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175', - 'flavorRef': '1', - }} - - req.body = jsonutils.dumps(body) - res = req.get_response(self.app) - self.assertEqual(202, res.status_int) - - def test_create_server_with_hints(self): - hints = {'same_host': '48e6a9f6-30af-47e0-bc04-acaed113bb4e'} - - def fake_create(*args, **kwargs): - self.assertEqual(hints, kwargs['scheduler_hints']) - return ([self.fake_instance], '') - - self.stubs.Set(nova.compute.api.API, 'create', fake_create) - - req = fakes.HTTPRequestV3.blank('/servers') - req.method = 'POST' - req.content_type = 'application/json' - body = { - 
'server': { - 'name': 'server_test', - 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175', - 'flavorRef': '1', - 'os-scheduler-hints:scheduler_hints': hints, - }, - } - - req.body = jsonutils.dumps(body) - res = req.get_response(self.app) - self.assertEqual(202, res.status_int) - - def test_create_server_bad_hints(self): - req = fakes.HTTPRequestV3.blank('/servers') - req.method = 'POST' - req.content_type = 'application/json' - body = { - 'server': { - 'name': 'server_test', - 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175', - 'flavorRef': '1', - 'os-scheduler-hints:scheduler_hints': 'non-dict', - }, - } - - req.body = jsonutils.dumps(body) - res = req.get_response(self.app) - self.assertEqual(400, res.status_int) - - -class ServersControllerCreateTest(test.TestCase): - - def setUp(self): - """Shared implementation for tests below that create instance.""" - super(ServersControllerCreateTest, self).setUp() - - self.flags(verbose=True, - enable_instance_password=True) - self.instance_cache_num = 0 - self.instance_cache_by_id = {} - self.instance_cache_by_uuid = {} - - ext_info = plugins.LoadedExtensionInfo() - self.controller = servers.ServersController(extension_info=ext_info) - CONF.set_override('extensions_blacklist', 'os-scheduler-hints', - 'osapi_v3') - self.no_scheduler_hints_controller = servers.ServersController( - extension_info=ext_info) - - def instance_create(context, inst): - inst_type = flavors.get_flavor_by_flavor_id(3) - image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' - def_image_ref = 'http://localhost/images/%s' % image_uuid - self.instance_cache_num += 1 - instance = fake_instance.fake_db_instance(**{ - 'id': self.instance_cache_num, - 'display_name': inst['display_name'] or 'test', - 'uuid': FAKE_UUID, - 'instance_type': dict(inst_type), - 'access_ip_v4': '1.2.3.4', - 'access_ip_v6': 'fead::1234', - 'image_ref': inst.get('image_ref', def_image_ref), - 'user_id': 'fake', - 'project_id': 'fake', - 'reservation_id': inst['reservation_id'], - 
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), - "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), - "config_drive": None, - "progress": 0, - "fixed_ips": [], - "task_state": "", - "vm_state": "", - "root_device_name": inst.get('root_device_name', 'vda'), - }) - - self.instance_cache_by_id[instance['id']] = instance - self.instance_cache_by_uuid[instance['uuid']] = instance - return instance - - def instance_get(context, instance_id): - """Stub for compute/api create() pulling in instance after - scheduling - """ - return self.instance_cache_by_id[instance_id] - - def instance_update(context, uuid, values): - instance = self.instance_cache_by_uuid[uuid] - instance.update(values) - return instance - - def server_update(context, instance_uuid, params): - inst = self.instance_cache_by_uuid[instance_uuid] - inst.update(params) - return (inst, inst) - - def fake_method(*args, **kwargs): - pass - - def project_get_networks(context, user_id): - return dict(id='1', host='localhost') - - def queue_get_for(context, *args): - return 'network_topic' - - fakes.stub_out_rate_limiting(self.stubs) - fakes.stub_out_key_pair_funcs(self.stubs) - fake.stub_out_image_service(self.stubs) - fakes.stub_out_nw_api(self.stubs) - self.stubs.Set(uuid, 'uuid4', fake_gen_uuid) - self.stubs.Set(db, 'instance_add_security_group', - return_security_group) - self.stubs.Set(db, 'project_get_networks', - project_get_networks) - self.stubs.Set(db, 'instance_create', instance_create) - self.stubs.Set(db, 'instance_system_metadata_update', - fake_method) - self.stubs.Set(db, 'instance_get', instance_get) - self.stubs.Set(db, 'instance_update', instance_update) - self.stubs.Set(db, 'instance_update_and_get_original', - server_update) - self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip', - fake_method) - - def _test_create_extra(self, params, no_image=False, - override_controller=None): - image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' - server = dict(name='server_test', 
imageRef=image_uuid, flavorRef=2) - if no_image: - server.pop('imageRef', None) - server.update(params) - body = dict(server=server) - req = fakes.HTTPRequestV3.blank('/servers') - req.method = 'POST' - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" - if override_controller: - server = override_controller.create(req, body=body).obj['server'] - else: - server = self.controller.create(req, body=body).obj['server'] - - def test_create_instance_with_scheduler_hints_disabled(self): - hints = {'same_host': '48e6a9f6-30af-47e0-bc04-acaed113bb4e'} - params = {'os-scheduler-hints:scheduler_hints': hints} - old_create = compute_api.API.create - - def create(*args, **kwargs): - self.assertNotIn('scheduler_hints', kwargs) - # self.assertEqual(kwargs['scheduler_hints'], {}) - return old_create(*args, **kwargs) - - self.stubs.Set(compute_api.API, 'create', create) - self._test_create_extra(params, - override_controller=self.no_scheduler_hints_controller) - - def test_create_instance_with_scheduler_hints_enabled(self): - hints = {'same_host': '48e6a9f6-30af-47e0-bc04-acaed113bb4e'} - params = {'os-scheduler-hints:scheduler_hints': hints} - old_create = compute_api.API.create - - def create(*args, **kwargs): - self.assertEqual(kwargs['scheduler_hints'], hints) - return old_create(*args, **kwargs) - - self.stubs.Set(compute_api.API, 'create', create) - self._test_create_extra(params) diff --git a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl index cfba4ee9e2..a381df7444 100644 --- a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl @@ -2,9 +2,9 @@ "server" : { "name" : "new-server-test", "imageRef" : "%(glance_host)s/openstack/images/%(image_id)s", - "flavorRef" : 
"%(host)s/openstack/flavors/1", - "os-scheduler-hints:scheduler_hints": { - "same_host": "%(uuid)s" - } + "flavorRef" : "%(host)s/openstack/flavors/1" + }, + "OS-SCH-HNT:scheduler_hints": { + "same_host": "%(uuid)s" } } From 567e0ca39e48e574920e2ab2a8856ff8e021a5df Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Wed, 20 Aug 2014 05:40:20 +0000 Subject: [PATCH 454/486] Remove duplicated code in test_user_data In test_user_data, there is duplicated code for creating a request. In addition, there is an internal method which works as almost same as the duplicated code. This patch makes the tests use the internal method and removes the duplicated code for cleanup. Change-Id: Ie87cc14a3ac67bc1262de155dc0a1dcf5c5e7d9a --- .../compute/plugins/v3/test_user_data.py | 49 +++---------------- 1 file changed, 6 insertions(+), 43 deletions(-) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_user_data.py b/nova/tests/api/openstack/compute/plugins/v3/test_user_data.py index 4b47d07075..366471b668 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_user_data.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_user_data.py @@ -156,6 +156,7 @@ def _test_create_extra(self, params, no_image=False, server = override_controller.create(req, body=body).obj['server'] else: server = self.controller.create(req, body=body).obj['server'] + return server def test_create_instance_with_user_data_disabled(self): params = {user_data.ATTRIBUTE_NAME: base64.b64encode('fake')} @@ -182,51 +183,13 @@ def create(*args, **kwargs): self._test_create_extra(params) def test_create_instance_with_user_data(self): - image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' - flavor_ref = 'http://localhost/flavors/3' - value = "A random string" - body = { - 'server': { - 'name': 'user_data_test', - 'imageRef': image_href, - 'flavorRef': flavor_ref, - 'metadata': { - 'hello': 'world', - 'open': 'stack', - }, - user_data.ATTRIBUTE_NAME: base64.b64encode(value), - }, - } - - req = 
fakes.HTTPRequestV3.blank('/servers') - req.method = 'POST' - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" - res = self.controller.create(req, body=body).obj - - server = res['server'] + value = base64.b64encode("A random string") + params = {user_data.ATTRIBUTE_NAME: value} + server = self._test_create_extra(params) self.assertEqual(FAKE_UUID, server['id']) def test_create_instance_with_bad_user_data(self): - image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' - flavor_ref = 'http://localhost/flavors/3' value = "A random string" - body = { - 'server': { - 'name': 'user_data_test', - 'imageRef': image_href, - 'flavorRef': flavor_ref, - 'metadata': { - 'hello': 'world', - 'open': 'stack', - }, - user_data.ATTRIBUTE_NAME: value, - }, - } - - req = fakes.HTTPRequestV3.blank('/servers') - req.method = 'POST' - req.body = jsonutils.dumps(body) - req.headers["content-type"] = "application/json" + params = {user_data.ATTRIBUTE_NAME: value} self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.create, req, body=body) + self._test_create_extra, params) From ecce888c469c62374a3cc43e3cede11d8aa1e799 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 5 Aug 2014 14:38:31 +0200 Subject: [PATCH 455/486] libvirt: support live migrations of instances with config drives In case of shared storage, to allow an instance with a configdrive to be migrated if we must store the configdrive into the same backend as other disks. 
Related to bug #1246201 Change-Id: I699f0746da03f7ef669d68665996c839d4d99ee4 --- nova/exception.py | 6 ------ nova/tests/virt/libvirt/test_driver.py | 27 +++++++------------------- nova/virt/libvirt/driver.py | 27 ++++++++++++++++++-------- 3 files changed, 26 insertions(+), 34 deletions(-) diff --git a/nova/exception.py b/nova/exception.py index 52302469a4..beb49d09c0 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -1606,12 +1606,6 @@ class InvalidWatchdogAction(Invalid): msg_fmt = _("Provided watchdog action (%(action)s) is not supported.") -class NoLiveMigrationForConfigDriveInLibVirt(NovaException): - msg_fmt = _("Live migration of instances with config drives is not " - "supported in libvirt unless libvirt instance path and " - "drive data is shared across compute nodes.") - - class LiveMigrationWithOldNovaNotSafe(NovaException): msg_fmt = _("Host %(server)s is running an old version of Nova, " "live migrations involving that version may cause data loss. " diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index 280e7da317..b16f3f63bd 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -4801,26 +4801,6 @@ def fake_none(*args, **kwargs): 'vnc': '127.0.0.1'}} self.assertEqual(result, target_res) - def test_pre_live_migration_block_with_config_drive_mocked(self): - # Creating testdata - vol = {'block_device_mapping': [ - {'connection_info': 'dummy', 'mount_device': '/dev/sda'}, - {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]} - conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) - - def fake_true(*args, **kwargs): - return True - - self.stubs.Set(configdrive, 'required_by', fake_true) - - inst_ref = {'id': 'foo'} - c = context.get_admin_context() - - self.assertRaises(exception.NoLiveMigrationForConfigDriveInLibVirt, - conn.pre_live_migration, c, inst_ref, vol, None, - None, {'is_shared_instance_path': False, - 'is_shared_block_storage': 
False}) - def test_pre_live_migration_vol_backed_works_correctly_mocked(self): # Creating testdata, using temp dir. with utils.tempdir() as tmpdir: @@ -10470,6 +10450,13 @@ def test_rescue_config_drive(self): cdb.make_drive(mox.Regex(configdrive_path)) cdb.__exit__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg() ).AndReturn(None) + + imagebackend.Backend.image(instance, 'disk.config.rescue', 'raw' + ).AndReturn(fake_imagebackend.Raw()) + imagebackend.Image.cache(fetch_func=mox.IgnoreArg(), + context=mox.IgnoreArg(), + filename='disk.config.rescue') + image_meta = {'id': 'fake', 'name': 'fake'} self.libvirtconnection._get_guest_xml(mox.IgnoreArg(), instance, network_info, mox.IgnoreArg(), diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 6d197da81d..6f7a982993 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -2953,6 +2953,16 @@ def clone_fallback_to_fetch(*args, **kwargs): 'with error: %s'), e, instance=instance) + def dummy_fetch_func(target, *args, **kwargs): + # NOTE(sileht): this is never called because the + # the target have already been created by + # cdb.make_drive call + pass + + raw('disk.config').cache(fetch_func=dummy_fetch_func, + context=context, + filename='disk.config' + suffix) + # File injection only if needed elif inject_files and CONF.libvirt.inject_partition != -2: if booted_from_volume: @@ -3273,11 +3283,19 @@ def _get_guest_storage_config(self, instance, image_meta, vol.save() if 'disk.config' in disk_mapping: + # NOTE(sileht): a configdrive is a raw image + # it works well with rbd, lvm and raw images_type + # but we must force to raw image_type if the desired + # images_type is qcow2 + if CONF.libvirt.images_type not in ['rbd', 'lvm']: + image_type = "raw" + else: + image_type = None diskconfig = self._get_guest_disk_config(instance, 'disk.config', disk_mapping, inst_type, - 'raw') + image_type) devices.append(diskconfig) for d in devices: @@ -4931,13 +4949,6 @@ def 
pre_live_migration(self, context, instance, block_device_info, is_block_migration = migrate_data.get('block_migration', True) instance_relative_path = migrate_data.get('instance_relative_path') - if not (is_shared_instance_path and is_shared_block_storage): - # NOTE(mikal): live migration of instances using config drive is - # not supported because of a bug in libvirt (read only devices - # are not copied by libvirt). See bug/1246201 - if configdrive.required_by(instance): - raise exception.NoLiveMigrationForConfigDriveInLibVirt() - if not is_shared_instance_path: # NOTE(mikal): this doesn't use libvirt_utils.get_instance_path # because we are ensuring that the same instance directory name From 7907ade35c1066ebc012bb4c5c24c005b3f2902b Mon Sep 17 00:00:00 2001 From: jichenjc Date: Fri, 22 Aug 2014 16:55:01 +0800 Subject: [PATCH 456/486] Create compute api var at __init__ No need to create compute api object at every func call. Change-Id: I723cb5e26b2de68e1ea8bdb8505567030eb98406 --- nova/api/openstack/compute/contrib/server_diagnostics.py | 9 ++++++--- .../openstack/compute/plugins/v3/server_diagnostics.py | 9 ++++++--- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/nova/api/openstack/compute/contrib/server_diagnostics.py b/nova/api/openstack/compute/contrib/server_diagnostics.py index 96f5c68627..330cff14d7 100644 --- a/nova/api/openstack/compute/contrib/server_diagnostics.py +++ b/nova/api/openstack/compute/contrib/server_diagnostics.py @@ -38,18 +38,21 @@ def construct(self): class ServerDiagnosticsController(object): + def __init__(self): + self.compute_api = compute.API() + @wsgi.serializers(xml=ServerDiagnosticsTemplate) def index(self, req, server_id): context = req.environ["nova.context"] authorize(context) - compute_api = compute.API() try: - instance = compute_api.get(context, server_id, want_objects=True) + instance = self.compute_api.get(context, server_id, + want_objects=True) except exception.InstanceNotFound as e: raise 
webob.exc.HTTPNotFound(explanation=e.format_message()) try: - return compute_api.get_diagnostics(context, instance) + return self.compute_api.get_diagnostics(context, instance) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'get_diagnostics') diff --git a/nova/api/openstack/compute/plugins/v3/server_diagnostics.py b/nova/api/openstack/compute/plugins/v3/server_diagnostics.py index 4ca26fa3a7..57fe3ae406 100644 --- a/nova/api/openstack/compute/plugins/v3/server_diagnostics.py +++ b/nova/api/openstack/compute/plugins/v3/server_diagnostics.py @@ -27,18 +27,21 @@ class ServerDiagnosticsController(object): + def __init__(self): + self.compute_api = compute.API() + @extensions.expected_errors((404, 409, 501)) def index(self, req, server_id): context = req.environ["nova.context"] authorize(context) - compute_api = compute.API() try: - instance = compute_api.get(context, server_id, want_objects=True) + instance = self.compute_api.get(context, server_id, + want_objects=True) except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) try: - return compute_api.get_instance_diagnostics(context, instance) + return self.compute_api.get_instance_diagnostics(context, instance) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'get_diagnostics') From cad9e77091dfa896ad59233341e8947eca13f66b Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Fri, 22 Aug 2014 18:05:05 +0900 Subject: [PATCH 457/486] Fix V2 unit tests to test hypervisor API as admin Hypervisors API are Admin API and unit tests should test those accordingly. 
But All the V2 hypervisors Unit tests tests those as a non Admin API This patch fix this issue and add more non-admin unit tests Change-Id: I5c29828d567e2044786d4bcbff4970219c6e8c5f Closes-Bug: #1360113 --- .../contrib/test_extended_hypervisors.py | 3 +- .../compute/contrib/test_hypervisors.py | 62 ++++++++++++++++--- nova/tests/fake_policy.py | 2 +- 3 files changed, 55 insertions(+), 12 deletions(-) diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_hypervisors.py b/nova/tests/api/openstack/compute/contrib/test_extended_hypervisors.py index 72be69744c..fe6586c29b 100644 --- a/nova/tests/api/openstack/compute/contrib/test_extended_hypervisors.py +++ b/nova/tests/api/openstack/compute/contrib/test_extended_hypervisors.py @@ -91,7 +91,8 @@ def test_detail(self): host_ip='2.2.2.2')])) def test_show_withid(self): - req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1') + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1', + use_admin_context=True) result = self.controller.show(req, '1') self.assertEqual(result, dict(hypervisor=dict( diff --git a/nova/tests/api/openstack/compute/contrib/test_hypervisors.py b/nova/tests/api/openstack/compute/contrib/test_hypervisors.py index 80f4991754..c6814ce24f 100644 --- a/nova/tests/api/openstack/compute/contrib/test_hypervisors.py +++ b/nova/tests/api/openstack/compute/contrib/test_hypervisors.py @@ -20,7 +20,6 @@ from nova.api.openstack import extensions from nova import context from nova import db -from nova.db.sqlalchemy import api as db_api from nova import exception from nova import test from nova.tests.api.openstack import fakes @@ -83,7 +82,6 @@ dict(name="inst4", uuid="uuid4", host="compute2")] -@db_api.require_admin_context def fake_compute_node_get_all(context): return TEST_HYPERS @@ -200,6 +198,12 @@ def test_index(self): dict(id=1, hypervisor_hostname="hyper1"), dict(id=2, hypervisor_hostname="hyper2")])) + def test_index_non_admin(self): + req = 
fakes.HTTPRequest.blank('/v2/fake/os-hypervisors', + use_admin_context=False) + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.index, req) + def test_detail(self): req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/detail', use_admin_context=True) @@ -241,12 +245,20 @@ def test_detail(self): cpu_info='cpu_info', disk_available_least=100)])) + def test_detail_non_admin(self): + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/detail', + use_admin_context=False) + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.detail, req) + def test_show_noid(self): - req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/3') + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/3', + use_admin_context=True) self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3') def test_show_withid(self): - req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1') + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1', + use_admin_context=True) result = self.controller.show(req, '1') self.assertEqual(result, dict(hypervisor=dict( @@ -268,8 +280,15 @@ def test_show_withid(self): cpu_info='cpu_info', disk_available_least=100))) + def test_show_non_admin(self): + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1', + use_admin_context=False) + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.show, req, '1') + def test_uptime_noid(self): - req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/3') + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/3', + use_admin_context=True) self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3') def test_uptime_notimplemented(self): @@ -279,7 +298,8 @@ def fake_get_host_uptime(context, hyp): self.stubs.Set(self.controller.host_api, 'get_host_uptime', fake_get_host_uptime) - req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1') + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1', + use_admin_context=True) 
self.assertRaises(exc.HTTPNotImplemented, self.controller.uptime, req, '1') @@ -290,7 +310,8 @@ def fake_get_host_uptime(context, hyp): self.stubs.Set(self.controller.host_api, 'get_host_uptime', fake_get_host_uptime) - req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1') + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1', + use_admin_context=True) result = self.controller.uptime(req, '1') self.assertEqual(result, dict(hypervisor=dict( @@ -298,8 +319,15 @@ def fake_get_host_uptime(context, hyp): hypervisor_hostname="hyper1", uptime="fake uptime"))) + def test_uptime_non_admin(self): + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1', + use_admin_context=False) + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.uptime, req, '1') + def test_search(self): - req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/hyper/search') + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/hyper/search', + use_admin_context=True) result = self.controller.search(req, 'hyper') self.assertEqual(result, dict(hypervisors=[ @@ -307,7 +335,8 @@ def test_search(self): dict(id=2, hypervisor_hostname="hyper2")])) def test_servers(self): - req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/hyper/servers') + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/hyper/servers', + use_admin_context=True) result = self.controller.servers(req, 'hyper') self.assertEqual(result, dict(hypervisors=[ @@ -322,8 +351,15 @@ def test_servers(self): dict(name="inst2", uuid="uuid2"), dict(name="inst4", uuid="uuid4")])])) + def test_servers_non_admin(self): + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/hyper/servers', + use_admin_context=False) + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.servers, req, '1') + def test_statistics(self): - req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/statistics') + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/statistics', + use_admin_context=True) result = 
self.controller.statistics(req) self.assertEqual(result, dict(hypervisor_statistics=dict( @@ -340,6 +376,12 @@ def test_statistics(self): running_vms=4, disk_available_least=200))) + def test_statistics_non_admin(self): + req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/statistics', + use_admin_context=False) + self.assertRaises(exception.PolicyNotAuthorized, + self.controller.statistics, req) + class HypervisorsSerializersTest(test.NoDBTestCase): def compare_to_exemplar(self, exemplar, hyper): diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py index f4d74218ba..7e98cfef8f 100644 --- a/nova/tests/fake_policy.py +++ b/nova/tests/fake_policy.py @@ -218,7 +218,7 @@ "compute_extension:v3:os-hide-server-addresses": "", "compute_extension:hosts": "", "compute_extension:v3:os-hosts": "rule:admin_api", - "compute_extension:hypervisors": "", + "compute_extension:hypervisors": "rule:admin_api", "compute_extension:v3:os-hypervisors": "rule:admin_api", "compute_extension:image_size": "", "compute_extension:instance_actions": "", From d6fbc792446b0ee78217ee3769b1629732735899 Mon Sep 17 00:00:00 2001 From: Peter Krempa Date: Fri, 22 Aug 2014 13:38:10 +0200 Subject: [PATCH 458/486] Fix typo in comment While looking at the code I noticed a typo in a comment. 
Change-Id: Id31c3a87ca5c902699167bd2d63e8dea6d62ba23 --- nova/tests/virt/xenapi/test_xenapi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py index bc7bd3b15b..0453d222db 100644 --- a/nova/tests/virt/xenapi/test_xenapi.py +++ b/nova/tests/virt/xenapi/test_xenapi.py @@ -974,7 +974,7 @@ def _mount_handler(cmd, *ignore_args, **ignore_kwargs): return '', '' def _umount_handler(cmd, *ignore_args, **ignore_kwargs): - # Umount would normall make files in the m,ounted filesystem + # Umount would normally make files in the mounted filesystem # disappear, so do that here LOG.debug('Removing simulated guest agent files in %s', self._tmpdir) From 64520ceeacdb42b66d3b15474124d273d9e8ccf2 Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Wed, 18 Jun 2014 13:19:57 +0100 Subject: [PATCH 459/486] XenAPI: Remove interrupted snapshots Currently the VDI chain can grow very long when a snapshots happen at the same time as nova-compute being terminated. While we now clean up the instance state, the VDI chain is left in a bad state, it has an extra snapshot that is no longer required. This change improves that by looking at when we detect a failed snapshot, we go back and tidy up the VDI chain. 
Partial-Bug: #1331440 Change-Id: I9bae82048910d8c45bc2a4093064c1ac68f15750 --- nova/compute/manager.py | 9 +++ nova/tests/compute/test_compute_mgr.py | 3 + nova/tests/virt/test_virt_drivers.py | 6 ++ nova/tests/virt/xenapi/test_driver.py | 38 +++++----- nova/tests/virt/xenapi/test_vm_utils.py | 96 +++++++++++++++++++++++++ nova/tests/virt/xenapi/test_vmops.py | 12 ++++ nova/virt/driver.py | 8 +++ nova/virt/xenapi/driver.py | 4 ++ nova/virt/xenapi/vm_utils.py | 48 ++++++++++++- nova/virt/xenapi/vmops.py | 5 ++ 10 files changed, 210 insertions(+), 19 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7622aae7ae..7b59a26853 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -855,6 +855,12 @@ def _init_instance(self, context, instance): LOG.debug("Instance in transitional state %s at start-up " "clearing task state", instance['task_state'], instance=instance) + try: + self._post_interrupted_snapshot_cleanup(context, instance) + except Exception: + # we don't want that an exception blocks the init_host + msg = _LE('Failed to cleanup snapshot.') + LOG.exception(msg, instance=instance) instance.task_state = None instance.save() @@ -2939,6 +2945,9 @@ def update_task_state(task_state, msg = _("Image not found during snapshot") LOG.warn(msg, instance=instance) + def _post_interrupted_snapshot_cleanup(self, context, instance): + self.driver.post_interrupted_snapshot_cleanup(context, instance) + @object_compat @messaging.expected_exceptions(NotImplementedError) def volume_snapshot_create(self, context, instance, volume_id, diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 5a721c0391..370e4efcfc 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -484,10 +484,13 @@ def test_init_instance_sets_building_tasks_error_spawning(self): def _test_init_instance_cleans_image_states(self, instance): with mock.patch.object(instance, 'save') as 
save: self.compute._get_power_state = mock.Mock() + self.compute.driver.post_interrupted_snapshot_cleanup = mock.Mock() instance.info_cache = None instance.power_state = power_state.RUNNING self.compute._init_instance(self.context, instance) save.assert_called_once_with() + self.compute.driver.post_interrupted_snapshot_cleanup.\ + assert_called_once_with(self.context, instance) self.assertIsNone(instance.task_state) def test_init_instance_cleans_image_state_pending_upload(self): diff --git a/nova/tests/virt/test_virt_drivers.py b/nova/tests/virt/test_virt_drivers.py index 9212807ac5..a8062bdbcc 100644 --- a/nova/tests/virt/test_virt_drivers.py +++ b/nova/tests/virt/test_virt_drivers.py @@ -263,6 +263,12 @@ def test_snapshot_running(self): self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'], lambda *args, **kwargs: None) + @catch_notimplementederror + def test_post_interrupted_snapshot_cleanup(self): + instance_ref, network_info = self._get_running_instance() + self.connection.post_interrupted_snapshot_cleanup(self.ctxt, + instance_ref) + @catch_notimplementederror def test_reboot(self): reboot_type = "SOFT" diff --git a/nova/tests/virt/xenapi/test_driver.py b/nova/tests/virt/xenapi/test_driver.py index 8e5c538563..7276cc4b5a 100644 --- a/nova/tests/virt/xenapi/test_driver.py +++ b/nova/tests/virt/xenapi/test_driver.py @@ -15,6 +15,8 @@ import math +import mock + from nova.openstack.common import units from nova.tests.virt import test_driver from nova.tests.virt.xenapi import stubs @@ -27,6 +29,12 @@ class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB, test_driver.DriverAPITestHelper): """Unit tests for Driver operations.""" + def _get_driver(self): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + self.flags(connection_url='test_url', + connection_password='test_pass', group='xenserver') + return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False) + def host_stats(self, refresh=True): return {'host_memory_total': 3 * units.Mi, 
'host_memory_free_computed': 2 * units.Mi, @@ -40,11 +48,7 @@ def host_stats(self, refresh=True): 'pci_passthrough_devices': ''} def test_available_resource(self): - self.flags(connection_url='test_url', - connection_password='test_pass', group='xenserver') - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - - driver = xenapi.XenAPIDriver(fake.FakeVirtAPI(), False) + driver = self._get_driver() driver._session.product_version = (6, 8, 2) self.stubs.Set(driver, 'get_host_stats', self.host_stats) @@ -62,10 +66,7 @@ def test_available_resource(self): self.assertEqual(1, resources['disk_available_least']) def test_overhead(self): - self.flags(connection_url='test_url', - connection_password='test_pass', group='xenserver') - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - driver = xenapi.XenAPIDriver(fake.FakeVirtAPI(), False) + driver = self._get_driver() instance = {'memory_mb': 30720, 'vcpus': 4} # expected memory overhead per: @@ -78,10 +79,7 @@ def test_overhead(self): self.assertEqual(expected, overhead['memory_mb']) def test_set_bootable(self): - self.flags(connection_url='test_url', connection_password='test_pass', - group='xenserver') - stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - driver = xenapi.XenAPIDriver(fake.FakeVirtAPI(), False) + driver = self._get_driver() self.mox.StubOutWithMock(driver._vmops, 'set_bootable') driver._vmops.set_bootable('inst', True) @@ -89,9 +87,15 @@ def test_set_bootable(self): driver.set_bootable('inst', True) + def test_post_interrupted_snapshot_cleanup(self): + driver = self._get_driver() + fake_vmops_cleanup = mock.Mock() + driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup + + driver.post_interrupted_snapshot_cleanup("context", "instance") + + fake_vmops_cleanup.assert_called_once_with("context", "instance") + def test_public_api_signatures(self): - self.flags(connection_url='test_url', connection_password='test_pass', - group='xenserver') - 
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) - inst = xenapi.XenAPIDriver(fake.FakeVirtAPI(), False) + inst = self._get_driver() self.assertPublicAPISignatures(inst) diff --git a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py index ee95fec38b..c3ba4e5150 100644 --- a/nova/tests/virt/xenapi/test_vm_utils.py +++ b/nova/tests/virt/xenapi/test_vm_utils.py @@ -2310,6 +2310,102 @@ def test_list_vms(self): self.assertIn(vm_ref, result_keys) +class ChildVHDsTestCase(test.NoDBTestCase): + all_vdis = [ + ("my-vdi-ref", + {"uuid": "my-uuid", "sm_config": {}, + "is_a_snapshot": False, "other_config": {}}), + ("non-parent", + {"uuid": "uuid-1", "sm_config": {}, + "is_a_snapshot": False, "other_config": {}}), + ("diff-parent", + {"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"}, + "is_a_snapshot": False, "other_config": {}}), + ("child", + {"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"}, + "is_a_snapshot": False, "other_config": {}}), + ("child-snap", + {"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"}, + "is_a_snapshot": True, "other_config": {}}), + ] + + @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') + def test_child_vhds_defaults(self, mock_get_all): + mock_get_all.return_value = self.all_vdis + + result = vm_utils._child_vhds("session", "sr_ref", "my-uuid") + + self.assertEqual(['uuid-child', 'uuid-child-snap'], result) + + @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') + def test_child_vhds_only_snapshots(self, mock_get_all): + mock_get_all.return_value = self.all_vdis + + result = vm_utils._child_vhds("session", "sr_ref", "my-uuid", + old_snapshots_only=True) + + self.assertEqual(['uuid-child-snap'], result) + + def test_is_vdi_a_snapshot_works(self): + vdi_rec = {"is_a_snapshot": True, + "other_config": {}} + + self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec)) + + def test_is_vdi_a_snapshot_base_images_false(self): + vdi_rec = {"is_a_snapshot": True, + 
"other_config": {"image-id": "fake"}} + + self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec)) + + def test_is_vdi_a_snapshot_false_for_non_snapshot(self): + vdi_rec = {"is_a_snapshot": False, + "other_config": {}} + + self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec)) + + +class RemoveOldSnapshotsTestCase(test.NoDBTestCase): + + @mock.patch.object(vm_utils, '_child_vhds') + @mock.patch.object(vm_utils, '_get_vhd_parent_uuid') + @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely') + @mock.patch.object(vm_utils, 'safe_find_sr') + def test_get_snapshots_for_vm(self, mock_find, mock_get_vdi, + mock_parent, mock_child_vhds): + session = mock.Mock() + instance = {"uuid": "uuid"} + mock_find.return_value = "sr_ref" + mock_get_vdi.return_value = ("vm_vdi_ref", "vm_vdi_rec") + mock_parent.return_value = "parent_uuid" + mock_child_vhds.return_value = [] + + result = vm_utils._get_snapshots_for_vm(session, instance, "vm_ref") + + self.assertEqual([], result) + mock_find.assert_called_once_with(session) + mock_get_vdi.assert_called_once_with(session, "vm_ref") + mock_parent.assert_called_once_with(session, "vm_vdi_ref") + mock_child_vhds.assert_called_once_with(session, "sr_ref", + "parent_uuid", old_snapshots_only=True) + + @mock.patch.object(vm_utils, 'scan_default_sr') + @mock.patch.object(vm_utils, 'safe_destroy_vdis') + @mock.patch.object(vm_utils, '_get_snapshots_for_vm') + def test_remove_old_snapshots(self, mock_get, mock_destroy, mock_scan): + session = mock.Mock() + instance = {"uuid": "uuid"} + mock_get.return_value = ["vdi_uuid1", "vdi_uuid2"] + session.VDI.get_by_uuid.return_value = "vdi_ref" + + vm_utils.remove_old_snapshots(session, instance, "vm_ref") + + self.assertTrue(mock_scan.called) + session.VDI.get_by_uuid.assert_called_once_with("vdi_uuid1") + mock_destroy.assert_called_once_with(session, ["vdi_ref"]) + mock_scan.assert_called_once_with(session) + + class ResizeFunctionTestCase(test.NoDBTestCase): def _call_get_resize_func_name(self, brand, 
version): session = mock.Mock() diff --git a/nova/tests/virt/xenapi/test_vmops.py b/nova/tests/virt/xenapi/test_vmops.py index 7fcea40f70..e0e7cbc33d 100644 --- a/nova/tests/virt/xenapi/test_vmops.py +++ b/nova/tests/virt/xenapi/test_vmops.py @@ -933,6 +933,18 @@ def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes, None, 5, 1000) +@mock.patch.object(vm_utils, 'remove_old_snapshots') +class CleanupFailedSnapshotTestCase(VMOpsTestBase): + def test_post_interrupted_snapshot_cleanup(self, mock_remove): + self.vmops._get_vm_opaque_ref = mock.Mock() + self.vmops._get_vm_opaque_ref.return_value = "vm_ref" + + self.vmops.post_interrupted_snapshot_cleanup("context", "instance") + + mock_remove.assert_called_once_with(self.vmops._session, + "instance", "vm_ref") + + class LiveMigrateHelperTestCase(VMOpsTestBase): def test_connect_block_device_volumes_none(self): self.assertEqual({}, self.vmops.connect_block_device_volumes(None)) diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 6edba8fdde..f12757ad6a 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -486,6 +486,14 @@ def snapshot(self, context, instance, image_id, update_task_state): """ raise NotImplementedError() + def post_interrupted_snapshot_cleanup(self, context, instance): + """Cleans up any resources left after an interrupted snapshot. 
+ + :param context: security context + :param instance: nova.objects.instance.Instance + """ + pass + def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info=None, power_on=True): diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py index 8e107ec16d..97aa46f208 100644 --- a/nova/virt/xenapi/driver.py +++ b/nova/virt/xenapi/driver.py @@ -228,6 +228,10 @@ def snapshot(self, context, instance, image_id, update_task_state): """Create snapshot from a running VM instance.""" self._vmops.snapshot(context, instance, image_id, update_task_state) + def post_interrupted_snapshot_cleanup(self, context, instance): + """Cleans up any resources left after a failed snapshot.""" + self._vmops.post_interrupted_snapshot_cleanup(context, instance) + def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None): """Reboot VM instance.""" diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index e7106197dc..3f261a4834 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -728,6 +728,40 @@ def strip_base_mirror_from_vdis(session, vm_ref): _try_strip_base_mirror_from_vdi(session, vdi_ref) +def _get_snapshots_for_vm(session, instance, vm_ref): + sr_ref = safe_find_sr(session) + vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) + parent_uuid = _get_vhd_parent_uuid(session, vm_vdi_ref) + + if not parent_uuid: + return [] + + return _child_vhds(session, sr_ref, parent_uuid, old_snapshots_only=True) + + +def remove_old_snapshots(session, instance, vm_ref): + """See if there is an snapshot present that should be removed.""" + LOG.debug("Starting remove_old_snapshots for VM", instance=instance) + + snapshot_uuids = _get_snapshots_for_vm(session, instance, vm_ref) + number_of_snapshots = len(snapshot_uuids) + + if number_of_snapshots <= 0: + LOG.debug("No snapshots to remove.", instance=instance) + 
return + + if number_of_snapshots > 1: + LOG.debug("More snapshots than expected, only deleting one.", + instance=instance) + + vdi_uuid = snapshot_uuids[0] + vdi_ref = session.VDI.get_by_uuid(vdi_uuid) + safe_destroy_vdis(session, [vdi_ref]) + scan_default_sr(session) + # TODO(johnthetubaguy): we could look for older snapshots too + LOG.debug("Removed one old snapshot.", instance=instance) + + @contextlib.contextmanager def snapshot_attached_here(session, instance, vm_ref, label, userdevice='0', post_snapshot_callback=None): @@ -2048,7 +2082,14 @@ def _walk_vdi_chain(session, vdi_uuid): vdi_uuid = parent_uuid -def _child_vhds(session, sr_ref, vdi_uuid): +def _is_vdi_a_snapshot(vdi_rec): + """Ensure VDI is a snapshot, and not cached image.""" + is_a_snapshot = vdi_rec['is_a_snapshot'] + image_id = vdi_rec['other_config'].get('image-id') + return is_a_snapshot and not image_id + + +def _child_vhds(session, sr_ref, vdi_uuid, old_snapshots_only=False): """Return the immediate children of a given VHD. This is not recursive, only the immediate children are returned. 
@@ -2064,9 +2105,12 @@ def _child_vhds(session, sr_ref, vdi_uuid): if parent_uuid != vdi_uuid: continue + if old_snapshots_only and not _is_vdi_a_snapshot(rec): + continue + children.add(rec_uuid) - return children + return list(children) def _count_parents_children(session, vdi_ref, sr_ref): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 4d276799ed..0cba7cda19 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -752,6 +752,11 @@ def snapshot(self, context, instance, image_id, update_task_state): LOG.debug("Finished snapshot and upload for VM", instance=instance) + def post_interrupted_snapshot_cleanup(self, context, instance): + """Cleans up any resources left after a failed snapshot.""" + vm_ref = self._get_vm_opaque_ref(instance) + vm_utils.remove_old_snapshots(self._session, instance, vm_ref) + def _get_orig_vm_name_label(self, instance): return instance['name'] + '-orig' From 20eb09b73e801ab2cdcb8f0461fb05f653e1d755 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Wed, 20 Aug 2014 03:21:23 +0000 Subject: [PATCH 460/486] Allow to create a flavor without specifying id On current v2 API, "create a flavor" API allows a request without specifying "id" parameter. The API of v3 API, which is source of v2.1, doesn't allow it now. However, it will cause a backwards incompatible issue when implementing v2.1 API. This patch makes v3 API allow it. 
Partially implements blueprint v2-on-v3-api Change-Id: I2d3fda323df57f8c0c116347e3c4a38c807de31b --- .../compute/schemas/v3/flavor_manage.py | 9 +++++++- .../compute/plugins/v3/test_flavor_manage.py | 23 +++++++++++++------ 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/nova/api/openstack/compute/schemas/v3/flavor_manage.py b/nova/api/openstack/compute/schemas/v3/flavor_manage.py index 173dd1783c..48c4fb2d83 100644 --- a/nova/api/openstack/compute/schemas/v3/flavor_manage.py +++ b/nova/api/openstack/compute/schemas/v3/flavor_manage.py @@ -62,7 +62,14 @@ }, 'flavor-access:is_public': parameter_types.boolean, }, - 'required': ['name', 'id', 'ram', 'vcpus', 'disk'], + # TODO(oomichi): 'id' should be required with v2.1+microversions. + # On v2.0 API, nova-api generates a flavor-id automatically if + # specifying null as 'id' or not specifying 'id'. Ideally a client + # should specify null as 'id' for requesting auto-generated id + # exactly. However, this strict limitation causes a backwards + # incompatible issue on v2.1. So now here relaxes the requirement + # of 'id'. + 'required': ['name', 'ram', 'vcpus', 'disk'], 'additionalProperties': False, }, }, diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_flavor_manage.py b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_manage.py index 0f375e44f4..466caca4f2 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_flavor_manage.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_manage.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import copy import datetime import webob @@ -167,16 +168,24 @@ def test_create_without_flavor(self): body = {'foo': None} self._test_create_bad_request(body) - def test_create_without_flavorid(self): - expected = self.expected_flavor - expected['flavor']['id'] = None - - res = self._create_flavor_helper(expected) + def _test_create_with_autogenerated_flavorid(self, request_body): + res = self._create_flavor_helper(request_body) body = jsonutils.loads(res.body) - for key in expected["flavor"]: + for key in self.expected_flavor["flavor"]: if key != 'id': - self.assertEqual(body["flavor"][key], expected["flavor"][key]) + self.assertEqual(body["flavor"][key], + self.expected_flavor["flavor"][key]) + + def test_create_with_none_flavorid(self): + body = copy.deepcopy(self.expected_flavor) + body['flavor']['id'] = None + self._test_create_with_autogenerated_flavorid(body) + + def test_create_without_flavorid(self): + body = copy.deepcopy(self.expected_flavor) + del body['flavor']['id'] + self._test_create_with_autogenerated_flavorid(body) def test_flavor_exists_exception_returns_409(self): expected = self.expected_flavor From f31ac7711bacb1408b17759426f3407937ba4d79 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 18 Aug 2014 10:50:00 -0700 Subject: [PATCH 461/486] Add QuotaError handling to servers rebuild API Commit 718a3f057cee0b1163c40fbcbedda29bd2ef9dfe made nova.compute.api.API._check_injected_file_quota raise more specific over-quota exceptions but the rebuild API was never updated to handle the QuotaError exceptions and translate to a proper HTTP error. This change does a few things: 1. Makes the specific file path/content limit exceeded exceptions extend the more generic OnsetFileLimitExceeded exception. 2. Adds the OverQuota checking to the rebuild APIs with tests. 3. Adds unit tests for the three different exceptions raised from _check_injected_file_quota in the compute API since those did not exist before. 
Closes-Bug: #1358380 Change-Id: I9c72dea6075fcf554abb8e669cf4dd3129176912 --- .../openstack/compute/plugins/v3/servers.py | 4 ++- nova/api/openstack/compute/servers.py | 2 ++ nova/exception.py | 4 +-- .../compute/plugins/v3/test_servers.py | 19 ++++++++++ .../api/openstack/compute/test_servers.py | 19 ++++++++++ nova/tests/compute/test_compute_api.py | 36 +++++++++++++++++++ 6 files changed, 81 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index 2012b8e176..17d4198fad 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -829,7 +829,7 @@ def _action_resize(self, req, id, body): return self._resize(req, id, flavor_ref, **resize_kwargs) - @extensions.expected_errors((400, 404, 409, 413)) + @extensions.expected_errors((400, 403, 404, 409, 413)) @wsgi.response(202) @wsgi.action('rebuild') def _action_rebuild(self, req, id, body): @@ -894,6 +894,8 @@ def _action_rebuild(self, req, id, body): except exception.ImageNotFound: msg = _("Cannot find image for rebuild") raise exc.HTTPBadRequest(explanation=msg) + except exception.QuotaError as error: + raise exc.HTTPForbidden(explanation=error.format_message()) except (exception.ImageNotActive, exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index 13a66d28f2..5fa81d35a2 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -1393,6 +1393,8 @@ def _action_rebuild(self, req, id, body): except exception.ImageNotFound: msg = _("Cannot find image for rebuild") raise exc.HTTPBadRequest(explanation=msg) + except exception.QuotaError as error: + raise exc.HTTPForbidden(explanation=error.format_message()) except (exception.ImageNotActive, exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, diff --git a/nova/exception.py 
b/nova/exception.py index 322a2a43e5..1139d61533 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -1181,11 +1181,11 @@ class OnsetFileLimitExceeded(QuotaError): msg_fmt = _("Personality file limit exceeded") -class OnsetFilePathLimitExceeded(QuotaError): +class OnsetFilePathLimitExceeded(OnsetFileLimitExceeded): msg_fmt = _("Personality file path too long") -class OnsetFileContentLimitExceeded(QuotaError): +class OnsetFileContentLimitExceeded(OnsetFileLimitExceeded): msg_fmt = _("Personality file content too long") diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index ff3436babd..fb69824d4e 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -15,6 +15,7 @@ # under the License. import base64 +import contextlib import copy import datetime import uuid @@ -1483,6 +1484,24 @@ def fake_get_image(self, context, image_href, **kwargs): self.controller._action_rebuild, self.req, FAKE_UUID, body=self.body) + def test_rebuild_instance_onset_file_limit_over_quota(self): + def fake_get_image(self, context, image_href, **kwargs): + return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', + name='public image', is_public=True, status='active') + + with contextlib.nested( + mock.patch.object(fake._FakeImageService, 'show', + side_effect=fake_get_image), + mock.patch.object(self.controller.compute_api, 'rebuild', + side_effect=exception.OnsetFileLimitExceeded) + ) as ( + show_mock, rebuild_mock + ): + self.req.body = jsonutils.dumps(self.body) + self.assertRaises(webob.exc.HTTPForbidden, + self.controller._action_rebuild, + self.req, FAKE_UUID, body=self.body) + def test_start(self): self.mox.StubOutWithMock(compute_api.API, 'start') compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg()) diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py 
index e5dbdd79ac..e74e6f0ea8 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -16,6 +16,7 @@ # under the License. import base64 +import contextlib import datetime import uuid @@ -1660,6 +1661,24 @@ def fake_get_image(self, context, image_href, **kwargs): self.assertRaises(webob.exc.HTTPBadRequest, self.controller._action_rebuild, self.req, FAKE_UUID, self.body) + def test_rebuild_instance_onset_file_limit_over_quota(self): + def fake_get_image(self, context, image_href, **kwargs): + return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', + name='public image', is_public=True, status='active') + + with contextlib.nested( + mock.patch.object(fake._FakeImageService, 'show', + side_effect=fake_get_image), + mock.patch.object(self.controller.compute_api, 'rebuild', + side_effect=exception.OnsetFileLimitExceeded) + ) as ( + show_mock, rebuild_mock + ): + self.req.body = jsonutils.dumps(self.body) + self.assertRaises(webob.exc.HTTPForbidden, + self.controller._action_rebuild, + self.req, FAKE_UUID, body=self.body) + def test_rebuild_instance_with_access_ipv6_bad_format(self): # proper local hrefs must start with 'http://localhost/v2/' self.body['rebuild']['accessIPv4'] = '1.2.3.4' diff --git a/nova/tests/compute/test_compute_api.py b/nova/tests/compute/test_compute_api.py index b1d004558f..cd225afe28 100644 --- a/nova/tests/compute/test_compute_api.py +++ b/nova/tests/compute/test_compute_api.py @@ -1974,6 +1974,42 @@ def get_image(context, image_href): None, new_image, flavor, {}, []) self.assertEqual(vm_mode.XEN, instance.vm_mode) + def _test_check_injected_file_quota_onset_file_limit_exceeded(self, + side_effect): + injected_files = [ + { + "path": "/etc/banner.txt", + "contents": "foo" + } + ] + with mock.patch.object(quota.QUOTAS, 'limit_check', + side_effect=side_effect): + self.compute_api._check_injected_file_quota( + self.context, injected_files) + + def 
test_check_injected_file_quota_onset_file_limit_exceeded(self): + # This is the first call to limit_check. + side_effect = exception.OverQuota(overs='injected_files') + self.assertRaises(exception.OnsetFileLimitExceeded, + self._test_check_injected_file_quota_onset_file_limit_exceeded, + side_effect) + + def test_check_injected_file_quota_onset_file_path_limit(self): + # This is the second call to limit_check. + side_effect = (mock.DEFAULT, + exception.OverQuota(overs='injected_file_path_bytes')) + self.assertRaises(exception.OnsetFilePathLimitExceeded, + self._test_check_injected_file_quota_onset_file_limit_exceeded, + side_effect) + + def test_check_injected_file_quota_onset_file_content_limit(self): + # This is the second call to limit_check but with different overs. + side_effect = (mock.DEFAULT, + exception.OverQuota(overs='injected_file_content_bytes')) + self.assertRaises(exception.OnsetFileContentLimitExceeded, + self._test_check_injected_file_quota_onset_file_limit_exceeded, + side_effect) + @mock.patch('nova.objects.Quotas.commit') @mock.patch('nova.objects.Quotas.reserve') @mock.patch('nova.objects.Instance.save') From ff256f5ca3805209bcbf1d91367f191ca93528c2 Mon Sep 17 00:00:00 2001 From: Claudiu Belu Date: Thu, 17 Jul 2014 19:32:38 +0300 Subject: [PATCH 462/486] Adds Hyper-V Compute Driver soft reboot implementation Adds a soft reboot implementation for feature parity with other Nova drivers as specified in the base virt driver's interface. 
Co-Authored-By: Simona Iuliana Toader Change-Id: I963cbdacdfefca47d6e45fa0aafac188963bcaed Implements: blueprint hyper-v-soft-reboot --- nova/tests/virt/hyperv/test_vmops.py | 91 ++++++++++++++++++++++++++ nova/tests/virt/hyperv/test_vmutils.py | 21 ++++++ nova/virt/hyperv/constants.py | 2 + nova/virt/hyperv/vmops.py | 78 +++++++++++++++++++++- nova/virt/hyperv/vmutils.py | 18 +++++ nova/virt/hyperv/vmutilsv2.py | 1 + 6 files changed, 209 insertions(+), 2 deletions(-) diff --git a/nova/tests/virt/hyperv/test_vmops.py b/nova/tests/virt/hyperv/test_vmops.py index 518fca8ece..020dd95706 100644 --- a/nova/tests/virt/hyperv/test_vmops.py +++ b/nova/tests/virt/hyperv/test_vmops.py @@ -12,17 +12,22 @@ # License for the specific language governing permissions and limitations # under the License. +from eventlet import timeout as etimeout import mock from nova import exception from nova import test from nova.tests import fake_instance +from nova.virt.hyperv import constants from nova.virt.hyperv import vmops +from nova.virt.hyperv import vmutils class VMOpsTestCase(test.NoDBTestCase): """Unit tests for the Hyper-V VMOps class.""" + _FAKE_TIMEOUT = 0 + def __init__(self, test_case_name): super(VMOpsTestCase, self).__init__(test_case_name) @@ -44,3 +49,89 @@ def test_attach_config_drive(self): self.assertRaises(exception.InvalidDiskFormat, self._vmops.attach_config_drive, instance, 'C:/fake_instance_dir/configdrive.xxx') + + def test_reboot_hard(self): + self._test_reboot(vmops.REBOOT_TYPE_HARD, + constants.HYPERV_VM_STATE_REBOOT) + + @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") + def test_reboot_soft(self, mock_soft_shutdown): + mock_soft_shutdown.return_value = True + self._test_reboot(vmops.REBOOT_TYPE_SOFT, + constants.HYPERV_VM_STATE_ENABLED) + + @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") + def test_reboot_soft_failed(self, mock_soft_shutdown): + mock_soft_shutdown.return_value = False + self._test_reboot(vmops.REBOOT_TYPE_SOFT, + 
constants.HYPERV_VM_STATE_REBOOT) + + @mock.patch("nova.virt.hyperv.vmops.VMOps.power_on") + @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") + def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on): + mock_soft_shutdown.return_value = True + mock_power_on.side_effect = vmutils.HyperVException("Expected failure") + instance = fake_instance.fake_instance_obj(self.context) + + self.assertRaises(vmutils.HyperVException, self._vmops.reboot, + instance, {}, vmops.REBOOT_TYPE_SOFT) + + mock_soft_shutdown.assert_called_once_with(instance) + mock_power_on.assert_called_once_with(instance) + + def _test_reboot(self, reboot_type, vm_state): + instance = fake_instance.fake_instance_obj(self.context) + with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state: + self._vmops.reboot(instance, {}, reboot_type) + mock_set_state.assert_called_once_with(instance.name, vm_state) + + @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm") + @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off") + def test_soft_shutdown(self, mock_wait_for_power_off, mock_shutdown_vm): + instance = fake_instance.fake_instance_obj(self.context) + mock_wait_for_power_off.return_value = True + + result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT) + + mock_shutdown_vm.assert_called_once_with(instance.name) + mock_wait_for_power_off.assert_called_once_with( + instance.name, self._FAKE_TIMEOUT) + + self.assertTrue(result) + + @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm") + def test_soft_shutdown_failed(self, mock_shutdown_vm): + instance = fake_instance.fake_instance_obj(self.context) + + mock_shutdown_vm.side_effect = vmutils.HyperVException( + "Expected failure.") + + result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT) + + mock_shutdown_vm.assert_called_once_with(instance.name) + self.assertFalse(result) + + def test_get_vm_state(self): + summary_info = {'EnabledState': 
constants.HYPERV_VM_STATE_DISABLED} + + with mock.patch.object(self._vmops._vmutils, + 'get_vm_summary_info') as mock_get_summary_info: + mock_get_summary_info.return_value = summary_info + + response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME) + self.assertEqual(response, constants.HYPERV_VM_STATE_DISABLED) + + @mock.patch.object(vmops.VMOps, '_get_vm_state') + def test_wait_for_power_off_true(self, mock_get_state): + mock_get_state.return_value = constants.HYPERV_VM_STATE_DISABLED + result = self._vmops._wait_for_power_off( + mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT) + mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME) + self.assertTrue(result) + + @mock.patch.object(vmops.etimeout, "with_timeout") + def test_wait_for_power_off_false(self, mock_with_timeout): + mock_with_timeout.side_effect = etimeout.Timeout() + result = self._vmops._wait_for_power_off( + mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT) + self.assertFalse(result) diff --git a/nova/tests/virt/hyperv/test_vmutils.py b/nova/tests/virt/hyperv/test_vmutils.py index a883ce6ee8..65c7f84cb9 100644 --- a/nova/tests/virt/hyperv/test_vmutils.py +++ b/nova/tests/virt/hyperv/test_vmutils.py @@ -147,6 +147,27 @@ def _test_set_vm_memory_dynamic(self, dynamic_memory_ratio): else: self.assertFalse(mock_s.DynamicMemoryEnabled) + def test_soft_shutdown_vm(self): + mock_vm = self._lookup_vm() + mock_shutdown = mock.MagicMock() + mock_shutdown.InitiateShutdown.return_value = (self._FAKE_RET_VAL, ) + mock_vm.associators.return_value = [mock_shutdown] + + with mock.patch.object(self._vmutils, 'check_ret_val') as mock_check: + self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME) + + mock_shutdown.InitiateShutdown.assert_called_once_with( + Force=False, Reason=mock.ANY) + mock_check.assert_called_once_with(self._FAKE_RET_VAL, None) + + def test_soft_shutdown_vm_no_component(self): + mock_vm = self._lookup_vm() + mock_vm.associators.return_value = [] + + with 
mock.patch.object(self._vmutils, 'check_ret_val') as mock_check: + self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME) + self.assertFalse(mock_check.called) + @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks') def test_get_vm_storage_paths(self, mock_get_vm_disks): self._lookup_vm() diff --git a/nova/virt/hyperv/constants.py b/nova/virt/hyperv/constants.py index 9c2116ac5e..4aaaf2ffa8 100644 --- a/nova/virt/hyperv/constants.py +++ b/nova/virt/hyperv/constants.py @@ -21,12 +21,14 @@ HYPERV_VM_STATE_ENABLED = 2 HYPERV_VM_STATE_DISABLED = 3 +HYPERV_VM_STATE_SHUTTING_DOWN = 4 HYPERV_VM_STATE_REBOOT = 10 HYPERV_VM_STATE_PAUSED = 32768 HYPERV_VM_STATE_SUSPENDED = 32769 HYPERV_POWER_STATE = { HYPERV_VM_STATE_DISABLED: power_state.SHUTDOWN, + HYPERV_VM_STATE_SHUTTING_DOWN: power_state.SHUTDOWN, HYPERV_VM_STATE_ENABLED: power_state.RUNNING, HYPERV_VM_STATE_PAUSED: power_state.PAUSED, HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py index 2cc5bef91c..c44cda09d9 100644 --- a/nova/virt/hyperv/vmops.py +++ b/nova/virt/hyperv/vmops.py @@ -20,14 +20,16 @@ import functools import os +from eventlet import timeout as etimeout from oslo.config import cfg from nova.api.metadata import base as instance_metadata from nova import exception -from nova.i18n import _ +from nova.i18n import _, _LI, _LW from nova.openstack.common import excutils from nova.openstack.common import importutils from nova.openstack.common import log as logging +from nova.openstack.common import loopingcall from nova.openstack.common import processutils from nova.openstack.common import units from nova import utils @@ -69,7 +71,12 @@ 'the ratio between the total RAM assigned to an ' 'instance and its startup RAM amount. 
For example a ' 'ratio of 2.0 for an instance with 1024MB of RAM ' - 'implies 512MB of RAM allocated at startup') + 'implies 512MB of RAM allocated at startup'), + cfg.IntOpt('wait_soft_reboot_seconds', + default=60, + help='Number of seconds to wait for instance to shut down after' + ' soft reboot request is made. We fall back to hard reboot' + ' if instance does not shutdown within this window.'), ] CONF = cfg.CONF @@ -77,6 +84,10 @@ CONF.import_opt('use_cow_images', 'nova.virt.driver') CONF.import_opt('network_api_class', 'nova.network') +SHUTDOWN_TIME_INCREMENT = 5 +REBOOT_TYPE_SOFT = 'SOFT' +REBOOT_TYPE_HARD = 'HARD' + def check_admin_permissions(function): @functools.wraps(function) @@ -393,9 +404,39 @@ def destroy(self, instance, network_info=None, block_device_info=None, def reboot(self, instance, network_info, reboot_type): """Reboot the specified instance.""" LOG.debug("Rebooting instance", instance=instance) + + if reboot_type == REBOOT_TYPE_SOFT: + if self._soft_shutdown(instance): + self.power_on(instance) + return + self._set_vm_state(instance['name'], constants.HYPERV_VM_STATE_REBOOT) + def _soft_shutdown(self, instance, + timeout=CONF.hyperv.wait_soft_reboot_seconds): + """Perform a soft shutdown on the VM. + + :return: True if the instance was shutdown within time limit, + False otherwise. + """ + LOG.debug("Performing Soft shutdown on instance", instance=instance) + + try: + self._vmutils.soft_shutdown_vm(instance.name) + if self._wait_for_power_off(instance.name, timeout): + LOG.info(_LI("Soft shutdown succeded."), instance=instance) + return True + except vmutils.HyperVException as e: + # Exception is raised when trying to shutdown the instance + # while it is still booting. 
+ LOG.warning(_LW("Soft shutdown failed: %s"), e, instance=instance) + return False + + LOG.warning(_LW("Timed out while waiting for soft shutdown."), + instance=instance) + return False + def pause(self, instance): """Pause VM instance.""" LOG.debug("Pause instance", instance=instance) @@ -443,3 +484,36 @@ def _set_vm_state(self, vm_name, req_state): LOG.error(_("Failed to change vm state of %(vm_name)s" " to %(req_state)s"), {'vm_name': vm_name, 'req_state': req_state}) + + def _get_vm_state(self, instance_name): + summary_info = self._vmutils.get_vm_summary_info(instance_name) + return summary_info['EnabledState'] + + def _wait_for_power_off(self, instance_name, time_limit): + """Waiting for a VM to be in a disabled state. + + :return: True if the instance is shutdown within time_limit, + False otherwise. + """ + + desired_vm_states = [constants.HYPERV_VM_STATE_DISABLED] + + def _check_vm_status(instance_name): + if self._get_vm_state(instance_name) in desired_vm_states: + raise loopingcall.LoopingCallDone() + + periodic_call = loopingcall.FixedIntervalLoopingCall(_check_vm_status, + instance_name) + + try: + # add a timeout to the periodic call. + periodic_call.start(interval=SHUTDOWN_TIME_INCREMENT) + etimeout.with_timeout(time_limit, periodic_call.wait) + except etimeout.Timeout: + # VM did not shutdown in the expected time_limit. + return False + finally: + # stop the periodic call, in case of exceptions or Timeout. 
+ periodic_call.stop() + + return True diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py index 87c3d5258f..c9621fe748 100755 --- a/nova/virt/hyperv/vmutils.py +++ b/nova/virt/hyperv/vmutils.py @@ -80,8 +80,11 @@ class VMUtils(object): 'Msvm_SyntheticEthernetPortSettingData' _AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement" + _SHUTDOWN_COMPONENT = "Msvm_ShutdownComponent" + _vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2, constants.HYPERV_VM_STATE_DISABLED: 3, + constants.HYPERV_VM_STATE_SHUTTING_DOWN: 4, constants.HYPERV_VM_STATE_REBOOT: 10, constants.HYPERV_VM_STATE_PAUSED: 32768, constants.HYPERV_VM_STATE_SUSPENDED: 32769} @@ -387,6 +390,21 @@ def create_nic(self, vm_name, nic_name, mac_address): self._add_virt_resource(new_nic_data, vm.path_()) + def soft_shutdown_vm(self, vm_name): + vm = self._lookup_vm_check(vm_name) + shutdown_component = vm.associators( + wmi_result_class=self._SHUTDOWN_COMPONENT) + + if not shutdown_component: + # If no shutdown_component is found, it means the VM is already + # in a shutdown state. + return + + reason = 'Soft shutdown requested by OpenStack Nova.' 
+ (ret_val, ) = shutdown_component[0].InitiateShutdown(Force=False, + Reason=reason) + self.check_ret_val(ret_val, None) + def set_vm_state(self, vm_name, req_state): """Set the desired state of the VM.""" vm = self._lookup_vm_check(vm_name) diff --git a/nova/virt/hyperv/vmutilsv2.py b/nova/virt/hyperv/vmutilsv2.py index 61a88291cf..f5d0d50110 100644 --- a/nova/virt/hyperv/vmutilsv2.py +++ b/nova/virt/hyperv/vmutilsv2.py @@ -58,6 +58,7 @@ class VMUtilsV2(vmutils.VMUtils): _vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2, constants.HYPERV_VM_STATE_DISABLED: 3, + constants.HYPERV_VM_STATE_SHUTTING_DOWN: 4, constants.HYPERV_VM_STATE_REBOOT: 11, constants.HYPERV_VM_STATE_PAUSED: 9, constants.HYPERV_VM_STATE_SUSPENDED: 6} From ea86e31a0c714b7ef37ecf0833222c1529b32258 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Fri, 22 Aug 2014 20:21:32 +0200 Subject: [PATCH 463/486] Fix class name for ServerGroupAffinityFilter The class name for ServerGroupAffinityFilter is ServerGroupAffinityFilter and not ServerGroupAntiAffinityFilter. The wrong class lead to a wrong display on http://docs.openstack.org/developer/nova/devref/filter_scheduler.html Change-Id: I690a60cb70eab4837544ddd0bff6f1264e639592 Closes-Bug: #1360375 --- doc/source/devref/filter_scheduler.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst index 872f3f6337..69d06d43f7 100644 --- a/doc/source/devref/filter_scheduler.rst +++ b/doc/source/devref/filter_scheduler.rst @@ -367,7 +367,7 @@ in :mod:``nova.tests.scheduler``. .. |TypeAffinityFilter| replace:: :class:`TypeAffinityFilter ` .. |AggregateTypeAffinityFilter| replace:: :class:`AggregateTypeAffinityFilter ` .. |ServerGroupAntiAffinityFilter| replace:: :class:`ServerGroupAntiAffinityFilter ` -.. |ServerGroupAffinityFilter| replace:: :class:`ServerGroupAntiAffinityFilter ` +.. |ServerGroupAffinityFilter| replace:: :class:`ServerGroupAffinityFilter ` .. 
|AggregateInstanceExtraSpecsFilter| replace:: :class:`AggregateInstanceExtraSpecsFilter ` .. |AggregateMultiTenancyIsolation| replace:: :class:`AggregateMultiTenancyIsolation ` .. |RamWeigher| replace:: :class:`RamWeigher ` From 65a4a6b0085f8639de2df2ccb5a17a5449d676ae Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 14 Aug 2014 11:38:35 -0700 Subject: [PATCH 464/486] neutronv2: treat instance as object in deallocate_for_instance The deallocate_for_instance method is given an instance object but the neutronv2 API was treating it like a dict. This changes the method to use dot notatoin for accessing fields on the object and updates the unit tests to pass an instance object rather than a dict. Part of blueprint compute-manager-objects-juno Change-Id: I784d1d59c23da8485d355251aba26b98bd9c7a16 --- nova/network/neutronv2/api.py | 2 +- nova/tests/network/test_neutronv2.py | 16 ++++++++++++---- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index 17162f62b9..78e6590f7d 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -472,7 +472,7 @@ def _populate_neutron_extension_values(self, context, instance, def deallocate_for_instance(self, context, instance, **kwargs): """Deallocate all network resources related to the instance.""" LOG.debug('deallocate_for_instance()', instance=instance) - search_opts = {'device_id': instance['uuid']} + search_opts = {'device_id': instance.uuid} neutron = neutronv2.get_client(context) data = neutron.list_ports(**search_opts) ports = [port['id'] for port in data.get('ports', [])] diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index 6442c407ae..a6f2b44ed4 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -1102,13 +1102,17 @@ def test_allocate_for_instance_port_in_use(self): self.instance, requested_networks=requested_networks) def 
_deallocate_for_instance(self, number, requested_networks=None): + # TODO(mriedem): Remove this conversion when all neutronv2 APIs are + # converted to handling instance objects. + self.instance = fake_instance.fake_instance_obj(self.context, + **self.instance) api = neutronapi.API() port_data = number == 1 and self.port_data1 or self.port_data2 ret_data = copy.deepcopy(port_data) if requested_networks: for net, fip, port in requested_networks: ret_data.append({'network_id': net, - 'device_id': self.instance['uuid'], + 'device_id': self.instance.uuid, 'device_owner': 'compute:nova', 'id': port, 'status': 'DOWN', @@ -1116,7 +1120,7 @@ def _deallocate_for_instance(self, number, requested_networks=None): 'fixed_ips': [], 'mac_address': 'fake_mac', }) self.moxed_client.list_ports( - device_id=self.instance['uuid']).AndReturn( + device_id=self.instance.uuid).AndReturn( {'ports': ret_data}) if requested_networks: for net, fip, port in requested_networks: @@ -1126,7 +1130,7 @@ def _deallocate_for_instance(self, number, requested_networks=None): self.mox.StubOutWithMock(api.db, 'instance_info_cache_update') api.db.instance_info_cache_update(self.context, - self.instance['uuid'], + self.instance.uuid, {'network_info': '[]'}) self.mox.ReplayAll() @@ -1153,9 +1157,13 @@ def test_deallocate_for_instance_2(self): self._deallocate_for_instance(2) def test_deallocate_for_instance_port_not_found(self): + # TODO(mriedem): Remove this conversion when all neutronv2 APIs are + # converted to handling instance objects. 
+ self.instance = fake_instance.fake_instance_obj(self.context, + **self.instance) port_data = self.port_data1 self.moxed_client.list_ports( - device_id=self.instance['uuid']).AndReturn( + device_id=self.instance.uuid).AndReturn( {'ports': port_data}) NeutronNotFound = neutronv2.exceptions.NeutronClientException( From aae07caffc41fe4fb497700fe2d81348e92f72bb Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 22 Aug 2014 23:33:29 +0000 Subject: [PATCH 465/486] Updated from global requirements Change-Id: I07afee130b7e1a362597784e8e31dc35e6f74674 --- requirements.txt | 2 +- test-requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 9cf68daee2..72f9737287 100644 --- a/requirements.txt +++ b/requirements.txt @@ -35,6 +35,6 @@ oslo.config>=1.4.0.0a3 oslo.rootwrap>=1.3.0.0a1 pycadf>=0.5.1 oslo.messaging>=1.4.0.0a3 -oslo.i18n>=0.1.0 # Apache-2.0 +oslo.i18n>=0.2.0 # Apache-2.0 lockfile>=0.8 rfc3986>=0.2.0 # Apache-2.0 diff --git a/test-requirements.txt b/test-requirements.txt index 971529b382..a6da658e73 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -11,7 +11,7 @@ psycopg2 pylint==0.25.2 python-subunit>=0.0.18 sphinx>=1.1.2,!=1.2.0,<1.3 -oslosphinx -oslotest +oslosphinx>=2.2.0.0a2 +oslotest>=1.1.0.0a1 testrepository>=0.0.18 testtools>=0.9.34 From 524b545701ea10e0f1a76f737e865021d7e827c3 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 22 Aug 2014 16:55:43 -0700 Subject: [PATCH 466/486] Avoid refreshing PCI devices on instance.save() Currently the way we refresh Instance.pci_devices causes current-version PciDevice and PciDeviceList objects to be sent back to a potentially old client. This is because remotable methods don't undergo backporting obligatorily in the way that remotable_classmethod ones do. This is currently a problem for pci_devices and security_groups, although it is handled differently for things like info_cache so that it's not a problem. 
The resolution to this is rather complicated, so this patch just avoids the implicit update (which is probably never used and actually generating some more DB traffic) for the time being. Change-Id: I33e584c42c9484912ccc3d38b67a6a7ee3d69a10 --- nova/objects/instance.py | 21 +++++++++++++++------ nova/tests/objects/test_instance.py | 16 ++++++++++++++++ 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/nova/objects/instance.py b/nova/objects/instance.py index a0b43bad38..e0b8fd3f6b 100644 --- a/nova/objects/instance.py +++ b/nova/objects/instance.py @@ -273,11 +273,6 @@ def _from_db_object(context, instance, db_inst, expected_attrs=None): objects.InstanceFault.get_latest_for_instance( context, instance.uuid)) - if 'pci_devices' in expected_attrs: - pci_devices = base.obj_make_list( - context, objects.PciDeviceList(context), - objects.PciDevice, db_inst['pci_devices']) - instance['pci_devices'] = pci_devices if 'info_cache' in expected_attrs: if db_inst['info_cache'] is None: instance.info_cache = None @@ -289,6 +284,15 @@ def _from_db_object(context, instance, db_inst, expected_attrs=None): instance.info_cache._from_db_object(context, instance.info_cache, db_inst['info_cache']) + + # TODO(danms): If we are updating these on a backlevel instance, + # we'll end up sending back new versions of these objects (see + # above note for new info_caches + if 'pci_devices' in expected_attrs: + pci_devices = base.obj_make_list( + context, objects.PciDeviceList(context), + objects.PciDevice, db_inst['pci_devices']) + instance['pci_devices'] = pci_devices if 'security_groups' in expected_attrs: sec_groups = base.obj_make_list( context, objects.SecurityGroupList(context), @@ -460,6 +464,10 @@ def _handle_cell_update_from_api(): expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS if self.obj_attr_is_set(attr)] + if 'pci_devices' in expected_attrs: + # NOTE(danms): We don't refresh pci_devices on save right now + expected_attrs.remove('pci_devices') + # 
NOTE(alaski): We need to pull system_metadata for the # notification.send_update() below. If we don't there's a KeyError # when it tries to extract the flavor. @@ -475,7 +483,8 @@ def _handle_cell_update_from_api(): cells_api = cells_rpcapi.CellsAPI() cells_api.instance_update_at_top(context, inst_ref) - self._from_db_object(context, self, inst_ref, expected_attrs) + self._from_db_object(context, self, inst_ref, + expected_attrs=expected_attrs) notifications.send_update(context, old_ref, inst_ref) self.obj_reset_changes() diff --git a/nova/tests/objects/test_instance.py b/nova/tests/objects/test_instance.py index 270fe7641c..cb61628640 100644 --- a/nova/tests/objects/test_instance.py +++ b/nova/tests/objects/test_instance.py @@ -379,6 +379,22 @@ def test_save_rename_sends_notification(self): self.assertEqual('goodbye', inst.display_name) self.assertEqual(set([]), inst.obj_what_changed()) + @mock.patch('nova.db.instance_update_and_get_original') + @mock.patch('nova.objects.Instance._from_db_object') + def test_save_does_not_refresh_pci_devices(self, mock_fdo, mock_update): + # NOTE(danms): This tests that we don't update the pci_devices + # field from the contents of the database. This is not because we + # don't necessarily want to, but because the way pci_devices is + # currently implemented it causes versioning issues. When that is + # resolved, this test should go away. 
+ mock_update.return_value = None, None + inst = instance.Instance(context=self.context, id=123) + inst.uuid = 'foo' + inst.pci_devices = pci_device.PciDeviceList() + inst.save() + self.assertNotIn('pci_devices', + mock_fdo.call_args_list[0][1]['expected_attrs']) + def test_get_deleted(self): fake_inst = dict(self.fake_instance, id=123, deleted=123) fake_uuid = fake_inst['uuid'] From c10177fcae8657890e5a7ba664606961ce63d791 Mon Sep 17 00:00:00 2001 From: Roman Bogorodskiy Date: Thu, 21 Aug 2014 16:34:05 +0400 Subject: [PATCH 467/486] libvirt: driver used memory tests cleanup The libvirt's driver _get_memory_mb_used() currently reports proper stats only if sys.platform is 'linux*', otherwise it reports 0. In order for tests to work as expected on non-Linux platforms, mock 'sys.platform' to return 'linux2' to make sure the proper implementation is called in _get_memory_mb_used(). While here, convert test_get_memory_used_normal() to use mock instead of mox to make the test shorter and look similar to test_get_memory_used_xen() that uses mock already. 
Change-Id: If37fe2e447a52e73bcdbefc84b729931d2f7d0b8 --- nova/tests/virt/libvirt/test_driver.py | 45 ++++++++------------------ 1 file changed, 14 insertions(+), 31 deletions(-) diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index a6bc99f26c..0c8a62591f 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -7347,23 +7347,7 @@ def name(self): mock_list.assert_called_with() def test_get_memory_used_normal(self): - def fake_get_info(): - return ['x86_64', 15814L, 8, 1208, 1, 1, 4, 2] - - self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn') - libvirt_driver.LibvirtDriver._conn.getInfo = fake_get_info - - real_open = __builtin__.open - - class fake_file(object): - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_traceback): - return False - - def read(self): - return """ + m = mock.mock_open(read_data=""" MemTotal: 16194180 kB MemFree: 233092 kB MemAvailable: 8892356 kB @@ -7371,21 +7355,19 @@ def read(self): Cached: 8362404 kB SwapCached: 0 kB Active: 8381604 kB -""" - - def fake_open(path, *args, **kwargs): - if path == "/proc/meminfo": - return fake_file() - else: - return real_open(path, *args, **kwargs) - - self.mox.StubOutWithMock(__builtin__, 'open') - __builtin__.open = fake_open +""") + with contextlib.nested( + mock.patch("__builtin__.open", m, create=True), + mock.patch.object(libvirt_driver.LibvirtDriver, + "_conn"), + mock.patch('sys.platform', 'linux2'), + ) as (mock_file, mock_conn, mock_platform): + mock_conn.getInfo.return_value = [ + 'x86_64', 15814L, 8, 1208, 1, 1, 4, 2] - self.mox.ReplayAll() - drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) - self.assertEqual(6866, drvr._get_memory_mb_used()) + self.assertEqual(6866, drvr._get_memory_mb_used()) def test_get_memory_used_xen(self): self.flags(virt_type='xen', group='libvirt') @@ -7425,7 
+7407,8 @@ def UUIDString(self): "_list_instance_domains"), mock.patch.object(libvirt_driver.LibvirtDriver, "_conn"), - ) as (mock_file, mock_list, mock_conn): + mock.patch('sys.platform', 'linux2'), + ) as (mock_file, mock_list, mock_conn, mock_platform): mock_list.return_value = [ DiagFakeDomain(0, 15814), DiagFakeDomain(1, 750), From eaf9522c842614dc83d3fd541bfdc1159416d438 Mon Sep 17 00:00:00 2001 From: julykobe Date: Sat, 23 Aug 2014 19:26:41 +0800 Subject: [PATCH 468/486] fix typo in docstring Change-Id: Id89ddf8110ed63596f23fcabb2996fd0c0afe87f Closes-Bug: #1359002 --- nova/scheduler/filters/type_filter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/scheduler/filters/type_filter.py b/nova/scheduler/filters/type_filter.py index c588239534..6c4712a83e 100644 --- a/nova/scheduler/filters/type_filter.py +++ b/nova/scheduler/filters/type_filter.py @@ -29,7 +29,7 @@ class TypeAffinityFilter(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): """Dynamically limits hosts to one instance type - Return False if host has any instance types other then the requested + Return False if host has any instance types other than the requested type. Return True if all instance types match or if host is empty. """ From 409555530aca8ab2886bd4a5301cadb128875900 Mon Sep 17 00:00:00 2001 From: Eli Qiao Date: Thu, 14 Aug 2014 11:26:29 +0800 Subject: [PATCH 469/486] Change v3 extended_server_attibutes to v2.1 This patch changes v3 extended_server_attibutes API to v2.1 and makes v2 unit tests share between v2 and v2.1. Revert v3 changes back to v2. os-extended-server-attributes:* -> OS-EXT-SRV-ATTR:* The differences between v2 and v3 are described on the wiki page https://wiki.openstack.org/wiki/NovaAPIv2tov3. 
Partially implements blueprint v2-on-v3-api Change-Id: I02b203b62b9e675f71d0ff3880bc9ee051a3b80d --- .../all_extensions/server-get-resp.json | 6 +- .../all_extensions/servers-details-resp.json | 6 +- .../server-get-resp.json | 6 +- .../servers-detail-resp.json | 6 +- .../plugins/v3/extended_server_attributes.py | 7 +- .../test_extended_server_attributes.py | 38 ++++-- .../v3/test_extended_server_attributes.py | 118 ------------------ .../all_extensions/server-get-resp.json.tpl | 6 +- .../servers-details-resp.json.tpl | 6 +- .../server-get-resp.json.tpl | 6 +- .../servers-detail-resp.json.tpl | 6 +- 11 files changed, 54 insertions(+), 157 deletions(-) delete mode 100644 nova/tests/api/openstack/compute/plugins/v3/test_extended_server_attributes.py diff --git a/doc/v3/api_samples/all_extensions/server-get-resp.json b/doc/v3/api_samples/all_extensions/server-get-resp.json index 8826f360dc..2ca9fe37ed 100644 --- a/doc/v3/api_samples/all_extensions/server-get-resp.json +++ b/doc/v3/api_samples/all_extensions/server-get-resp.json @@ -50,9 +50,9 @@ "os-access-ips:access_ip_v6": "", "os-config-drive:config_drive": "", "os-extended-availability-zone:availability_zone": "nova", - "os-extended-server-attributes:host": "b8b357f7100d4391828f2177c922ef93", - "os-extended-server-attributes:hypervisor_hostname": "fake-mini", - "os-extended-server-attributes:instance_name": "instance-00000001", + "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "os-extended-status:locked_by": null, "os-extended-status:power_state": 1, "os-extended-status:task_state": null, diff --git a/doc/v3/api_samples/all_extensions/servers-details-resp.json b/doc/v3/api_samples/all_extensions/servers-details-resp.json index 9467fb4a40..047362f6ca 100644 --- a/doc/v3/api_samples/all_extensions/servers-details-resp.json +++ b/doc/v3/api_samples/all_extensions/servers-details-resp.json @@ -51,9 
+51,9 @@ "os-access-ips:access_ip_v6": "", "os-config-drive:config_drive": "", "os-extended-availability-zone:availability_zone": "nova", - "os-extended-server-attributes:host": "c3f14e9812ad496baf92ccfb3c61e15f", - "os-extended-server-attributes:hypervisor_hostname": "fake-mini", - "os-extended-server-attributes:instance_name": "instance-00000001", + "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "os-extended-status:locked_by": null, "os-extended-status:power_state": 1, "os-extended-status:task_state": null, diff --git a/doc/v3/api_samples/os-extended-server-attributes/server-get-resp.json b/doc/v3/api_samples/os-extended-server-attributes/server-get-resp.json index 24c54d9c5d..b9c6dae1d3 100644 --- a/doc/v3/api_samples/os-extended-server-attributes/server-get-resp.json +++ b/doc/v3/api_samples/os-extended-server-attributes/server-get-resp.json @@ -46,9 +46,9 @@ "My Server Name": "Apache1" }, "name": "new-server-test", - "os-extended-server-attributes:host": "c5f474bf81474f9dbbc404d5b2e4e9b3", - "os-extended-server-attributes:hypervisor_hostname": "fake-mini", - "os-extended-server-attributes:instance_name": "instance-00000001", + "OS-EXT-SRV-ATTR:host": "c5f474bf81474f9dbbc404d5b2e4e9b3", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "progress": 0, "status": "ACTIVE", "tenant_id": "openstack", diff --git a/doc/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json b/doc/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json index 7236315943..27d8607fad 100644 --- a/doc/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json +++ b/doc/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json @@ -47,9 +47,9 @@ "My Server Name": "Apache1" }, "name": "new-server-test", - "os-extended-server-attributes:host": 
"bc8efe4fdb7148a4bb921a2b03d17de6", - "os-extended-server-attributes:hypervisor_hostname": "fake-mini", - "os-extended-server-attributes:instance_name": "instance-00000001", + "OS-EXT-SRV-ATTR:host": "bc8efe4fdb7148a4bb921a2b03d17de6", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "progress": 0, "status": "ACTIVE", "tenant_id": "openstack", diff --git a/nova/api/openstack/compute/plugins/v3/extended_server_attributes.py b/nova/api/openstack/compute/plugins/v3/extended_server_attributes.py index 5fdc418e0f..fae7d48f3e 100644 --- a/nova/api/openstack/compute/plugins/v3/extended_server_attributes.py +++ b/nova/api/openstack/compute/plugins/v3/extended_server_attributes.py @@ -24,15 +24,14 @@ class ExtendedServerAttributesController(wsgi.Controller): def _extend_server(self, context, server, instance): - key = "%s:hypervisor_hostname" % ExtendedServerAttributes.alias + key = "OS-EXT-SRV-ATTR:hypervisor_hostname" server[key] = instance['node'] for attr in ['host', 'name']: if attr == 'name': - key = "%s:instance_%s" % (ExtendedServerAttributes.alias, - attr) + key = "OS-EXT-SRV-ATTR:instance_%s" % attr else: - key = "%s:%s" % (ExtendedServerAttributes.alias, attr) + key = "OS-EXT-SRV-ATTR:%s" % attr server[key] = instance[attr] @wsgi.extends diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py b/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py index 4cd9108087..b69771c855 100644 --- a/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py +++ b/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py @@ -54,25 +54,24 @@ def fake_compute_get_all(*args, **kwargs): db_list, fields) -class ExtendedServerAttributesTest(test.TestCase): +class ExtendedServerAttributesTestV21(test.TestCase): content_type = 'application/json' prefix = 'OS-EXT-SRV-ATTR:' + fake_url = '/v3' def setUp(self): - 
super(ExtendedServerAttributesTest, self).setUp() + super(ExtendedServerAttributesTestV21, self).setUp() fakes.stub_out_nw_api(self.stubs) self.stubs.Set(compute.api.API, 'get', fake_compute_get) self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all) self.stubs.Set(db, 'instance_get_by_uuid', fake_compute_get) - self.flags( - osapi_compute_extension=[ - 'nova.api.openstack.compute.contrib.select_extensions'], - osapi_compute_ext_list=['Extended_server_attributes']) def _make_request(self, url): req = webob.Request.blank(url) req.headers['Accept'] = self.content_type - res = req.get_response(fakes.wsgi_app(init_only=('servers',))) + res = req.get_response( + fakes.wsgi_app_v3(init_only=('servers', + 'os-extended-server-attributes'))) return res def _get_server(self, body): @@ -89,7 +88,7 @@ def assertServerAttributes(self, server, host, node, instance_name): node) def test_show(self): - url = '/v2/fake/servers/%s' % UUID3 + url = self.fake_url + '/servers/%s' % UUID3 res = self._make_request(url) self.assertEqual(res.status_int, 200) @@ -99,7 +98,7 @@ def test_show(self): instance_name=NAME_FMT % 1) def test_detail(self): - url = '/v2/fake/servers/detail' + url = self.fake_url + '/servers/detail' res = self._make_request(url) self.assertEqual(res.status_int, 200) @@ -115,13 +114,30 @@ def fake_compute_get(*args, **kwargs): raise exception.InstanceNotFound(instance_id='fake') self.stubs.Set(compute.api.API, 'get', fake_compute_get) - url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115' + url = self.fake_url + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115' res = self._make_request(url) self.assertEqual(res.status_int, 404) -class ExtendedServerAttributesXmlTest(ExtendedServerAttributesTest): +class ExtendedServerAttributesTestV2(ExtendedServerAttributesTestV21): + fake_url = '/v2/fake' + + def setUp(self): + super(ExtendedServerAttributesTestV2, self).setUp() + self.flags( + osapi_compute_extension=[ + 
'nova.api.openstack.compute.contrib.select_extensions'], + osapi_compute_ext_list=['Extended_server_attributes']) + + def _make_request(self, url): + req = webob.Request.blank(url) + req.headers['Accept'] = self.content_type + res = req.get_response(fakes.wsgi_app(init_only=('servers',))) + return res + + +class ExtendedServerAttributesXmlTest(ExtendedServerAttributesTestV2): content_type = 'application/xml' ext = extended_server_attributes prefix = '{%s}' % ext.Extended_server_attributes.namespace diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_extended_server_attributes.py b/nova/tests/api/openstack/compute/plugins/v3/test_extended_server_attributes.py deleted file mode 100644 index ab9bad4b04..0000000000 --- a/nova/tests/api/openstack/compute/plugins/v3/test_extended_server_attributes.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import webob - -from nova.api.openstack.compute.plugins.v3 import extended_server_attributes -from nova import compute -from nova import db -from nova import exception -from nova import objects -from nova.objects import instance as instance_obj -from nova.openstack.common import jsonutils -from nova import test -from nova.tests.api.openstack import fakes -from nova.tests import fake_instance - -from oslo.config import cfg - - -NAME_FMT = cfg.CONF.instance_name_template -UUID1 = '00000000-0000-0000-0000-000000000001' -UUID2 = '00000000-0000-0000-0000-000000000002' -UUID3 = '00000000-0000-0000-0000-000000000003' - - -def fake_compute_get(*args, **kwargs): - inst = fakes.stub_instance(1, uuid=UUID3, host="host-fake", - node="node-fake") - return fake_instance.fake_instance_obj(args[1], - expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst) - - -def fake_compute_get_all(*args, **kwargs): - db_list = [ - fakes.stub_instance(1, uuid=UUID1, host="host-1", node="node-1"), - fakes.stub_instance(2, uuid=UUID2, host="host-2", node="node-2") - ] - fields = instance_obj.INSTANCE_DEFAULT_FIELDS - return instance_obj._make_instance_list(args[1], - objects.InstanceList(), - db_list, fields) - - -class ExtendedServerAttributesTest(test.TestCase): - content_type = 'application/json' - prefix = '%s:' % extended_server_attributes.ExtendedServerAttributes.alias - - def setUp(self): - super(ExtendedServerAttributesTest, self).setUp() - fakes.stub_out_nw_api(self.stubs) - self.stubs.Set(compute.api.API, 'get', fake_compute_get) - self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all) - self.stubs.Set(db, 'instance_get_by_uuid', fake_compute_get) - - def _make_request(self, url): - req = webob.Request.blank(url) - req.headers['Accept'] = self.content_type - res = req.get_response( - fakes.wsgi_app_v3(init_only=('servers', - 'os-extended-server-attributes'))) - return res - - def _get_server(self, body): - return jsonutils.loads(body).get('server') - - def 
_get_servers(self, body): - return jsonutils.loads(body).get('servers') - - def assertServerAttributes(self, server, host, node, instance_name): - self.assertEqual(server.get('%shost' % self.prefix), host) - self.assertEqual(server.get('%sinstance_name' % self.prefix), - instance_name) - self.assertEqual(server.get('%shypervisor_hostname' % self.prefix), - node) - - def test_show(self): - url = '/v3/servers/%s' % UUID3 - res = self._make_request(url) - - self.assertEqual(res.status_int, 200) - self.assertServerAttributes(self._get_server(res.body), - host='host-fake', - node='node-fake', - instance_name=NAME_FMT % (1)) - - def test_detail(self): - url = '/v3/servers/detail' - res = self._make_request(url) - - self.assertEqual(res.status_int, 200) - for i, server in enumerate(self._get_servers(res.body)): - self.assertServerAttributes(server, - host='host-%s' % (i + 1), - node='node-%s' % (i + 1), - instance_name=NAME_FMT % (i + 1)) - - def test_no_instance_passthrough_404(self): - - def fake_compute_get(*args, **kwargs): - raise exception.InstanceNotFound(instance_id='fake') - - self.stubs.Set(compute.api.API, 'get', fake_compute_get) - url = '/v3/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115' - res = self._make_request(url) - - self.assertEqual(res.status_int, 404) diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl index e000296910..8407eed0e0 100644 --- a/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl @@ -50,9 +50,9 @@ "name": "new-server-test", "os-config-drive:config_drive": "", "os-extended-availability-zone:availability_zone": "nova", - "os-extended-server-attributes:host": "%(compute_host)s", - "os-extended-server-attributes:hypervisor_hostname": "%(hypervisor_hostname)s", - "os-extended-server-attributes:instance_name": 
"instance-00000001", + "OS-EXT-SRV-ATTR:host": "%(compute_host)s", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "os-extended-status:locked_by": null, "os-extended-status:power_state": 1, "os-extended-status:task_state": null, diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl index 652714cf0f..ba33b4b285 100644 --- a/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl @@ -51,9 +51,9 @@ "name": "new-server-test", "os-config-drive:config_drive": "", "os-extended-availability-zone:availability_zone": "nova", - "os-extended-server-attributes:host": "%(compute_host)s", - "os-extended-server-attributes:hypervisor_hostname": "%(hypervisor_hostname)s", - "os-extended-server-attributes:instance_name": "instance-00000001", + "OS-EXT-SRV-ATTR:host": "%(compute_host)s", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "os-extended-status:locked_by": null, "os-extended-status:power_state": 1, "os-extended-status:task_state": null, diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl index acb0ed6c3d..6149c8b18e 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl @@ -1,8 +1,8 @@ { "server": { - "os-extended-server-attributes:host": "%(compute_host)s", - "os-extended-server-attributes:hypervisor_hostname": "%(hypervisor_hostname)s", - "os-extended-server-attributes:instance_name": 
"%(instance_name)s", + "OS-EXT-SRV-ATTR:host": "%(compute_host)s", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s", + "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s", "updated": "%(isotime)s", "created": "%(isotime)s", "addresses": { diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl index 115bba4df4..f54ae4ee41 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl @@ -1,9 +1,9 @@ { "servers": [ { - "os-extended-server-attributes:host": "%(compute_host)s", - "os-extended-server-attributes:hypervisor_hostname": "%(hypervisor_hostname)s", - "os-extended-server-attributes:instance_name": "%(instance_name)s", + "OS-EXT-SRV-ATTR:host": "%(compute_host)s", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s", + "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s", "updated": "%(isotime)s", "created": "%(isotime)s", "addresses": { From 3668c7b66a56f3c8911728e1a06033802d2e43a1 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Sun, 24 Aug 2014 12:51:00 +0000 Subject: [PATCH 470/486] Remove duplicated code in test_versions There is duplicated code in test_versions. This patch removes it for cleanup. 
Change-Id: Id8ff2147e624a89476a65c3f0ded54bdb26e53ad --- .../api/openstack/compute/test_versions.py | 50 ++++--------------- 1 file changed, 10 insertions(+), 40 deletions(-) diff --git a/nova/tests/api/openstack/compute/test_versions.py b/nova/tests/api/openstack/compute/test_versions.py index bd0a7d6bd7..ca461b1a00 100644 --- a/nova/tests/api/openstack/compute/test_versions.py +++ b/nova/tests/api/openstack/compute/test_versions.py @@ -120,9 +120,11 @@ def test_get_version_list_302(self): redirect_req = webob.Request.blank('/v2/') self.assertEqual(res.location, redirect_req.url) - def test_get_version_2_detail(self): - req = webob.Request.blank('/v2/') - req.accept = "application/json" + def _test_get_version_2_detail(self, url, accept=None): + if accept is None: + accept = "application/json" + req = webob.Request.blank(url) + req.accept = accept res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/json") @@ -159,44 +161,12 @@ def test_get_version_2_detail(self): } self.assertEqual(expected, version) + def test_get_version_2_detail(self): + self._test_get_version_2_detail('/v2/') + def test_get_version_2_detail_content_type(self): - req = webob.Request.blank('/') - req.accept = "application/json;version=2" - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - self.assertEqual(res.content_type, "application/json") - version = jsonutils.loads(res.body) - expected = { - "version": { - "id": "v2.0", - "status": "CURRENT", - "updated": "2011-01-21T11:33:21Z", - "links": [ - { - "rel": "self", - "href": "http://localhost/v2/", - }, - { - "rel": "describedby", - "type": "text/html", - "href": EXP_LINKS['v2.0']['html'], - }, - ], - "media-types": [ - { - "base": "application/xml", - "type": "application/" - "vnd.openstack.compute+xml;version=2", - }, - { - "base": "application/json", - "type": "application/" - "vnd.openstack.compute+json;version=2", - }, - ], - }, - } 
- self.assertEqual(expected, version) + accept = "application/json;version=2" + self._test_get_version_2_detail('/', accept=accept) def test_get_version_2_detail_xml(self): req = webob.Request.blank('/v2/') From e844ff8332eaa23a422ce456a3e1b1b74d6a2533 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Wed, 20 Aug 2014 07:56:09 +0000 Subject: [PATCH 471/486] Add v3 versions plugin unit test to v2 This patch adds v3 versions plugin unit test to v2 for improving test coverage of v2 API. Change-Id: I59dedcd31ea011d6eda7ff93d26b941334a67f51 --- nova/tests/api/openstack/compute/test_versions.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nova/tests/api/openstack/compute/test_versions.py b/nova/tests/api/openstack/compute/test_versions.py index ca461b1a00..fae62dca10 100644 --- a/nova/tests/api/openstack/compute/test_versions.py +++ b/nova/tests/api/openstack/compute/test_versions.py @@ -168,6 +168,12 @@ def test_get_version_2_detail_content_type(self): accept = "application/json;version=2" self._test_get_version_2_detail('/', accept=accept) + def test_get_version_2_versions_invalid(self): + req = webob.Request.blank('/v2/versions/1234') + req.accept = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(404, res.status_int) + def test_get_version_2_detail_xml(self): req = webob.Request.blank('/v2/') req.accept = "application/xml" From 2f17815b27cb186558e37c39b783d89dc13db540 Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Mon, 18 Aug 2014 11:48:37 +0900 Subject: [PATCH 472/486] Backport V3 flavor extraspecs API unit tests to V2 This patch backport some of the V3 flavor extraspecs API unit tests to V2 unit tests. V2 flavor extraspecs API does not raise HTTPBadRequest error in case provided extraspecs key is not string. It raise the TypeError instead. This patch also correct that and raise HTTPBadRequest in such cases. 
Change-Id: If73552943a8aa9b714d6748278311e816ec3daf7 --- nova/api/openstack/compute/contrib/flavorextraspecs.py | 4 ++++ .../openstack/compute/contrib/test_flavors_extra_specs.py | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/nova/api/openstack/compute/contrib/flavorextraspecs.py b/nova/api/openstack/compute/contrib/flavorextraspecs.py index f22284d299..5bbb23d26f 100644 --- a/nova/api/openstack/compute/contrib/flavorextraspecs.py +++ b/nova/api/openstack/compute/contrib/flavorextraspecs.py @@ -64,6 +64,10 @@ def _check_extra_specs(self, specs): try: flavors.validate_extra_spec_keys(specs.keys()) + except TypeError: + msg = _("Fail to validate provided extra specs keys. " + "Expected string") + raise exc.HTTPBadRequest(explanation=msg) except exception.InvalidInput as error: raise exc.HTTPBadRequest(explanation=error.format_message()) diff --git a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py b/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py index 1b3c330d15..0da6253310 100644 --- a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py +++ b/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py @@ -213,6 +213,9 @@ def test_create_empty_body(self): def test_create_non_dict_extra_specs(self): self._test_create_bad_request({"extra_specs": "non_dict"}) + def test_create_non_string_key(self): + self._test_create_bad_request({"extra_specs": {None: "value1"}}) + def test_create_non_string_value(self): self._test_create_bad_request({"extra_specs": {"key1": None}}) @@ -298,6 +301,9 @@ def test_update_item_too_many_keys(self): def test_update_item_non_dict_extra_specs(self): self._test_update_item_bad_request("non_dict") + def test_update_item_non_string_key(self): + self._test_update_item_bad_request({None: "value1"}) + def test_update_item_non_string_value(self): self._test_update_item_bad_request({"key1": None}) From 69df608fbe1a58c21944188c2584e474de886f28 Mon Sep 17 00:00:00 2001 From: 
He Jie Xu Date: Mon, 11 Aug 2014 14:18:18 +0800 Subject: [PATCH 473/486] Change v3 attach_interface to v2.1 This patch changes v3 attach_interface API to v2.1 and makes v2 unit tests share between v2 and v2.1. The differences between v2 and v3 are described on the wiki page https://wiki.openstack.org/wiki/NovaAPIv2tov3. Partially implements blueprint v2-on-v3-api Change-Id: If18676604edc8fea41d7888eb6546245163e91c2 --- .../attach-interfaces-create-req.json | 2 +- .../attach-interfaces-create-resp.json | 2 +- .../attach-interfaces-list-resp.json | 2 +- .../attach-interfaces-show-resp.json | 2 +- .../compute/plugins/v3/attach_interfaces.py | 6 +- .../compute/schemas/v3/attach_interfaces.py | 2 +- .../compute/contrib/test_attach_interfaces.py | 166 ++++--- .../plugins/v3/test_attach_interfaces.py | 450 ------------------ .../attach-interfaces-create-req.json.tpl | 2 +- .../attach-interfaces-create-resp.json.tpl | 2 +- .../attach-interfaces-list-resp.json.tpl | 2 +- .../attach-interfaces-show-resp.json.tpl | 2 +- 12 files changed, 112 insertions(+), 528 deletions(-) delete mode 100644 nova/tests/api/openstack/compute/plugins/v3/test_attach_interfaces.py diff --git a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json index af7bdd7af4..d14e791404 100644 --- a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json +++ b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json @@ -1,5 +1,5 @@ { - "interface_attachment": { + "interfaceAttachment": { "port_id": "ce531f90-199f-48c0-816c-13e38010b442" } } \ No newline at end of file diff --git a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json index 93b68d9c69..9dff234366 100644 --- a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json +++ 
b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json @@ -1,5 +1,5 @@ { - "interface_attachment": { + "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3", diff --git a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json index 9d977378b7..192f9a6487 100644 --- a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json +++ b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json @@ -1,5 +1,5 @@ { - "interface_attachments": [ + "interfaceAttachments": [ { "fixed_ips": [ { diff --git a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json index 93b68d9c69..9dff234366 100644 --- a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json +++ b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json @@ -1,5 +1,5 @@ { - "interface_attachment": { + "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3", diff --git a/nova/api/openstack/compute/plugins/v3/attach_interfaces.py b/nova/api/openstack/compute/plugins/v3/attach_interfaces.py index 92eadd381c..cb414d78ca 100644 --- a/nova/api/openstack/compute/plugins/v3/attach_interfaces.py +++ b/nova/api/openstack/compute/plugins/v3/attach_interfaces.py @@ -77,7 +77,7 @@ def show(self, req, server_id, id): if port_info['port']['device_id'] != server_id: raise exc.HTTPNotFound() - return {'interface_attachment': _translate_interface_attachment_view( + return {'interfaceAttachment': _translate_interface_attachment_view( port_info['port'])} @extensions.expected_errors((400, 404, 409, 500, 501)) @@ -91,7 +91,7 @@ def create(self, req, server_id, body): port_id = None req_ip = None if body: - attachment = body['interface_attachment'] + attachment = body['interfaceAttachment'] network_id = attachment.get('net_id', None) port_id = 
attachment.get('port_id', None) try: @@ -177,7 +177,7 @@ def _items(self, req, server_id, entity_maker): ports = data.get('ports', []) results = [entity_maker(port) for port in ports] - return {'interface_attachments': results} + return {'interfaceAttachments': results} class AttachInterfaces(extensions.V3APIExtensionBase): diff --git a/nova/api/openstack/compute/schemas/v3/attach_interfaces.py b/nova/api/openstack/compute/schemas/v3/attach_interfaces.py index 471275a02f..921ebc12fc 100644 --- a/nova/api/openstack/compute/schemas/v3/attach_interfaces.py +++ b/nova/api/openstack/compute/schemas/v3/attach_interfaces.py @@ -15,7 +15,7 @@ create = { 'type': 'object', 'properties': { - 'interface_attachment': { + 'interfaceAttachment': { 'type': 'object', 'properties': { 'net_id': { diff --git a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py index ce3a7eb355..ee411d3896 100644 --- a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py +++ b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py @@ -16,7 +16,10 @@ import mock from oslo.config import cfg -from nova.api.openstack.compute.contrib import attach_interfaces +from nova.api.openstack.compute.contrib import attach_interfaces \ + as attach_interfaces_v2 +from nova.api.openstack.compute.plugins.v3 import attach_interfaces \ + as attach_interfaces_v3 from nova.compute import api as compute_api from nova import context from nova import exception @@ -41,6 +44,7 @@ FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444' FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555' FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666' +FAKE_BAD_NET_ID = '00000000-0000-0000-0000-000000000000' port_data1 = { "id": FAKE_PORT_ID1, @@ -96,7 +100,7 @@ def fake_attach_interface(self, context, instance, network_id, port_id, # if no network_id is given when add a port to an instance, use the # first default network. 
network_id = fake_networks[0] - if network_id == 'bad_id': + if network_id == FAKE_BAD_NET_ID: raise exception.NetworkNotFound(network_id=network_id) if not port_id: port_id = ports[fake_networks.index(network_id)]['id'] @@ -118,9 +122,12 @@ def fake_get_instance(self, *args, **kwargs): return {} -class InterfaceAttachTests(test.NoDBTestCase): +class InterfaceAttachTestsV21(test.NoDBTestCase): + url = '/v3/os-interfaces' + controller_cls = attach_interfaces_v3.InterfaceAttachmentController + def setUp(self): - super(InterfaceAttachTests, self).setUp() + super(InterfaceAttachTestsV21, self).setUp() self.flags(auth_strategy=None, group='neutron') self.flags(url='http://anyhost/', group='neutron') self.flags(url_timeout=30, group='neutron') @@ -135,73 +142,69 @@ def setUp(self): 'port_state': port_data1['status'], 'fixed_ips': port_data1['fixed_ips'], }} + self.attachments = self.controller_cls() @mock.patch.object(compute_api.API, 'get', side_effect=exception.InstanceNotFound(instance_id='')) - def _test_instance_not_found(self, url, func, params, mock_get, + def _test_instance_not_found(self, url, func, args, mock_get, kwargs=None, method='GET'): req = webob.Request.blank(url) req.method = method req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context - self.assertRaises(exc.HTTPNotFound, func, req, *params) + if not kwargs: + kwargs = {} + self.assertRaises(exc.HTTPNotFound, func, req, *args, **kwargs) def test_show_instance_not_found(self): - attachments = attach_interfaces.InterfaceAttachmentController() - self._test_instance_not_found('/v2/fake/os-interfaces/fake', - attachments.show, ('fake', 'fake')) + self._test_instance_not_found(self.url + 'fake', + self.attachments.show, ('fake', 'fake')) def test_index_instance_not_found(self): - attachments = attach_interfaces.InterfaceAttachmentController() - self._test_instance_not_found('/v2/fake/os-interfaces', - attachments.index, ('fake', )) - - def 
test_delete_instance_not_found(self): - attachments = attach_interfaces.InterfaceAttachmentController() - self._test_instance_not_found('/v2/fake/os-interfaces/fake', - attachments.delete, ('fake', 'fake'), - method='DELETE') - - def test_create_instance_not_found(self): - attachments = attach_interfaces.InterfaceAttachmentController() - self._test_instance_not_found('/v2/fake/os-interfaces', - attachments.create, - ('fake', {'interfaceAttachment': {}}), - 'POST') + self._test_instance_not_found(self.url, + self.attachments.index, ('fake', )) + + def test_detach_interface_instance_not_found(self): + self._test_instance_not_found(self.url + '/fake', + self.attachments.delete, + ('fake', 'fake'), method='DELETE') + + def test_attach_interface_instance_not_found(self): + self._test_instance_not_found( + '/v2/fake/os-interfaces', self.attachments.create, ('fake', ), + kwargs={'body': {'interfaceAttachment': {}}}, method='POST') def test_show(self): - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v2/fake/os-interfaces/show') + req = webob.Request.blank(self.url + '/show') req.method = 'POST' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context - result = attachments.show(req, FAKE_UUID1, FAKE_PORT_ID1) + result = self.attachments.show(req, FAKE_UUID1, FAKE_PORT_ID1) self.assertEqual(self.expected_show, result) def test_show_invalid(self): - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v2/fake/os-interfaces/show') + req = webob.Request.blank(self.url + '/show') req.method = 'POST' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPNotFound, - attachments.show, req, FAKE_UUID2, FAKE_PORT_ID1) + self.attachments.show, req, FAKE_UUID2, + FAKE_PORT_ID1) def test_delete(self): self.stubs.Set(compute_api.API, 
'detach_interface', fake_detach_interface) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v2/fake/os-interfaces/delete') + req = webob.Request.blank(self.url + '/delete') req.method = 'DELETE' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context - result = attachments.delete(req, FAKE_UUID1, FAKE_PORT_ID1) + result = self.attachments.delete(req, FAKE_UUID1, FAKE_PORT_ID1) self.assertEqual('202 Accepted', result.status) def test_detach_interface_instance_locked(self): @@ -212,15 +215,14 @@ def fake_detach_interface_from_locked_server(self, context, self.stubs.Set(compute_api.API, 'detach_interface', fake_detach_interface_from_locked_server) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v2/fake/os-interfaces/delete') + req = webob.Request.blank(self.url + '/delete') req.method = 'POST' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPConflict, - attachments.delete, + self.attachments.delete, req, FAKE_UUID1, FAKE_PORT_ID1) @@ -228,15 +230,14 @@ def fake_detach_interface_from_locked_server(self, context, def test_delete_interface_not_found(self): self.stubs.Set(compute_api.API, 'detach_interface', fake_detach_interface) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v2/fake/os-interfaces/delete') + req = webob.Request.blank(self.url + '/delete') req.method = 'DELETE' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPNotFound, - attachments.delete, + self.attachments.delete, req, FAKE_UUID1, 'invaid-port-id') @@ -249,55 +250,53 @@ def fake_attach_interface_to_locked_server(self, context, self.stubs.Set(compute_api.API, 'attach_interface', 
fake_attach_interface_to_locked_server) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v2/fake/os-interfaces/attach') + req = webob.Request.blank(self.url + '/attach') req.method = 'POST' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPConflict, - attachments.create, req, FAKE_UUID1, - jsonutils.loads(req.body)) + self.attachments.create, req, FAKE_UUID1, + body=jsonutils.loads(req.body)) def test_attach_interface_without_network_id(self): self.stubs.Set(compute_api.API, 'attach_interface', fake_attach_interface) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v2/fake/os-interfaces/attach') + req = webob.Request.blank(self.url + '/attach') req.method = 'POST' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context - result = attachments.create(req, FAKE_UUID1, jsonutils.loads(req.body)) + result = self.attachments.create(req, FAKE_UUID1, + body=jsonutils.loads(req.body)) self.assertEqual(result['interfaceAttachment']['net_id'], FAKE_NET_ID1) def test_attach_interface_with_network_id(self): self.stubs.Set(compute_api.API, 'attach_interface', fake_attach_interface) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v2/fake/os-interfaces/attach') + req = webob.Request.blank(self.url + '/attach') req.method = 'POST' req.body = jsonutils.dumps({'interfaceAttachment': {'net_id': FAKE_NET_ID2}}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context - result = attachments.create(req, FAKE_UUID1, jsonutils.loads(req.body)) + result = self.attachments.create(req, FAKE_UUID1, + body=jsonutils.loads(req.body)) self.assertEqual(result['interfaceAttachment']['net_id'], FAKE_NET_ID2) def _attach_interface_bad_request_case(self, body): 
self.stubs.Set(compute_api.API, 'attach_interface', fake_attach_interface) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v2/fake/os-interfaces/attach') + req = webob.Request.blank(self.url + '/attach') req.method = 'POST' req.body = jsonutils.dumps(body) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, - attachments.create, req, FAKE_UUID1, - jsonutils.loads(req.body)) + self.attachments.create, req, FAKE_UUID1, + body=jsonutils.loads(req.body)) def test_attach_interface_with_port_and_network_id(self): body = { @@ -311,7 +310,7 @@ def test_attach_interface_with_port_and_network_id(self): def test_attach_interface_with_invalid_data(self): body = { 'interfaceAttachment': { - 'net_id': 'bad_id' + 'net_id': FAKE_BAD_NET_ID } } self._attach_interface_bad_request_case(body) @@ -324,16 +323,15 @@ def fake_attach_interface_invalid_state(*args, **kwargs): self.stubs.Set(compute_api.API, 'attach_interface', fake_attach_interface_invalid_state) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v2/fake/os-interfaces/attach') + req = webob.Request.blank(self.url + '/attach') req.method = 'POST' req.body = jsonutils.dumps({'interfaceAttachment': {'net_id': FAKE_NET_ID1}}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPConflict, - attachments.create, req, FAKE_UUID1, - jsonutils.loads(req.body)) + self.attachments.create, req, FAKE_UUID1, + body=jsonutils.loads(req.body)) def test_detach_interface_with_invalid_state(self): def fake_detach_interface_invalid_state(*args, **kwargs): @@ -343,14 +341,13 @@ def fake_detach_interface_invalid_state(*args, **kwargs): self.stubs.Set(compute_api.API, 'detach_interface', fake_detach_interface_invalid_state) - attachments = attach_interfaces.InterfaceAttachmentController() - req = 
webob.Request.blank('/v2/fake/os-interfaces/attach') + req = webob.Request.blank(self.url + '/attach') req.method = 'DELETE' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPConflict, - attachments.delete, + self.attachments.delete, req, FAKE_UUID1, FAKE_NET_ID1) @@ -363,16 +360,53 @@ def test_attach_interface_fixed_ip_already_in_use(self, get_mock.side_effect = fake_get_instance attach_mock.side_effect = exception.FixedIpAlreadyInUse( address='10.0.2.2', instance_uuid=FAKE_UUID1) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v2/fake/os-interfaces/attach') + req = webob.Request.blank(self.url + '/attach') req.method = 'POST' req.body = jsonutils.dumps({}) req.headers['content-type'] = 'application/json' req.environ['nova.context'] = self.context self.assertRaises(exc.HTTPBadRequest, - attachments.create, req, FAKE_UUID1, - jsonutils.loads(req.body)) + self.attachments.create, req, FAKE_UUID1, + body=jsonutils.loads(req.body)) attach_mock.assert_called_once_with(self.context, {}, None, None, None) get_mock.assert_called_once_with(self.context, FAKE_UUID1, want_objects=True, expected_attrs=None) + + def _test_attach_interface_with_invalid_parameter(self, param): + self.stubs.Set(compute_api.API, 'attach_interface', + fake_attach_interface) + req = webob.Request.blank(self.url + '/attach') + req.method = 'POST' + req.body = jsonutils.dumps({'interface_attachment': param}) + req.headers['content-type'] = 'application/json' + req.environ['nova.context'] = self.context + self.assertRaises(exception.ValidationError, + self.attachments.create, req, FAKE_UUID1, + body=jsonutils.loads(req.body)) + + def test_attach_interface_instance_with_non_uuid_net_id(self): + param = {'net_id': 'non_uuid'} + self._test_attach_interface_with_invalid_parameter(param) + + def test_attach_interface_instance_with_non_uuid_port_id(self): + param 
= {'port_id': 'non_uuid'} + self._test_attach_interface_with_invalid_parameter(param) + + def test_attach_interface_instance_with_non_array_fixed_ips(self): + param = {'fixed_ips': 'non_array'} + self._test_attach_interface_with_invalid_parameter(param) + + +class InterfaceAttachTestsV2(InterfaceAttachTestsV21): + url = '/v2/fake/os-interfaces' + controller_cls = attach_interfaces_v2.InterfaceAttachmentController + + def test_attach_interface_instance_with_non_uuid_net_id(self): + pass + + def test_attach_interface_instance_with_non_uuid_port_id(self): + pass + + def test_attach_interface_instance_with_non_array_fixed_ips(self): + pass diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_attach_interfaces.py b/nova/tests/api/openstack/compute/plugins/v3/test_attach_interfaces.py deleted file mode 100644 index f0ed6e45a6..0000000000 --- a/nova/tests/api/openstack/compute/plugins/v3/test_attach_interfaces.py +++ /dev/null @@ -1,450 +0,0 @@ -# Copyright 2012 SINA Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslo.config import cfg - -from nova.api.openstack.compute.plugins.v3 import attach_interfaces -from nova.compute import api as compute_api -from nova import context -from nova import exception -from nova.network import api as network_api -from nova.openstack.common import jsonutils -from nova import test -from nova.tests import fake_network_cache_model - -import webob -from webob import exc - - -CONF = cfg.CONF - -FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' -FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' - -FAKE_PORT_ID1 = '11111111-1111-1111-1111-111111111111' -FAKE_PORT_ID2 = '22222222-2222-2222-2222-222222222222' -FAKE_PORT_ID3 = '33333333-3333-3333-3333-333333333333' - -FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444' -FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555' -FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666' - -port_data1 = { - "id": FAKE_PORT_ID1, - "network_id": FAKE_NET_ID1, - "admin_state_up": True, - "status": "ACTIVE", - "mac_address": "aa:aa:aa:aa:aa:aa", - "fixed_ips": ["10.0.1.2"], - "device_id": FAKE_UUID1, -} - -port_data2 = { - "id": FAKE_PORT_ID2, - "network_id": FAKE_NET_ID2, - "admin_state_up": True, - "status": "ACTIVE", - "mac_address": "bb:bb:bb:bb:bb:bb", - "fixed_ips": ["10.0.2.2"], - "device_id": FAKE_UUID1, -} - -port_data3 = { - "id": FAKE_PORT_ID3, - "network_id": FAKE_NET_ID3, - "admin_state_up": True, - "status": "ACTIVE", - "mac_address": "bb:bb:bb:bb:bb:bb", - "fixed_ips": ["10.0.2.2"], - "device_id": '', -} - -fake_networks = [FAKE_NET_ID1, FAKE_NET_ID2] -ports = [port_data1, port_data2, port_data3] - - -def fake_list_ports(self, *args, **kwargs): - result = [] - for port in ports: - if port['device_id'] == kwargs['device_id']: - result.append(port) - return {'ports': result} - - -def fake_show_port(self, context, port_id, **kwargs): - for port in ports: - if port['id'] == port_id: - return {'port': port} - - -def fake_attach_interface(self, context, instance, network_id, 
port_id, - requested_ip='192.168.1.3'): - if not network_id: - # if no network_id is given when add a port to an instance, use the - # first default network. - network_id = fake_networks[0] - if network_id == 'bad_id': - raise exception.NetworkNotFound(network_id=network_id) - if not port_id: - port_id = ports[fake_networks.index(network_id)]['id'] - vif = fake_network_cache_model.new_vif() - vif['id'] = port_id - vif['network']['id'] = network_id - vif['network']['subnets'][0]['ips'][0]['address'] = requested_ip - return vif - - -def fake_detach_interface(self, context, instance, port_id): - for port in ports: - if port['id'] == port_id: - return - raise exception.PortNotFound(port_id=port_id) - - -def fake_get_instance(self, *args, **kwargs): - return {} - - -class InterfaceAttachTests(test.NoDBTestCase): - def setUp(self): - super(InterfaceAttachTests, self).setUp() - self.flags(auth_strategy=None, group='neutron') - self.flags(url='http://anyhost/', group='neutron') - self.flags(url_timeout=30, group='neutron') - self.stubs.Set(network_api.API, 'show_port', fake_show_port) - self.stubs.Set(network_api.API, 'list_ports', fake_list_ports) - self.stubs.Set(compute_api.API, 'get', fake_get_instance) - self.context = context.get_admin_context() - self.expected_show = {'interface_attachment': - {'net_id': FAKE_NET_ID1, - 'port_id': FAKE_PORT_ID1, - 'mac_addr': port_data1['mac_address'], - 'port_state': port_data1['status'], - 'fixed_ips': port_data1['fixed_ips'], - }} - - def test_item_instance_not_found(self): - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v3/servers/fake/os-attach-interfaces/') - req.method = 'GET' - req.body = jsonutils.dumps({}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - - def fake_get_instance_exception(self, context, instance_uuid, - **kwargs): - raise exception.InstanceNotFound(instance_id=instance_uuid) - - self.stubs.Set(compute_api.API, 
'get', fake_get_instance_exception) - self.assertRaises(exc.HTTPNotFound, attachments.index, - req, 'fake') - - def test_show(self): - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v3/servers/fake/os-attach-interfaces/show') - req.method = 'POST' - req.body = jsonutils.dumps({}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - - result = attachments.show(req, FAKE_UUID1, FAKE_PORT_ID1) - self.assertEqual(self.expected_show, result) - - def test_show_instance_not_found(self): - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v3/servers/fake/os-attach-interfaces/show') - req.method = 'POST' - req.body = jsonutils.dumps({}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - - def fake_get_instance_exception(self, context, instance_uuid, - **kwargs): - raise exception.InstanceNotFound(instance_id=instance_uuid) - - self.stubs.Set(compute_api.API, 'get', fake_get_instance_exception) - self.assertRaises(exc.HTTPNotFound, attachments.show, - req, 'fake', FAKE_PORT_ID1) - - def test_show_invalid(self): - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank('/v3/servers/fake/os-attach-interfaces/show') - req.method = 'POST' - req.body = jsonutils.dumps({}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - - self.assertRaises(exc.HTTPNotFound, - attachments.show, req, FAKE_UUID2, FAKE_PORT_ID1) - - def test_delete(self): - self.stubs.Set(compute_api.API, 'detach_interface', - fake_detach_interface) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank( - '/v3/servers/fake/os-attach-interfaces/delete') - req.method = 'DELETE' - req.body = jsonutils.dumps({}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - - result = 
attachments.delete(req, FAKE_UUID1, FAKE_PORT_ID1) - self.assertEqual('202 Accepted', result.status) - - def test_detach_interface_instance_locked(self): - def fake_detach_interface_from_locked_server(self, context, - instance, port_id): - raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1) - - self.stubs.Set(compute_api.API, - 'detach_interface', - fake_detach_interface_from_locked_server) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank( - '/v3/servers/fake/os-attach-interfaces/delete') - req.method = 'POST' - req.body = jsonutils.dumps({}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - - self.assertRaises(exc.HTTPConflict, - attachments.delete, - req, - FAKE_UUID1, - FAKE_PORT_ID1) - - def test_delete_interface_not_found(self): - self.stubs.Set(compute_api.API, 'detach_interface', - fake_detach_interface) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank( - '/v3/servers/fake/os-attach-interfaces/delete') - req.method = 'DELETE' - req.body = jsonutils.dumps({}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - - self.assertRaises(exc.HTTPNotFound, - attachments.delete, - req, - FAKE_UUID1, - 'invaid-port-id') - - def test_delete_instance_not_found(self): - self.stubs.Set(compute_api.API, 'detach_interface', - fake_detach_interface) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank( - '/v3/servers/fake/os-attach-interfaces/delete') - req.method = 'DELETE' - req.body = jsonutils.dumps({}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - - def fake_get_instance_exception(self, context, instance_uuid, - **kwargs): - raise exception.InstanceNotFound(instance_id=instance_uuid) - - self.stubs.Set(compute_api.API, 'get', fake_get_instance_exception) - self.assertRaises(exc.HTTPNotFound, - 
attachments.delete, - req, - 'fake', - 'invaid-port-id') - - def test_attach_interface_instance_locked(self): - def fake_attach_interface_to_locked_server(self, context, - instance, network_id, port_id, requested_ip): - raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1) - - self.stubs.Set(compute_api.API, - 'attach_interface', - fake_attach_interface_to_locked_server) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank( - '/v3/servers/fake/os-attach-interfaces/attach') - req.method = 'POST' - req.body = jsonutils.dumps({}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - self.assertRaises(exc.HTTPConflict, - attachments.create, req, FAKE_UUID1, - body=jsonutils.loads(req.body)) - - def test_attach_interface_without_network_id(self): - self.stubs.Set(compute_api.API, 'attach_interface', - fake_attach_interface) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank( - '/v3/servers/fake/os-attach-interfaces/attach') - req.method = 'POST' - req.body = jsonutils.dumps({}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - result = attachments.create(req, FAKE_UUID1, - body=jsonutils.loads(req.body)) - self.assertEqual(result['interface_attachment']['net_id'], - FAKE_NET_ID1) - - def test_attach_interface_with_network_id(self): - self.stubs.Set(compute_api.API, 'attach_interface', - fake_attach_interface) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank( - '/v3/servers/fake/os-attach-interfaces/attach') - req.method = 'POST' - req.body = jsonutils.dumps({'interface_attachment': - {'net_id': FAKE_NET_ID2}}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - result = attachments.create(req, - FAKE_UUID1, body=jsonutils.loads(req.body)) - self.assertEqual(result['interface_attachment']['net_id'], - 
FAKE_NET_ID2) - - def test_attach_interface_with_port_and_network_id(self): - self.stubs.Set(compute_api.API, 'attach_interface', - fake_attach_interface) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank( - '/v3/servers/fake/os-attach-interfaces/attach') - req.method = 'POST' - req.body = jsonutils.dumps({'interface_attachment': - {'port_id': FAKE_PORT_ID1, - 'net_id': FAKE_NET_ID2}}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - self.assertRaises(exc.HTTPBadRequest, - attachments.create, req, FAKE_UUID1, - body=jsonutils.loads(req.body)) - - def test_attach_interface_instance_not_found(self): - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank( - '/v3/servers/fake/os-attach-interfaces/attach') - req.method = 'POST' - req.body = jsonutils.dumps({'interface_attachment': - {'net_id': FAKE_NET_ID2}}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - - def fake_get_instance_exception(self, context, instance_uuid, - **kwargs): - raise exception.InstanceNotFound(instance_id=instance_uuid) - - self.stubs.Set(compute_api.API, 'get', fake_get_instance_exception) - self.assertRaises(exc.HTTPNotFound, - attachments.create, req, 'fake', - body=jsonutils.loads(req.body)) - - def _test_attach_interface_with_invalid_parameter(self, param): - self.stubs.Set(compute_api.API, 'attach_interface', - fake_attach_interface) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank( - '/v3/servers/fake/os-attach-interfaces/attach') - req.method = 'POST' - req.body = jsonutils.dumps({'interface_attachment': param}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - self.assertRaises(exception.ValidationError, - attachments.create, req, FAKE_UUID1, - body=jsonutils.loads(req.body)) - - def 
test_attach_interface_instance_with_non_uuid_net_id(self): - param = {'net_id': 'non_uuid'} - self._test_attach_interface_with_invalid_parameter(param) - - def test_attach_interface_instance_with_non_uuid_port_id(self): - param = {'port_id': 'non_uuid'} - self._test_attach_interface_with_invalid_parameter(param) - - def test_attach_interface_instance_with_non_array_fixed_ips(self): - param = {'fixed_ips': 'non_array'} - self._test_attach_interface_with_invalid_parameter(param) - - def test_attach_interface_with_invalid_state(self): - def fake_attach_interface_invalid_state(*args, **kwargs): - raise exception.InstanceInvalidState( - instance_uuid='', attr='', state='', - method='attach_interface') - - self.stubs.Set(compute_api.API, 'attach_interface', - fake_attach_interface_invalid_state) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank( - '/v3/servers/fake/os-attach-interfaces/attach') - req.method = 'POST' - req.body = jsonutils.dumps({'interface_attachment': - {'net_id': FAKE_NET_ID1}}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - self.assertRaises(exc.HTTPConflict, - attachments.create, req, FAKE_UUID1, - body=jsonutils.loads(req.body)) - - def test_detach_interface_with_invalid_state(self): - def fake_detach_interface_invalid_state(*args, **kwargs): - raise exception.InstanceInvalidState( - instance_uuid='', attr='', state='', - method='detach_interface') - - self.stubs.Set(compute_api.API, 'detach_interface', - fake_detach_interface_invalid_state) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank( - '/v3/servers/fake/os-attach-interfaces/delete') - req.method = 'DELETE' - req.body = jsonutils.dumps({}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - self.assertRaises(exc.HTTPConflict, - attachments.delete, - req, - FAKE_UUID1, - FAKE_NET_ID1) - - -class 
InterfaceAttachTestsWithMock(test.NoDBTestCase): - def setUp(self): - super(InterfaceAttachTestsWithMock, self).setUp() - self.flags(auth_strategy=None, group='neutron') - self.flags(url='http://anyhost/', group='neutron') - self.flags(url_timeout=30, group='neutron') - self.context = context.get_admin_context() - - @mock.patch.object(compute_api.API, 'get') - @mock.patch.object(compute_api.API, 'attach_interface') - def test_attach_interface_fixed_ip_already_in_use(self, - attach_mock, - get_mock): - get_mock.side_effect = fake_get_instance - attach_mock.side_effect = exception.FixedIpAlreadyInUse( - address='10.0.3.2', instance_uuid=FAKE_UUID1) - attachments = attach_interfaces.InterfaceAttachmentController() - req = webob.Request.blank( - '/v3/servers/fake/os-attach-interfaces/attach') - req.method = 'POST' - req.body = jsonutils.dumps({}) - req.headers['content-type'] = 'application/json' - req.environ['nova.context'] = self.context - self.assertRaises(exc.HTTPBadRequest, - attachments.create, req, FAKE_UUID1, - body=jsonutils.loads(req.body)) - attach_mock.assert_called_once_with(self.context, {}, None, None, None) - get_mock.assert_called_once_with(self.context, FAKE_UUID1, - want_objects=True, - expected_attrs=None) diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl index e0fcbe86a0..11dcf64373 100644 --- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl @@ -1,5 +1,5 @@ { - "interface_attachment": { + "interfaceAttachment": { "port_id": "ce531f90-199f-48c0-816c-13e38010b442" } } diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl 
b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl index 93b68d9c69..9dff234366 100644 --- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl @@ -1,5 +1,5 @@ { - "interface_attachment": { + "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3", diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl index 9d977378b7..192f9a6487 100644 --- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl @@ -1,5 +1,5 @@ { - "interface_attachments": [ + "interfaceAttachments": [ { "fixed_ips": [ { diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl index 93b68d9c69..9dff234366 100644 --- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl @@ -1,5 +1,5 @@ { - "interface_attachment": { + "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3", From 73c2c2db69f1945a267855ad052e6f1dee4ab9d5 Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Fri, 15 Aug 2014 14:49:26 +0800 Subject: [PATCH 474/486] Change ViewBuilder into v2.1 for servers This patch changes v3 servers' ViewBuilder to v2.1. And also change related unittests and api sample tests. The differences between v2 and v3 are described on the wiki page https://wiki.openstack.org/wiki/NovaAPIv2tov3. 
Partially implements blueprint v2-on-v3-api Change-Id: Ie68d46daa91ef43c84eeef479c0986d5e7f15719 --- .../all_extensions/server-get-resp.json | 4 +- .../all_extensions/servers-details-resp.json | 4 +- .../server-action-rebuild-resp.json | 4 +- .../os-access-ips/server-get-resp.json | 4 +- .../os-access-ips/server-put-resp.json | 4 +- .../os-access-ips/servers-details-resp.json | 4 +- .../server-config-drive-get-resp.json | 4 +- .../servers-config-drive-details-resp.json | 4 +- .../server-get-resp.json | 4 +- .../servers-detail-resp.json | 4 +- .../server-get-resp.json | 4 +- .../servers-detail-resp.json | 4 +- .../os-extended-status/server-get-resp.json | 4 +- .../servers-detail-resp.json | 4 +- .../os-extended-volumes/server-get-resp.json | 4 +- .../servers-detail-resp.json | 4 +- .../server-get-resp.json | 4 +- .../servers-details-resp.json | 4 +- .../api_samples/os-pci/server-get-resp.json | 4 +- .../os-pci/servers-detail-resp.json | 4 +- .../os-rescue/server-get-resp-rescue.json | 4 +- .../os-rescue/server-get-resp-unrescue.json | 4 +- .../os-security-groups/server-get-resp.json | 4 +- .../servers-detail-resp.json | 4 +- .../os-server-usage/server-get-resp.json | 4 +- .../os-server-usage/servers-detail-resp.json | 4 +- .../servers/server-action-rebuild-resp.json | 4 +- .../api_samples/servers/server-get-resp.json | 4 +- .../servers/servers-details-resp.json | 4 +- nova/api/openstack/compute/views/images.py | 17 ------- nova/api/openstack/compute/views/servers.py | 12 ++--- .../compute/plugins/v3/test_servers.py | 45 +++++++++---------- .../all_extensions/server-get-resp.json.tpl | 4 +- .../servers-details-resp.json.tpl | 4 +- .../server-action-rebuild-resp.json.tpl | 4 +- .../os-access-ips/server-get-resp.json.tpl | 4 +- .../os-access-ips/server-put-resp.json.tpl | 4 +- .../servers-details-resp.json.tpl | 4 +- .../server-config-drive-get-resp.json.tpl | 4 +- ...servers-config-drive-details-resp.json.tpl | 4 +- .../server-get-resp.json.tpl | 4 +- 
.../servers-detail-resp.json.tpl | 4 +- .../server-get-resp.json.tpl | 4 +- .../servers-detail-resp.json.tpl | 4 +- .../server-get-resp.json.tpl | 4 +- .../servers-detail-resp.json.tpl | 4 +- .../server-get-resp.json.tpl | 4 +- .../servers-detail-resp.json.tpl | 4 +- .../server-get-resp.json.tpl | 4 +- .../servers-details-resp.json.tpl | 4 +- .../os-pci/server-get-resp.json.tpl | 4 +- .../os-pci/servers-detail-resp.json.tpl | 4 +- .../os-rescue/server-get-resp-rescue.json.tpl | 4 +- .../server-get-resp-unrescue.json.tpl | 4 +- .../server-get-resp.json.tpl | 4 +- .../servers-detail-resp.json.tpl | 4 +- .../os-server-usage/server-get-resp.json.tpl | 4 +- .../servers-detail-resp.json.tpl | 4 +- ...n-rebuild-preserve-ephemeral-resp.json.tpl | 4 +- .../server-action-rebuild-resp.json.tpl | 4 +- .../servers/server-get-resp.json.tpl | 4 +- .../servers/servers-details-resp.json.tpl | 4 +- 62 files changed, 145 insertions(+), 165 deletions(-) diff --git a/doc/v3/api_samples/all_extensions/server-get-resp.json b/doc/v3/api_samples/all_extensions/server-get-resp.json index 8826f360dc..867d24c025 100644 --- a/doc/v3/api_samples/all_extensions/server-get-resp.json +++ b/doc/v3/api_samples/all_extensions/server-get-resp.json @@ -20,13 +20,13 @@ } ] }, - "host_id": "9cc36101a27c2a69c1a18241f6228454d9d7f466bd90c62db8e8b856", + "hostId": "9cc36101a27c2a69c1a18241f6228454d9d7f466bd90c62db8e8b856", "id": "f474386b-4fb6-4e1f-b1d5-d6bf4437f7d5", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/all_extensions/servers-details-resp.json b/doc/v3/api_samples/all_extensions/servers-details-resp.json index 9467fb4a40..0047ffa436 100644 --- a/doc/v3/api_samples/all_extensions/servers-details-resp.json +++ 
b/doc/v3/api_samples/all_extensions/servers-details-resp.json @@ -21,13 +21,13 @@ } ] }, - "host_id": "f1e160ad2bf07084f3d3e0dfdd0795d80da18a60825322c15775c0dd", + "hostId": "f1e160ad2bf07084f3d3e0dfdd0795d80da18a60825322c15775c0dd", "id": "9cbefc35-d372-40c5-88e2-9fda1b6ea12c", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-access-ips/server-action-rebuild-resp.json b/doc/v3/api_samples/os-access-ips/server-action-rebuild-resp.json index 22e09b6f7b..5c0013842f 100644 --- a/doc/v3/api_samples/os-access-ips/server-action-rebuild-resp.json +++ b/doc/v3/api_samples/os-access-ips/server-action-rebuild-resp.json @@ -21,13 +21,13 @@ } ] }, - "host_id": "5c8072dbcda8ce3f26deb6662bd7718e1a6d349bdf2296911d1be4ac", + "hostId": "5c8072dbcda8ce3f26deb6662bd7718e1a6d349bdf2296911d1be4ac", "id": "53a63a19-c145-47f8-9ae5-b39d6bff33ec", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-access-ips/server-get-resp.json b/doc/v3/api_samples/os-access-ips/server-get-resp.json index 5810f469ad..e0ce062060 100644 --- a/doc/v3/api_samples/os-access-ips/server-get-resp.json +++ b/doc/v3/api_samples/os-access-ips/server-get-resp.json @@ -20,13 +20,13 @@ } ] }, - "host_id": "b3a6fd97c027e18d6d9c7506eea8a236cf2ceca420cfdfe0239a64a8", + "hostId": "b3a6fd97c027e18d6d9c7506eea8a236cf2ceca420cfdfe0239a64a8", "id": "5eedbf0c-c303-4ed3-933a-a4d3732cfa0a", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": 
"http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-access-ips/server-put-resp.json b/doc/v3/api_samples/os-access-ips/server-put-resp.json index 620574c5c6..f8f0cf3d9b 100644 --- a/doc/v3/api_samples/os-access-ips/server-put-resp.json +++ b/doc/v3/api_samples/os-access-ips/server-put-resp.json @@ -20,13 +20,13 @@ } ] }, - "host_id": "ea0fd522e5bc2fea872429b331304a6f930f2d9aa2a5dc95b3c6061a", + "hostId": "ea0fd522e5bc2fea872429b331304a6f930f2d9aa2a5dc95b3c6061a", "id": "fea9595c-ce6e-4565-987e-2d301fe056ac", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-access-ips/servers-details-resp.json b/doc/v3/api_samples/os-access-ips/servers-details-resp.json index c757662332..bce7408599 100644 --- a/doc/v3/api_samples/os-access-ips/servers-details-resp.json +++ b/doc/v3/api_samples/os-access-ips/servers-details-resp.json @@ -21,13 +21,13 @@ } ] }, - "host_id": "9896cb12c9845becf1b9b06c8ff5b131d20300f83e2cdffc92e3f4a4", + "hostId": "9896cb12c9845becf1b9b06c8ff5b131d20300f83e2cdffc92e3f4a4", "id": "934760e1-2b0b-4f9e-a916-eac1e69839dc", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-config-drive/server-config-drive-get-resp.json b/doc/v3/api_samples/os-config-drive/server-config-drive-get-resp.json index d31c241f1a..1eb56f1cc8 100644 --- 
a/doc/v3/api_samples/os-config-drive/server-config-drive-get-resp.json +++ b/doc/v3/api_samples/os-config-drive/server-config-drive-get-resp.json @@ -20,13 +20,13 @@ } ] }, - "host_id": "1642bbdbd61a0f1c513b4bb6e418326103172698104bfa278eca106b", + "hostId": "1642bbdbd61a0f1c513b4bb6e418326103172698104bfa278eca106b", "id": "7838ff1b-b71f-48b9-91e9-7c08de20b249", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json b/doc/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json index 97b96e7a7d..fbc627f34f 100644 --- a/doc/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json +++ b/doc/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json @@ -21,13 +21,13 @@ } ] }, - "host_id": "1ed067c90341cd9d94bbe5da960922b56f107262cdc75719a0d97b78", + "hostId": "1ed067c90341cd9d94bbe5da960922b56f107262cdc75719a0d97b78", "id": "f0318e69-11eb-4aed-9840-59b6c72beee8", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-extended-availability-zone/server-get-resp.json b/doc/v3/api_samples/os-extended-availability-zone/server-get-resp.json index 940f125cf9..2f4ca3724d 100644 --- a/doc/v3/api_samples/os-extended-availability-zone/server-get-resp.json +++ b/doc/v3/api_samples/os-extended-availability-zone/server-get-resp.json @@ -20,13 +20,13 @@ } ] }, - "host_id": "b75d6736650f9b272223ceb48f4cde001de1856e381613a922117ab7", + "hostId": "b75d6736650f9b272223ceb48f4cde001de1856e381613a922117ab7", 
"id": "f22e4521-d03a-4e9f-9fd3-016b9e227219", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json b/doc/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json index 9563a0acde..de70fcee6e 100644 --- a/doc/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json +++ b/doc/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json @@ -21,13 +21,13 @@ } ] }, - "host_id": "cf5540800371e53064a60b36ff9d6d1d6a8719ffc870c63a270c6bee", + "hostId": "cf5540800371e53064a60b36ff9d6d1d6a8719ffc870c63a270c6bee", "id": "55f43fa2-dc7c-4c0b-b21a-76f9abe516f9", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-extended-server-attributes/server-get-resp.json b/doc/v3/api_samples/os-extended-server-attributes/server-get-resp.json index 24c54d9c5d..34f9a0b788 100644 --- a/doc/v3/api_samples/os-extended-server-attributes/server-get-resp.json +++ b/doc/v3/api_samples/os-extended-server-attributes/server-get-resp.json @@ -20,13 +20,13 @@ } ] }, - "host_id": "3bf189131c61d0e71b0a8686a897a0f50d1693b48c47b721fe77155b", + "hostId": "3bf189131c61d0e71b0a8686a897a0f50d1693b48c47b721fe77155b", "id": "c278163e-36f9-4cf2-b1ac-80db4c63f7a8", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] 
diff --git a/doc/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json b/doc/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json index 7236315943..961448ad02 100644 --- a/doc/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json +++ b/doc/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json @@ -21,13 +21,13 @@ } ] }, - "host_id": "63cf07a9fd82e1d2294926ec5c0d2e1e0ca449224246df75e16f23dc", + "hostId": "63cf07a9fd82e1d2294926ec5c0d2e1e0ca449224246df75e16f23dc", "id": "a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-extended-status/server-get-resp.json b/doc/v3/api_samples/os-extended-status/server-get-resp.json index 6402094949..a4fc6b56ba 100644 --- a/doc/v3/api_samples/os-extended-status/server-get-resp.json +++ b/doc/v3/api_samples/os-extended-status/server-get-resp.json @@ -20,13 +20,13 @@ } ] }, - "host_id": "46d2aa2d637bd55606304b611a1928627ee1278c149aef2206268d6e", + "hostId": "46d2aa2d637bd55606304b611a1928627ee1278c149aef2206268d6e", "id": "a868cb5e-c794-47bf-9cd8-e302b72bb94b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-extended-status/servers-detail-resp.json b/doc/v3/api_samples/os-extended-status/servers-detail-resp.json index 77fd2afa72..8d2f22bf8c 100644 --- a/doc/v3/api_samples/os-extended-status/servers-detail-resp.json +++ b/doc/v3/api_samples/os-extended-status/servers-detail-resp.json @@ -21,13 +21,13 @@ } ] }, - "host_id": 
"a275e77473e464558c4aba0d68e1914d1164e7ee2f69affde7aaae2b", + "hostId": "a275e77473e464558c4aba0d68e1914d1164e7ee2f69affde7aaae2b", "id": "6c8b5385-e74c-4fd5-add6-2fcf42d74a98", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-extended-volumes/server-get-resp.json b/doc/v3/api_samples/os-extended-volumes/server-get-resp.json index 9f972cb8d1..973c4d40bc 100644 --- a/doc/v3/api_samples/os-extended-volumes/server-get-resp.json +++ b/doc/v3/api_samples/os-extended-volumes/server-get-resp.json @@ -20,13 +20,13 @@ } ] }, - "host_id": "8feef92e2152b9970b51dbdade024afbec7f8f03daf7cb335a3c1cb9", + "hostId": "8feef92e2152b9970b51dbdade024afbec7f8f03daf7cb335a3c1cb9", "id": "7d62983e-23df-4320-bc89-bbc77f2a2e40", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-extended-volumes/servers-detail-resp.json b/doc/v3/api_samples/os-extended-volumes/servers-detail-resp.json index e03394fcd3..762de50de1 100644 --- a/doc/v3/api_samples/os-extended-volumes/servers-detail-resp.json +++ b/doc/v3/api_samples/os-extended-volumes/servers-detail-resp.json @@ -21,13 +21,13 @@ } ] }, - "host_id": "f9622ec1b5ab6e3785661ea1c1e0294f95aecbcf27ac4cb60b06bd02", + "hostId": "f9622ec1b5ab6e3785661ea1c1e0294f95aecbcf27ac4cb60b06bd02", "id": "8e479732-7701-48cd-af7a-04d84f51b742", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": 
"http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-hide-server-addresses/server-get-resp.json b/doc/v3/api_samples/os-hide-server-addresses/server-get-resp.json index 15c89f5499..7ee75f9dc6 100644 --- a/doc/v3/api_samples/os-hide-server-addresses/server-get-resp.json +++ b/doc/v3/api_samples/os-hide-server-addresses/server-get-resp.json @@ -11,13 +11,13 @@ } ] }, - "host_id": "d0635823e9162b22b90ff103f0c30f129bacf6ffb72f4d6fde87e738", + "hostId": "d0635823e9162b22b90ff103f0c30f129bacf6ffb72f4d6fde87e738", "id": "4bdee8c7-507f-40f2-8429-d301edd3791b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-hide-server-addresses/servers-details-resp.json b/doc/v3/api_samples/os-hide-server-addresses/servers-details-resp.json index 6a911ccd04..135a34860b 100644 --- a/doc/v3/api_samples/os-hide-server-addresses/servers-details-resp.json +++ b/doc/v3/api_samples/os-hide-server-addresses/servers-details-resp.json @@ -12,13 +12,13 @@ } ] }, - "host_id": "a4fa72ae8741e5e18fb062c15657b8f689b8da2837b734c61fc9eedd", + "hostId": "a4fa72ae8741e5e18fb062c15657b8f689b8da2837b734c61fc9eedd", "id": "a747eac1-e3ed-446c-935a-c2a2853f919c", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-pci/server-get-resp.json b/doc/v3/api_samples/os-pci/server-get-resp.json index a58574e628..f517aefbb4 100644 --- a/doc/v3/api_samples/os-pci/server-get-resp.json +++ b/doc/v3/api_samples/os-pci/server-get-resp.json @@ 
-20,13 +20,13 @@ } ] }, - "host_id": "b7e88944272df30c113572778bcf5527f02e9c2a745221214536c1a2", + "hostId": "b7e88944272df30c113572778bcf5527f02e9c2a745221214536c1a2", "id": "9dafa6bc-7a9f-45b2-8177-11800ceb7224", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-pci/servers-detail-resp.json b/doc/v3/api_samples/os-pci/servers-detail-resp.json index f38922f3d1..872a5335f0 100644 --- a/doc/v3/api_samples/os-pci/servers-detail-resp.json +++ b/doc/v3/api_samples/os-pci/servers-detail-resp.json @@ -21,13 +21,13 @@ } ] }, - "host_id": "416f83c758ea0f9271018b278a9dcedb91b1190deaa598704b87219b", + "hostId": "416f83c758ea0f9271018b278a9dcedb91b1190deaa598704b87219b", "id": "ef440f98-04e8-46ea-ae74-e24d437040ea", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-rescue/server-get-resp-rescue.json b/doc/v3/api_samples/os-rescue/server-get-resp-rescue.json index 1fa15ecfc9..4df7a7cb89 100644 --- a/doc/v3/api_samples/os-rescue/server-get-resp-rescue.json +++ b/doc/v3/api_samples/os-rescue/server-get-resp-rescue.json @@ -20,13 +20,13 @@ } ] }, - "host_id": "f04994c5b4aac1cacbb83b09c2506e457d97dd54f620961624574690", + "hostId": "f04994c5b4aac1cacbb83b09c2506e457d97dd54f620961624574690", "id": "2fd0c66b-50af-41d2-9253-9fa41e7e8dd8", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", 
"rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-rescue/server-get-resp-unrescue.json b/doc/v3/api_samples/os-rescue/server-get-resp-unrescue.json index ebed3eeb8d..79ea58d3ce 100644 --- a/doc/v3/api_samples/os-rescue/server-get-resp-unrescue.json +++ b/doc/v3/api_samples/os-rescue/server-get-resp-unrescue.json @@ -20,13 +20,13 @@ } ] }, - "host_id": "53cd4520a6cc639eeabcae4a0512b93e4675d431002e0b60e2dcfc04", + "hostId": "53cd4520a6cc639eeabcae4a0512b93e4675d431002e0b60e2dcfc04", "id": "edfc3905-1f3c-4819-8fc3-a7d8131cfa22", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-security-groups/server-get-resp.json b/doc/v3/api_samples/os-security-groups/server-get-resp.json index c25a140d2a..9993ae9b78 100644 --- a/doc/v3/api_samples/os-security-groups/server-get-resp.json +++ b/doc/v3/api_samples/os-security-groups/server-get-resp.json @@ -20,13 +20,13 @@ } ] }, - "host_id": "0e312d6763795d572ccd716973fd078290d9ec446517b222d3395660", + "hostId": "0e312d6763795d572ccd716973fd078290d9ec446517b222d3395660", "id": "f6961f7a-0133-4f27-94cd-901dca4ba426", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-security-groups/servers-detail-resp.json b/doc/v3/api_samples/os-security-groups/servers-detail-resp.json index 49463446ef..435bbd145d 100644 --- a/doc/v3/api_samples/os-security-groups/servers-detail-resp.json +++ b/doc/v3/api_samples/os-security-groups/servers-detail-resp.json @@ -21,13 +21,13 @@ } ] }, - "host_id": 
"afeeb125d4d37d0a2123e3144a20a6672fda5d4b6cb85ec193430d82", + "hostId": "afeeb125d4d37d0a2123e3144a20a6672fda5d4b6cb85ec193430d82", "id": "1b94e3fc-1b1c-431a-a077-6b280fb720ce", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-server-usage/server-get-resp.json b/doc/v3/api_samples/os-server-usage/server-get-resp.json index 06f977d3c7..5f12283cc7 100644 --- a/doc/v3/api_samples/os-server-usage/server-get-resp.json +++ b/doc/v3/api_samples/os-server-usage/server-get-resp.json @@ -20,13 +20,13 @@ } ] }, - "host_id": "73cf3a40601b63f5992894be2daa3712dd599d1c919984951e21edda", + "hostId": "73cf3a40601b63f5992894be2daa3712dd599d1c919984951e21edda", "id": "cee6d136-e378-4cfc-9eec-71797f025991", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/os-server-usage/servers-detail-resp.json b/doc/v3/api_samples/os-server-usage/servers-detail-resp.json index ad4008e90b..d917fdb3f2 100644 --- a/doc/v3/api_samples/os-server-usage/servers-detail-resp.json +++ b/doc/v3/api_samples/os-server-usage/servers-detail-resp.json @@ -21,13 +21,13 @@ } ] }, - "host_id": "117535ce0eda7ee02ebffe2c976173629385481ae3f2bded5e14a66b", + "hostId": "117535ce0eda7ee02ebffe2c976173629385481ae3f2bded5e14a66b", "id": "ae114799-9164-48f5-a036-6ef9310acbc4", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", 
"rel": "bookmark" } ] diff --git a/doc/v3/api_samples/servers/server-action-rebuild-resp.json b/doc/v3/api_samples/servers/server-action-rebuild-resp.json index abb6dfff88..4c38ad2f8b 100644 --- a/doc/v3/api_samples/servers/server-action-rebuild-resp.json +++ b/doc/v3/api_samples/servers/server-action-rebuild-resp.json @@ -21,13 +21,13 @@ } ] }, - "host_id": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66", + "hostId": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66", "id": "a0a80a94-3d81-4a10-822a-daa0cf9e870b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/servers/server-get-resp.json b/doc/v3/api_samples/servers/server-get-resp.json index 03ec3a9245..fa7708f177 100644 --- a/doc/v3/api_samples/servers/server-get-resp.json +++ b/doc/v3/api_samples/servers/server-get-resp.json @@ -20,13 +20,13 @@ } ] }, - "host_id": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", + "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/doc/v3/api_samples/servers/servers-details-resp.json b/doc/v3/api_samples/servers/servers-details-resp.json index f478ac8fa7..6644953453 100644 --- a/doc/v3/api_samples/servers/servers-details-resp.json +++ b/doc/v3/api_samples/servers/servers-details-resp.json @@ -21,13 +21,13 @@ } ] }, - "host_id": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", + "hostId": 
"bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { - "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", + "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] diff --git a/nova/api/openstack/compute/views/images.py b/nova/api/openstack/compute/views/images.py index b6d735129f..054fcfb1f8 100644 --- a/nova/api/openstack/compute/views/images.py +++ b/nova/api/openstack/compute/views/images.py @@ -147,20 +147,3 @@ def _get_progress(image): "saving": 50, "active": 100, }.get(image.get("status"), 0) - - -class ViewBuilderV3(ViewBuilder): - - def _get_bookmark_link(self, request, identifier, collection_name): - """Create a URL that refers to a specific resource.""" - if collection_name == "images": - glance_url = glance.generate_image_url(identifier) - return self._update_glance_link_prefix(glance_url) - else: - raise NotImplementedError - # NOTE(cyeoh) The V3 version of _get_bookmark_link should - # only ever be called with images as the - # collection_name. The images API has been removed in the - # V3 API and the V3 version of the view only exists for - # the servers view to be able to generate the appropriate - # bookmark link for the image of the instance. diff --git a/nova/api/openstack/compute/views/servers.py b/nova/api/openstack/compute/views/servers.py index 6d97c36933..621e9617c1 100644 --- a/nova/api/openstack/compute/views/servers.py +++ b/nova/api/openstack/compute/views/servers.py @@ -241,7 +241,9 @@ def __init__(self): """Initialize view builder.""" super(ViewBuilderV3, self).__init__() self._address_builder = views_addresses.ViewBuilderV3() - self._image_builder = views_images.ViewBuilderV3() + # TODO(alex_xu): In V3 API, we correct the image bookmark link to + # use glance endpoint. We revert back it to use nova endpoint for v2.1. 
+ self._image_builder = views_images.ViewBuilder() def show(self, request, instance): """Detailed view of a single instance.""" @@ -253,7 +255,10 @@ def show(self, request, instance): "tenant_id": instance.get("project_id") or "", "user_id": instance.get("user_id") or "", "metadata": self._get_metadata(instance), - "host_id": self._get_host_id(instance) or "", + "hostId": self._get_host_id(instance) or "", + # TODO(alex_xu): '_get_image' return {} when there image_ref + # isn't existed in V3 API, we revert it back to return "" in + # V2.1. "image": self._get_image(request, instance), "flavor": self._get_flavor(request, instance), "created": timeutils.isotime(instance["created_at"]), @@ -272,7 +277,4 @@ def show(self, request, instance): if server["server"]["status"] in self._progress_statuses: server["server"]["progress"] = instance.get("progress", 0) - # We should modify the "image" to empty dictionary - if not server["server"]["image"]: - server["server"]["image"] = {} return server diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index 3aff6f5c68..456b2b8e39 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -302,8 +302,8 @@ def return_instance_with_host(self, *args, **kwargs): server1 = self.controller.show(req, FAKE_UUID) server2 = self.controller.show(req, FAKE_UUID) - self.assertNotEqual(server1['server']['host_id'], - server2['server']['host_id']) + self.assertNotEqual(server1['server']['hostId'], + server2['server']['hostId']) def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark, status="ACTIVE", progress=100): @@ -317,7 +317,7 @@ def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark, "progress": progress, "name": "server1", "status": status, - "host_id": '', + "hostId": '', "image": { "id": "10", "links": [ @@ -362,7 +362,7 @@ def 
_get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark, def test_get_server_by_id(self): self.flags(use_ipv6=True) - image_bookmark = "http://localhost:9292/images/10" + image_bookmark = "http://localhost/images/10" flavor_bookmark = "http://localhost/flavors/1" uuid = FAKE_UUID @@ -378,7 +378,7 @@ def test_get_server_by_id(self): self.assertThat(res_dict, matchers.DictMatches(expected_server)) def test_get_server_with_active_status_by_id(self): - image_bookmark = "http://localhost:9292/images/10" + image_bookmark = "http://localhost/images/10" flavor_bookmark = "http://localhost/flavors/1" new_return_server = fakes.fake_instance_get( @@ -395,7 +395,7 @@ def test_get_server_with_active_status_by_id(self): def test_get_server_with_id_image_ref_by_id(self): image_ref = "10" - image_bookmark = "http://localhost:9292/images/10" + image_bookmark = "http://localhost/images/10" flavor_id = "1" flavor_bookmark = "http://localhost/flavors/1" @@ -1225,7 +1225,7 @@ def test_get_all_server_details(self): "links": [ { "rel": "bookmark", - "href": 'http://localhost:9292/images/10', + "href": 'http://localhost/images/10', }, ], } @@ -1234,7 +1234,7 @@ def test_get_all_server_details(self): for i, s in enumerate(res_dict['servers']): self.assertEqual(s['id'], fakes.get_fake_uuid(i)) - self.assertEqual(s['host_id'], '') + self.assertEqual(s['hostId'], '') self.assertEqual(s['name'], 'server%d' % (i + 1)) self.assertEqual(s['image'], expected_image) self.assertEqual(s['flavor'], expected_flavor) @@ -1243,8 +1243,8 @@ def test_get_all_server_details(self): def test_get_all_server_details_with_host(self): """We want to make sure that if two instances are on the same host, - then they return the same host_id. If two instances are on different - hosts, they should return different host_ids. In this test, + then they return the same hostId. If two instances are on different + hosts, they should return different hostIds. 
In this test, there are 5 instances - 2 on one host and 3 on another. """ @@ -1260,13 +1260,13 @@ def return_servers_with_host(context, *args, **kwargs): res_dict = self.controller.detail(req) server_list = res_dict['servers'] - host_ids = [server_list[0]['host_id'], server_list[1]['host_id']] + host_ids = [server_list[0]['hostId'], server_list[1]['hostId']] self.assertTrue(host_ids[0] and host_ids[1]) self.assertNotEqual(host_ids[0], host_ids[1]) for i, s in enumerate(server_list): self.assertEqual(s['id'], fakes.get_fake_uuid(i)) - self.assertEqual(s['host_id'], host_ids[i % 2]) + self.assertEqual(s['hostId'], host_ids[i % 2]) self.assertEqual(s['name'], 'server%d' % (i + 1)) def test_get_servers_joins_pci_devices(self): @@ -2676,7 +2676,7 @@ def test_build_server_with_project_id(self): self.assertThat(output, matchers.DictMatches(expected_server)) def test_build_server_detail(self): - image_bookmark = "http://localhost:9292/images/5" + image_bookmark = "http://localhost/images/5" flavor_bookmark = "http://localhost/flavors/1" self_link = "http://localhost/v3/servers/%s" % self.uuid bookmark_link = "http://localhost/servers/%s" % self.uuid @@ -2690,7 +2690,7 @@ def test_build_server_detail(self): "progress": 0, "name": "test_server", "status": "BUILD", - "host_id": '', + "hostId": '', "image": { "id": "5", "links": [ @@ -2734,17 +2734,12 @@ def test_build_server_detail(self): output = self.view_builder.show(self.request, self.instance) self.assertThat(output, matchers.DictMatches(expected_server)) - def test_build_server_no_image(self): - self.instance["image_ref"] = "" - output = self.view_builder.show(self.request, self.instance) - self.assertEqual(output['server']['image'], {}) - def test_build_server_detail_with_fault(self): self.instance['vm_state'] = vm_states.ERROR self.instance['fault'] = fake_instance.fake_fault_obj( self.request.context, self.uuid) - image_bookmark = "http://localhost:9292/images/5" + image_bookmark = "http://localhost/images/5" 
flavor_bookmark = "http://localhost/flavors/1" self_link = "http://localhost/v3/servers/%s" % self.uuid bookmark_link = "http://localhost/servers/%s" % self.uuid @@ -2757,7 +2752,7 @@ def test_build_server_detail_with_fault(self): "created": "2010-10-10T12:00:00Z", "name": "test_server", "status": "ERROR", - "host_id": '', + "hostId": '', "image": { "id": "5", "links": [ @@ -2893,7 +2888,7 @@ def test_build_server_detail_active_status(self): # set the power state of the instance to running self.instance['vm_state'] = vm_states.ACTIVE self.instance['progress'] = 100 - image_bookmark = "http://localhost:9292/images/5" + image_bookmark = "http://localhost/images/5" flavor_bookmark = "http://localhost/flavors/1" self_link = "http://localhost/v3/servers/%s" % self.uuid bookmark_link = "http://localhost/servers/%s" % self.uuid @@ -2907,7 +2902,7 @@ def test_build_server_detail_active_status(self): "progress": 100, "name": "test_server", "status": "ACTIVE", - "host_id": '', + "hostId": '', "image": { "id": "5", "links": [ @@ -2958,7 +2953,7 @@ def test_build_server_detail_with_metadata(self): metadata = nova_utils.metadata_to_dict(metadata) self.instance['metadata'] = metadata - image_bookmark = "http://localhost:9292/images/5" + image_bookmark = "http://localhost/images/5" flavor_bookmark = "http://localhost/flavors/1" self_link = "http://localhost/v3/servers/%s" % self.uuid bookmark_link = "http://localhost/servers/%s" % self.uuid @@ -2972,7 +2967,7 @@ def test_build_server_detail_with_metadata(self): "progress": 0, "name": "test_server", "status": "BUILD", - "host_id": '', + "hostId": '', "image": { "id": "5", "links": [ diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl index e000296910..8a4b71db8c 100644 --- a/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl +++ 
b/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl @@ -22,13 +22,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl index 652714cf0f..bc4ae2426d 100644 --- a/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl @@ -23,13 +23,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl index 7f9a09d0df..bb115f6085 100644 --- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl @@ -21,13 +21,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(image_id)s", "links": [ { - "href": "%(glance_host)s/images/%(image_id)s", + "href": "%(host)s/images/%(image_id)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl index 24b097f18f..9454f2be6d 100644 --- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl @@ 
-22,13 +22,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl index 925a162c30..7e4d0dd7aa 100644 --- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl @@ -22,13 +22,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl index 7a14faf6f0..447fa00500 100644 --- a/nova/tests/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl @@ -23,13 +23,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl index 13f51f5875..a7a42de048 100644 --- a/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl @@ -21,13 +21,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": 
"%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl index cc7fe80d46..65a1bd1f99 100644 --- a/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl @@ -22,13 +22,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl index e9852475e0..a835dbf54f 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl @@ -22,13 +22,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl index 1d69092b09..2d2b4a8c1b 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl @@ -23,13 +23,13 @@ } ] }, - 
"host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl index acb0ed6c3d..6a2cc3152b 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl @@ -24,13 +24,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl index 115bba4df4..88a3fda9d5 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl @@ -25,13 +25,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl index a416cc6fc5..faaa83f54a 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl @@ -25,13 +25,13 @@ } ] }, - 
"host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl index 06eb488262..decce080eb 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl @@ -26,13 +26,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl index b02b77b365..3b38100e51 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl @@ -20,13 +20,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl index 3d6b230170..e2561549a4 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl @@ -22,13 +22,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": 
"%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl index ab38bd53f5..3a69fcd321 100644 --- a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl @@ -12,13 +12,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl index 185905c922..353d29f480 100644 --- a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl @@ -12,13 +12,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl index 920f9c8792..84ad950359 100644 --- a/nova/tests/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl @@ -20,13 +20,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": 
"%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl index 265045ff7b..3b2a344d0c 100644 --- a/nova/tests/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl @@ -21,13 +21,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl b/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl index 7eae80526d..a7fb13e958 100644 --- a/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl @@ -20,13 +20,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl b/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl index e56a1a5d77..e3c28e9de2 100644 --- a/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl @@ -20,13 +20,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git 
a/nova/tests/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl index fc3bd9ea1d..8c11a95a24 100644 --- a/nova/tests/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl @@ -20,13 +20,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl index 3306b40a9c..37e4621c73 100644 --- a/nova/tests/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl @@ -22,13 +22,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl index 6732556224..7cc853f8fd 100644 --- a/nova/tests/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl @@ -22,13 +22,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl 
b/nova/tests/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl index 1adbde9767..458276dc29 100644 --- a/nova/tests/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl @@ -32,7 +32,7 @@ "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] @@ -50,7 +50,7 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "metadata": { "My Server Name": "Apache1" } diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl index 5802293c04..8c8c124b0a 100644 --- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl @@ -21,13 +21,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl index 5802293c04..8c8c124b0a 100644 --- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl @@ -21,13 +21,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(uuid)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/servers/server-get-resp.json.tpl 
b/nova/tests/integrated/v3/api_samples/servers/server-get-resp.json.tpl index 37cca80f1b..b37fa3d128 100644 --- a/nova/tests/integrated/v3/api_samples/servers/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/servers/server-get-resp.json.tpl @@ -20,13 +20,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] diff --git a/nova/tests/integrated/v3/api_samples/servers/servers-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/servers/servers-details-resp.json.tpl index c72018e107..a3d63d064f 100644 --- a/nova/tests/integrated/v3/api_samples/servers/servers-details-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/servers/servers-details-resp.json.tpl @@ -21,13 +21,13 @@ } ] }, - "host_id": "%(hostid)s", + "hostId": "%(hostid)s", "id": "%(id)s", "image": { "id": "%(uuid)s", "links": [ { - "href": "%(glance_host)s/images/%(uuid)s", + "href": "%(host)s/images/%(uuid)s", "rel": "bookmark" } ] From 95d5aa25ec32741e1b2af6bf6e6f13fd1c6466ae Mon Sep 17 00:00:00 2001 From: He Jie Xu Date: Mon, 18 Aug 2014 09:47:20 +0800 Subject: [PATCH 475/486] Change 'changes_since'/'changes-since' into v2.1 style for servers This patch changes 'changes_since' to 'change-since' that same with v2 api. And also change the related unittest. The differences between v2 and v3 are described on the wiki page https://wiki.openstack.org/wiki/NovaAPIv2tov3. 
Partially implements blueprint v2-on-v3-api Change-Id: If2a95827965bd6955b01190f2d4dd4fcd9c76dc9 --- .../openstack/compute/plugins/v3/servers.py | 19 ++++++++----------- .../compute/plugins/v3/test_servers.py | 4 ++-- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index f0763acafb..13dbd58d73 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -228,28 +228,25 @@ def _get_servers(self, req, is_detail): if 'default' not in task_state: search_opts['task_state'] = task_state - if 'changes_since' in search_opts: + if 'changes-since' in search_opts: try: - parsed = timeutils.parse_isotime(search_opts['changes_since']) + parsed = timeutils.parse_isotime(search_opts['changes-since']) except ValueError: - msg = _('Invalid changes_since value') + msg = _('Invalid changes-since value') raise exc.HTTPBadRequest(explanation=msg) - search_opts['changes_since'] = parsed + search_opts['changes-since'] = parsed # By default, compute's get_all() will return deleted instances. # If an admin hasn't specified a 'deleted' search option, we need # to filter out deleted instances by setting the filter ourselves. - # ... Unless 'changes_since' is specified, because 'changes_since' + # ... Unless 'changes-since' is specified, because 'changes-since' # should return recently deleted images according to the API spec. 
if 'deleted' not in search_opts: - if 'changes_since' not in search_opts: - # No 'changes_since', so we only want non-deleted servers + if 'changes-since' not in search_opts: + # No 'changes-since', so we only want non-deleted servers search_opts['deleted'] = False - if 'changes_since' in search_opts: - search_opts['changes-since'] = search_opts.pop('changes_since') - if search_opts.get("vm_state") == ['deleted']: if context.is_admin: search_opts['deleted'] = True @@ -997,7 +994,7 @@ def _validate_admin_password(self, password): def _get_server_search_options(self): """Return server search options allowed by non-admin.""" return ('reservation_id', 'name', 'status', 'image', 'flavor', - 'ip', 'changes_since', 'all_tenants') + 'ip', 'changes-since', 'all_tenants') def _get_instance(self, context, instance_uuid): try: diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index 456b2b8e39..b830444b8e 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -1083,7 +1083,7 @@ def fake_get_all(compute_self, context, search_opts=None, self.stubs.Set(compute_api.API, 'get_all', fake_get_all) - params = 'changes_since=2011-01-24T17:08:01Z' + params = 'changes-since=2011-01-24T17:08:01Z' req = fakes.HTTPRequestV3.blank('/servers?%s' % params) servers = self.controller.index(req)['servers'] @@ -1091,7 +1091,7 @@ def fake_get_all(compute_self, context, search_opts=None, self.assertEqual(servers[0]['id'], server_uuid) def test_get_servers_allows_changes_since_bad_value(self): - params = 'changes_since=asdf' + params = 'changes-since=asdf' req = fakes.HTTPRequestV3.blank('/servers?%s' % params) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) From b32912f8b47f50f72a59d97387fc7f28e3e096a1 Mon Sep 17 00:00:00 2001 From: Nikola Dipanov Date: Sat, 23 Aug 2014 20:34:55 +0200 Subject: [PATCH 
476/486] Make Object FieldType from_primitive pass objects In case we have a ListOfObjects field on an object with a remotable method, due to NovaObjectsSerializer now looking into dicts recursively, and thus also into the updates dict that object_action returns, it is possible that objects inside a list have already been deserialized, so we make the Fields from_primitive just pass them on, since the job was already done by the NovaObjectsSerializer. Change-Id: Ib2a34a115cb2d0a2a0765c81d5dd7ef331077eb5 Closes-bug: #1360656 --- nova/objects/fields.py | 4 ++++ nova/tests/objects/test_fields.py | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/nova/objects/fields.py b/nova/objects/fields.py index ecdcbde361..4270d52beb 100644 --- a/nova/objects/fields.py +++ b/nova/objects/fields.py @@ -458,6 +458,10 @@ def to_primitive(obj, attr, value): def from_primitive(obj, attr, value): # FIXME(danms): Avoid circular import from base.py from nova.objects import base as obj_base + # NOTE (ndipanov): If they already got hydrated by the serializer, just + # pass them back unchanged + if isinstance(value, obj_base.NovaObject): + return value return obj_base.NovaObject.obj_from_primitive(value, obj._context) def describe(self): diff --git a/nova/tests/objects/test_fields.py b/nova/tests/objects/test_fields.py index 7fc6aeceb2..b3734e4f28 100644 --- a/nova/tests/objects/test_fields.py +++ b/nova/tests/objects/test_fields.py @@ -296,7 +296,8 @@ class OtherTestableObject(obj_base.NovaObject): self.coerce_bad_values = [OtherTestableObject(), 1, 'foo'] self.to_primitive_values = [(test_inst, test_inst.obj_to_primitive())] self.from_primitive_values = [(test_inst.obj_to_primitive(), - test_inst)] + test_inst), + (test_inst, test_inst)] def test_stringify(self): obj = self._test_cls(uuid='fake-uuid') From 5e6b6b044b2ce3d2ae033ce756dc7473ade9c134 Mon Sep 17 00:00:00 2001 From: Eli Qiao Date: Tue, 12 Aug 2014 12:40:55 +0800 Subject: [PATCH 477/486] Change v3 admin-password 
to v2.1 This patch changes v3 admin-password API to v2.1 and backport v3 unit test cases to v2.1. The differences between v2 and v3 are described on the wiki page https://wiki.openstack.org/wiki/NovaAPIv2tov3 Partially implements blueprint v2-on-v3-api Change-Id: I167753eddd188120236c51bade549c907c8a8466 --- .../admin-password-change-password.json | 6 +- .../compute/plugins/v3/admin_password.py | 11 ++-- .../compute/schemas/v3/admin_password.py | 8 +-- .../v3 => contrib}/test_admin_password.py | 58 +++++++++---------- .../admin-password-change-password.json.tpl | 4 +- .../integrated/v3/test_admin_password.py | 2 +- 6 files changed, 43 insertions(+), 46 deletions(-) rename nova/tests/api/openstack/compute/{plugins/v3 => contrib}/test_admin_password.py (67%) diff --git a/doc/v3/api_samples/os-admin-password/admin-password-change-password.json b/doc/v3/api_samples/os-admin-password/admin-password-change-password.json index 6fbfbea80f..94855a4e8c 100644 --- a/doc/v3/api_samples/os-admin-password/admin-password-change-password.json +++ b/doc/v3/api_samples/os-admin-password/admin-password-change-password.json @@ -1,5 +1,5 @@ { - "change_password" : { - "admin_password" : "foo" + "changePassword" : { + "adminPass" : "foo" } -} \ No newline at end of file +} diff --git a/nova/api/openstack/compute/plugins/v3/admin_password.py b/nova/api/openstack/compute/plugins/v3/admin_password.py index 7bf0f86529..4bb45bbe1c 100644 --- a/nova/api/openstack/compute/plugins/v3/admin_password.py +++ b/nova/api/openstack/compute/plugins/v3/admin_password.py @@ -34,15 +34,18 @@ def __init__(self, *args, **kwargs): super(AdminPasswordController, self).__init__(*args, **kwargs) self.compute_api = compute.API() - @wsgi.action('change_password') - @wsgi.response(204) + # TODO(eliqiao): Here should be 204(No content) instead of 202 by v2.1 + # +micorversions because the password has been changed when returning + # a response. 
+ @wsgi.action('changePassword') + @wsgi.response(202) @extensions.expected_errors((400, 404, 409, 501)) @validation.schema(admin_password.change_password) def change_password(self, req, id, body): context = req.environ['nova.context'] authorize(context) - password = body['change_password']['admin_password'] + password = body['changePassword']['adminPass'] instance = common.get_instance(self.compute_api, context, id, want_objects=True) try: @@ -51,7 +54,7 @@ def change_password(self, req, id, body): raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as e: raise common.raise_http_conflict_for_instance_invalid_state( - e, 'change_password') + e, 'changePassword') except NotImplementedError: msg = _("Unable to set password on instance") raise exc.HTTPNotImplemented(explanation=msg) diff --git a/nova/api/openstack/compute/schemas/v3/admin_password.py b/nova/api/openstack/compute/schemas/v3/admin_password.py index 04bcad7bbf..a36b70950c 100644 --- a/nova/api/openstack/compute/schemas/v3/admin_password.py +++ b/nova/api/openstack/compute/schemas/v3/admin_password.py @@ -18,15 +18,15 @@ change_password = { 'type': 'object', 'properties': { - 'change_password': { + 'changePassword': { 'type': 'object', 'properties': { - 'admin_password': parameter_types.admin_password, + 'adminPass': parameter_types.admin_password, }, - 'required': ['admin_password'], + 'required': ['adminPass'], 'additionalProperties': False, }, }, - 'required': ['change_password'], + 'required': ['changePassword'], 'additionalProperties': False, } diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_admin_password.py b/nova/tests/api/openstack/compute/contrib/test_admin_password.py similarity index 67% rename from nova/tests/api/openstack/compute/plugins/v3/test_admin_password.py rename to nova/tests/api/openstack/compute/contrib/test_admin_password.py index 78cd3f50a6..0005d7d2e5 100644 --- 
a/nova/tests/api/openstack/compute/plugins/v3/test_admin_password.py +++ b/nova/tests/api/openstack/compute/contrib/test_admin_password.py @@ -15,7 +15,8 @@ # under the License. import webob -from nova.api.openstack.compute.plugins.v3 import admin_password +from nova.api.openstack.compute.plugins.v3 import admin_password \ + as admin_password_v21 from nova.compute import api as compute_api from nova import exception from nova.openstack.common import jsonutils @@ -45,18 +46,19 @@ def fake_set_admin_password_not_implemented(self, context, instance, raise NotImplementedError() -class AdminPasswordTest(test.NoDBTestCase): +class AdminPasswordTestV21(test.NoDBTestCase): + plugin = admin_password_v21 def setUp(self): - super(AdminPasswordTest, self).setUp() + super(AdminPasswordTestV21, self).setUp() self.stubs.Set(compute_api.API, 'set_admin_password', fake_set_admin_password) self.stubs.Set(compute_api.API, 'get', fake_get) self.app = fakes.wsgi_app_v3(init_only=('servers', - admin_password.ALIAS)) + self.plugin.ALIAS)) - def _make_request(self, url, body): - req = webob.Request.blank(url) + def _make_request(self, body): + req = webob.Request.blank('/v3/servers/1/action') req.method = 'POST' req.body = jsonutils.dumps(body) req.content_type = 'application/json' @@ -64,54 +66,46 @@ def _make_request(self, url, body): return res def test_change_password(self): - url = '/v3/servers/1/action' - body = {'change_password': {'admin_password': 'test'}} - res = self._make_request(url, body) - self.assertEqual(res.status_int, 204) + body = {'changePassword': {'adminPass': 'test'}} + res = self._make_request(body) + self.assertEqual(res.status_int, 202) def test_change_password_empty_string(self): - url = '/v3/servers/1/action' - body = {'change_password': {'admin_password': ''}} - res = self._make_request(url, body) - self.assertEqual(res.status_int, 204) + body = {'changePassword': {'adminPass': ''}} + res = self._make_request(body) + self.assertEqual(res.status_int, 202) def 
test_change_password_with_non_implement(self): - url = '/v3/servers/1/action' - body = {'change_password': {'admin_password': 'test'}} + body = {'changePassword': {'adminPass': 'test'}} self.stubs.Set(compute_api.API, 'set_admin_password', fake_set_admin_password_not_implemented) - res = self._make_request(url, body) + res = self._make_request(body) self.assertEqual(res.status_int, 501) def test_change_password_with_non_existed_instance(self): - url = '/v3/servers/1/action' - body = {'change_password': {'admin_password': 'test'}} + body = {'changePassword': {'adminPass': 'test'}} self.stubs.Set(compute_api.API, 'get', fake_get_non_existent) - res = self._make_request(url, body) + res = self._make_request(body) self.assertEqual(res.status_int, 404) def test_change_password_with_non_string_password(self): - url = '/v3/servers/1/action' - body = {'change_password': {'admin_password': 1234}} - res = self._make_request(url, body) + body = {'changePassword': {'adminPass': 1234}} + res = self._make_request(body) self.assertEqual(res.status_int, 400) def test_change_password_failed(self): - url = '/v3/servers/1/action' - body = {'change_password': {'admin_password': 'test'}} + body = {'changePassword': {'adminPass': 'test'}} self.stubs.Set(compute_api.API, 'set_admin_password', fake_set_admin_password_failed) - res = self._make_request(url, body) + res = self._make_request(body) self.assertEqual(res.status_int, 409) def test_change_password_without_admin_password(self): - url = '/v3/servers/1/action' - body = {'change_password': {}} - res = self._make_request(url, body) + body = {'changPassword': {}} + res = self._make_request(body) self.assertEqual(res.status_int, 400) def test_change_password_none(self): - url = '/v3/servers/1/action' - body = {'change_password': None} - res = self._make_request(url, body) + body = {'changePassword': None} + res = self._make_request(body) self.assertEqual(res.status_int, 400) diff --git 
a/nova/tests/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl b/nova/tests/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl index f58ef6e484..da615718fe 100644 --- a/nova/tests/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl @@ -1,5 +1,5 @@ { - "change_password" : { - "admin_password" : "%(password)s" + "changePassword" : { + "adminPass" : "%(password)s" } } diff --git a/nova/tests/integrated/v3/test_admin_password.py b/nova/tests/integrated/v3/test_admin_password.py index fb4a97e7a9..7b2a858552 100644 --- a/nova/tests/integrated/v3/test_admin_password.py +++ b/nova/tests/integrated/v3/test_admin_password.py @@ -25,5 +25,5 @@ def test_server_password(self): response = self._do_post('servers/%s/action' % uuid, 'admin-password-change-password', subs) - self.assertEqual(response.status, 204) + self.assertEqual(response.status, 202) self.assertEqual(response.read(), "") From 2341ccb2ffaaacd493043073a7c730582b1288ec Mon Sep 17 00:00:00 2001 From: Nikola Dipanov Date: Tue, 15 Jul 2014 12:10:45 +0200 Subject: [PATCH 478/486] Add numa_topology column to the compute_node table This patch adds the migration to add the numa_topology column to the compute_node table. It also adds the field to the ORM model and the Nova object class. 
Change-Id: I4ca994662ed700a907f78aa04016b626fc351b25 Blueprint: virt-driver-numa-placement --- .../251_add_numa_topology_to_comput_nodes.py | 41 +++++++++++++++++++ nova/db/sqlalchemy/models.py | 4 ++ nova/objects/compute_node.py | 10 ++++- nova/objects/service.py | 15 ++++++- nova/tests/compute/test_compute.py | 1 + nova/tests/compute/test_multiple_nodes.py | 1 + nova/tests/db/test_db_api.py | 2 +- nova/tests/db/test_migrations.py | 18 ++++++++ nova/tests/objects/test_compute_node.py | 11 +++++ nova/tests/objects/test_objects.py | 10 ++--- 10 files changed, 103 insertions(+), 10 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/251_add_numa_topology_to_comput_nodes.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/251_add_numa_topology_to_comput_nodes.py b/nova/db/sqlalchemy/migrate_repo/versions/251_add_numa_topology_to_comput_nodes.py new file mode 100644 index 0000000000..c2510d9b07 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/251_add_numa_topology_to_comput_nodes.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from sqlalchemy import Column +from sqlalchemy import MetaData +from sqlalchemy import Table +from sqlalchemy import Text + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + compute_nodes = Table('compute_nodes', meta, autoload=True) + shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True) + + numa_topology = Column('numa_topology', Text, nullable=True) + shadow_numa_topology = Column('numa_topology', Text, nullable=True) + compute_nodes.create_column(numa_topology) + shadow_compute_nodes.create_column(shadow_numa_topology) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + compute_nodes = Table('compute_nodes', meta, autoload=True) + shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True) + + compute_nodes.drop_column('numa_topology') + shadow_compute_nodes.drop_column('numa_topology') diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 9f985963c9..bf52b5622b 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -131,6 +131,10 @@ class ComputeNode(BASE, NovaBase): # json-encode string containing compute node statistics stats = Column(Text, default='{}') + # json-encoded dict that contains NUMA topology as generated by + # nova.virt.hardware.VirtNUMAHostTopology.to_json() + numa_topology = Column(Text) + class Certificate(BASE, NovaBase): """Represents a x509 certificate.""" diff --git a/nova/objects/compute_node.py b/nova/objects/compute_node.py index 27b52b1294..6ed291ac18 100644 --- a/nova/objects/compute_node.py +++ b/nova/objects/compute_node.py @@ -27,7 +27,8 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject): # Version 1.2: String attributes updated to support unicode # Version 1.3: Added stats field # Version 1.4: Added host ip field - VERSION = '1.4' + # Version 1.5: Added numa_topology field + VERSION = '1.5' fields = { 'id': fields.IntegerField(read_only=True), @@ -50,10 
+51,13 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject): 'metrics': fields.StringField(nullable=True), 'stats': fields.DictOfNullableStringsField(nullable=True), 'host_ip': fields.IPAddressField(nullable=True), + 'numa_topology': fields.StringField(nullable=True), } def obj_make_compatible(self, primitive, target_version): target_version = utils.convert_version_to_tuple(target_version) + if target_version < (1, 5) and 'numa_topology' in primitive: + del primitive['numa_topology'] if target_version < (1, 4) and 'host_ip' in primitive: del primitive['host_ip'] if target_version < (1, 3) and 'stats' in primitive: @@ -137,7 +141,8 @@ class ComputeNodeList(base.ObjectListBase, base.NovaObject): # Version 1.1 ComputeNode version 1.3 # Version 1.2 Add get_by_service() # Version 1.3 ComputeNode version 1.4 - VERSION = '1.3' + # Version 1.4 ComputeNode version 1.5 + VERSION = '1.4' fields = { 'objects': fields.ListOfObjectsField('ComputeNode'), } @@ -147,6 +152,7 @@ class ComputeNodeList(base.ObjectListBase, base.NovaObject): '1.1': '1.3', '1.2': '1.3', '1.3': '1.4', + '1.4': '1.5', } @base.remotable_classmethod diff --git a/nova/objects/service.py b/nova/objects/service.py index 00fb8ac683..23bab6fa10 100644 --- a/nova/objects/service.py +++ b/nova/objects/service.py @@ -19,6 +19,7 @@ from nova.objects import base from nova.objects import fields from nova.openstack.common import log as logging +from nova import utils LOG = logging.getLogger(__name__) @@ -28,7 +29,8 @@ class Service(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added compute_node nested object # Version 1.2: String attributes updated to support unicode - VERSION = '1.2' + # Version 1.3: ComputeNode version 1.5 + VERSION = '1.3' fields = { 'id': fields.IntegerField(read_only=True), @@ -42,6 +44,13 @@ class Service(base.NovaPersistentObject, base.NovaObject): 'compute_node': fields.ObjectField('ComputeNode'), } + def obj_make_compatible(self, 
primitive, target_version): + target_version = utils.convert_version_to_tuple(target_version) + if target_version < (1, 3) and 'compute_node' in primitive: + primitive['compute_node'] = ( + objects.ComputeNode().object_make_compatible( + primitive, '1.4')) + @staticmethod def _do_compute_node(context, service, db_service): try: @@ -128,7 +137,8 @@ def destroy(self, context): class ServiceList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Service <= version 1.2 - VERSION = '1.0' + # Version 1.2 Service version 1.3 + VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('Service'), @@ -136,6 +146,7 @@ class ServiceList(base.ObjectListBase, base.NovaObject): child_versions = { '1.0': '1.2', # NOTE(danms): Service was at 1.2 before we added this + '1.1': '1.3', } @base.remotable_classmethod diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 37bd3df053..1fbaff2a12 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -199,6 +199,7 @@ def fake_get_compute_nodes_in_db(context): 'free_ram_mb': 130560, 'metrics': '', 'stats': '', + 'numa_topology': '', 'id': 2, 'host_ip': '127.0.0.1'}] return [objects.ComputeNode._from_db_object( diff --git a/nova/tests/compute/test_multiple_nodes.py b/nova/tests/compute/test_multiple_nodes.py index 44177141b6..c248ce4594 100644 --- a/nova/tests/compute/test_multiple_nodes.py +++ b/nova/tests/compute/test_multiple_nodes.py @@ -106,6 +106,7 @@ def fake_get_compute_nodes_in_db(context): 'deleted_at': None, 'free_ram_mb': 130560, 'metrics': '', + 'numa_topology': '', 'stats': '', 'id': 2, 'host_ip': '127.0.0.1'}] diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py index 9c5a856a9e..9644337247 100644 --- a/nova/tests/db/test_db_api.py +++ b/nova/tests/db/test_db_api.py @@ -5541,7 +5541,7 @@ def setUp(self): pci_stats='', metrics='', extra_resources='', - stats='') + stats='', numa_topology='') # add some random 
stats self.stats = dict(num_instances=3, num_proj_12345=2, num_proj_23456=2, num_vm_building=3) diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py index 5a6d0da8bb..a85daeb651 100644 --- a/nova/tests/db/test_migrations.py +++ b/nova/tests/db/test_migrations.py @@ -772,6 +772,24 @@ def _post_downgrade_250(self, engine): oslodbutils.get_table(engine, 'instance_group_metadata') oslodbutils.get_table(engine, 'shadow_instance_group_metadata') + def _check_251(self, engine, data): + self.assertColumnExists(engine, 'compute_nodes', 'numa_topology') + self.assertColumnExists( + engine, 'shadow_compute_nodes', 'numa_topology') + + compute_nodes = oslodbutils.get_table(engine, 'compute_nodes') + shadow_compute_nodes = oslodbutils.get_table( + engine, 'shadow_compute_nodes') + self.assertIsInstance(compute_nodes.c.numa_topology.type, + sqlalchemy.types.Text) + self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type, + sqlalchemy.types.Text) + + def _post_downgrade_251(self, engine): + self.assertColumnNotExists(engine, 'compute_nodes', 'numa_topology') + self.assertColumnNotExists( + engine, 'shadow_compute_nodes', 'numa_topology') + class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn): """Test sqlalchemy-migrate migrations.""" diff --git a/nova/tests/objects/test_compute_node.py b/nova/tests/objects/test_compute_node.py index a7b89bc22c..c02646f3e2 100644 --- a/nova/tests/objects/test_compute_node.py +++ b/nova/tests/objects/test_compute_node.py @@ -21,6 +21,7 @@ from nova.openstack.common import jsonutils from nova.openstack.common import timeutils from nova.tests.objects import test_objects +from nova.virt import hardware NOW = timeutils.utcnow().replace(microsecond=0) fake_stats = {'num_foo': '10'} @@ -28,6 +29,10 @@ # host_ip is coerced from a string to an IPAddress # but needs to be converted to a string for the database format fake_host_ip = '127.0.0.1' +fake_numa_topology = hardware.VirtNUMAHostTopology( + 
cells=[hardware.VirtNUMATopologyCellUsage(0, set([1, 2]), 512), + hardware.VirtNUMATopologyCellUsage(1, set([3, 4]), 512)]) +fake_numa_topology_db_format = fake_numa_topology.to_json() fake_compute_node = { 'created_at': NOW, 'updated_at': None, @@ -53,6 +58,7 @@ 'metrics': '', 'stats': fake_stats_db_format, 'host_ip': fake_host_ip, + 'numa_topology': fake_numa_topology_db_format, } @@ -189,6 +195,11 @@ def test_get_by_service(self, service_get): comparators={'stats': self.json_comparator, 'host_ip': self.str_comparator}) + def test_compat_numa_topology(self): + compute = compute_node.ComputeNode() + primitive = compute.obj_to_primitive(target_version='1.4') + self.assertNotIn('numa_topology', primitive) + class TestComputeNodeObject(test_objects._LocalTest, _TestComputeNodeObject): diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py index 0415439971..7897438a76 100644 --- a/nova/tests/objects/test_objects.py +++ b/nova/tests/objects/test_objects.py @@ -930,8 +930,8 @@ def test_object_serialization_iterables(self): 'AggregateList': '1.2-4b02a285b8612bfb86a96ff80052fb0a', 'BlockDeviceMapping': '1.1-9968ffe513e7672484b0f528b034cd0f', 'BlockDeviceMappingList': '1.2-a6df0a8ef84d6bbaba51143499e9bed2', - 'ComputeNode': '1.4-ed20e7a7c1a4612fe7d2836d5887c726', - 'ComputeNodeList': '1.3-1c9c281e02182eabffa6b63ee349996a', + 'ComputeNode': '1.5-57ce5a07c727ffab6c51723bb8dccbfe', + 'ComputeNodeList': '1.4-a993fa58c16f423c72496c7555e99987', 'DNSDomain': '1.0-5bdc288d7c3b723ce86ede998fd5c9ba', 'DNSDomainList': '1.0-cfb3e7e82be661501c31099523154db4', 'EC2InstanceMapping': '1.0-627baaf4b12c9067200979bdc4558a99', @@ -972,8 +972,8 @@ def test_object_serialization_iterables(self): 'SecurityGroupList': '1.0-528e6448adfeeb78921ebeda499ab72f', 'SecurityGroupRule': '1.1-a9175baf7664439af1a16c2010b55576', 'SecurityGroupRuleList': '1.1-667fca3a9928f23d2d10e61962c55f3c', - 'Service': '1.2-5a3df338c669e1148251431370b440ef', - 'ServiceList': 
'1.0-2c960ac9bc56a12c65b9118bb3a58b44', + 'Service': '1.3-5a3df338c669e1148251431370b440ef', + 'ServiceList': '1.1-818bc6a463721e42fbb4fbf6f68c4eeb', 'TestSubclassedObject': '1.6-c63feb2f2533b7d075490c04a2cc10dd', 'VirtualInterface': '1.0-10fdac4c704102b6d57d6936d6d790d2', 'VirtualInterfaceList': '1.0-accbf02628a8063c1d885077a2bf49b6', @@ -991,7 +991,7 @@ def test_object_serialization_iterables(self): 'SecurityGroupList': '1.0'}, 'MyObj': {'MyOwnedObject': '1.0'}, 'SecurityGroupRule': {'SecurityGroup': '1.1'}, - 'Service': {'ComputeNode': '1.4'}, + 'Service': {'ComputeNode': '1.5'}, 'TestSubclassedObject': {'MyOwnedObject': '1.0'} } From e72c5f7ea246230d4763333cb2a2cf0c646b1d16 Mon Sep 17 00:00:00 2001 From: Nikola Dipanov Date: Fri, 22 Aug 2014 18:42:48 +0200 Subject: [PATCH 479/486] Add a Set and SetOfIntegers object fields Object fields that represent sets of things. Will be useful for NUMA topology object which will be added in subsequent patches. Change-Id: I66d61c760a70ad661dff4c40c6933df06192ee8f Blueprint: virt-driver-numa-placement --- nova/objects/fields.py | 28 ++++++++++++++++++++++++++++ nova/tests/objects/test_fields.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/nova/objects/fields.py b/nova/objects/fields.py index 4270d52beb..372e7e600c 100644 --- a/nova/objects/fields.py +++ b/nova/objects/fields.py @@ -434,6 +434,30 @@ def stringify(self, value): for key, val in sorted(value.items())])) +class Set(CompoundFieldType): + def coerce(self, obj, attr, value): + if not isinstance(value, set): + raise ValueError(_('A set is required here')) + + coerced = set() + for element in value: + coerced.add(self._element_type.coerce( + obj, '%s["%s"]' % (attr, element), element)) + return coerced + + def to_primitive(self, obj, attr, value): + return tuple( + self._element_type.to_primitive(obj, attr, x) for x in value) + + def from_primitive(self, obj, attr, value): + return set([self._element_type.from_primitive(obj, attr, x) 
+ for x in value]) + + def stringify(self, value): + return 'set([%s])' % ( + ','.join([self._element_type.stringify(x) for x in value])) + + class Object(FieldType): def __init__(self, obj_name, **kwargs): self._obj_name = obj_name @@ -575,6 +599,10 @@ class ListOfStringsField(AutoTypedField): AUTO_TYPE = List(String()) +class SetOfIntegersField(AutoTypedField): + AUTO_TYPE = Set(Integer()) + + class ListOfDictOfNullableStringsField(AutoTypedField): AUTO_TYPE = List(Dict(String(), nullable=True)) diff --git a/nova/tests/objects/test_fields.py b/nova/tests/objects/test_fields.py index b3734e4f28..cfc5b87d9f 100644 --- a/nova/tests/objects/test_fields.py +++ b/nova/tests/objects/test_fields.py @@ -271,6 +271,34 @@ def test_stringify(self): self.assertEqual("['abc']", self.field.stringify(['abc'])) +class TestSet(TestField): + def setUp(self): + super(TestSet, self).setUp() + self.field = fields.Field(fields.Set(FakeFieldType())) + self.coerce_good_values = [(set(['foo', 'bar']), + set(['*foo*', '*bar*']))] + self.coerce_bad_values = [['foo'], {'foo': 'bar'}] + self.to_primitive_values = [(set(['foo']), tuple(['!foo!']))] + self.from_primitive_values = [(tuple(['!foo!']), set(['foo']))] + + def test_stringify(self): + self.assertEqual('set([123])', self.field.stringify(set([123]))) + + +class TestSetOfIntegers(TestField): + def setUp(self): + super(TestSetOfIntegers, self).setUp() + self.field = fields.SetOfIntegersField() + self.coerce_good_values = [(set(['1', 2]), + set([1, 2]))] + self.coerce_bad_values = [set(['foo'])] + self.to_primitive_values = [(set([1]), tuple([1]))] + self.from_primitive_values = [(tuple([1]), set([1]))] + + def test_stringify(self): + self.assertEqual('set([1,2])', self.field.stringify(set([1, 2]))) + + class TestObject(TestField): def setUp(self): super(TestObject, self).setUp() From f9930a85b02f768d75f0917cbf52c3559d8597b5 Mon Sep 17 00:00:00 2001 From: Eli Qiao Date: Fri, 15 Aug 2014 11:02:32 +0800 Subject: [PATCH 480/486] Change v3 
extended_status to v2.1 This patch changes v3 extended_status API to v2.1 and makes v2 unit tests share between v2 and v2.1. Revert v3 changes back to v2. os-extended-status:* -> OS-EXT-STS:* The differences between v2 and v3 are descibed on the wiki page https://wiki.openstack.org/wiki/NovaAPIv2tov3. Partially implements blueprint v2-on-v3-api Change-Id: Ieefca1d0e8da831830f43cc83eb50bc7e7f4a10c --- .../all_extensions/server-get-resp.json | 8 +- .../all_extensions/servers-details-resp.json | 8 +- .../os-extended-status/server-get-resp.json | 8 +- .../servers-detail-resp.json | 8 +- .../compute/plugins/v3/extended_status.py | 2 +- .../compute/contrib/test_extended_status.py | 51 +++++--- .../plugins/v3/test_extended_status.py | 119 ------------------ .../all_extensions/server-get-resp.json.tpl | 8 +- .../servers-details-resp.json.tpl | 8 +- .../server-get-resp.json.tpl | 8 +- .../servers-detail-resp.json.tpl | 8 +- 11 files changed, 68 insertions(+), 168 deletions(-) delete mode 100644 nova/tests/api/openstack/compute/plugins/v3/test_extended_status.py diff --git a/doc/v3/api_samples/all_extensions/server-get-resp.json b/doc/v3/api_samples/all_extensions/server-get-resp.json index d06c2f63a9..3291176092 100644 --- a/doc/v3/api_samples/all_extensions/server-get-resp.json +++ b/doc/v3/api_samples/all_extensions/server-get-resp.json @@ -53,10 +53,10 @@ "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", - "os-extended-status:locked_by": null, - "os-extended-status:power_state": 1, - "os-extended-status:task_state": null, - "os-extended-status:vm_state": "active", + "OS-EXT-STS:locked_by": null, + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [], "os-pci:pci_devices": [ { diff --git a/doc/v3/api_samples/all_extensions/servers-details-resp.json 
b/doc/v3/api_samples/all_extensions/servers-details-resp.json index d6084e1c21..1e072444b3 100644 --- a/doc/v3/api_samples/all_extensions/servers-details-resp.json +++ b/doc/v3/api_samples/all_extensions/servers-details-resp.json @@ -54,10 +54,10 @@ "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", - "os-extended-status:locked_by": null, - "os-extended-status:power_state": 1, - "os-extended-status:task_state": null, - "os-extended-status:vm_state": "active", + "OS-EXT-STS:locked_by": null, + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [], "os-pci:pci_devices": [ { diff --git a/doc/v3/api_samples/os-extended-status/server-get-resp.json b/doc/v3/api_samples/os-extended-status/server-get-resp.json index 6402094949..120486f36b 100644 --- a/doc/v3/api_samples/os-extended-status/server-get-resp.json +++ b/doc/v3/api_samples/os-extended-status/server-get-resp.json @@ -46,10 +46,10 @@ "My Server Name": "Apache1" }, "name": "new-server-test", - "os-extended-status:locked_by": null, - "os-extended-status:power_state": 1, - "os-extended-status:task_state": null, - "os-extended-status:vm_state": "active", + "OS-EXT-STS:locked_by": null, + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", "progress": 0, "status": "ACTIVE", "tenant_id": "openstack", diff --git a/doc/v3/api_samples/os-extended-status/servers-detail-resp.json b/doc/v3/api_samples/os-extended-status/servers-detail-resp.json index 77fd2afa72..00cb68786d 100644 --- a/doc/v3/api_samples/os-extended-status/servers-detail-resp.json +++ b/doc/v3/api_samples/os-extended-status/servers-detail-resp.json @@ -47,10 +47,10 @@ "My Server Name": "Apache1" }, "name": "new-server-test", - "os-extended-status:locked_by": null, - "os-extended-status:power_state": 1, - 
"os-extended-status:task_state": null, - "os-extended-status:vm_state": "active", + "OS-EXT-STS:locked_by": null, + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", "progress": 0, "status": "ACTIVE", "tenant_id": "openstack", diff --git a/nova/api/openstack/compute/plugins/v3/extended_status.py b/nova/api/openstack/compute/plugins/v3/extended_status.py index 66258c6a9f..0b4fb4de09 100644 --- a/nova/api/openstack/compute/plugins/v3/extended_status.py +++ b/nova/api/openstack/compute/plugins/v3/extended_status.py @@ -29,7 +29,7 @@ def __init__(self, *args, **kwargs): def _extend_server(self, server, instance): for state in ['task_state', 'vm_state', 'power_state', 'locked_by']: - key = "%s:%s" % (ExtendedStatus.alias, state) + key = "%s:%s" % ('OS-EXT-STS', state) server[key] = instance[state] @wsgi.extends diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_status.py b/nova/tests/api/openstack/compute/contrib/test_extended_status.py index 7269437040..d75296232f 100644 --- a/nova/tests/api/openstack/compute/contrib/test_extended_status.py +++ b/nova/tests/api/openstack/compute/contrib/test_extended_status.py @@ -52,28 +52,31 @@ def fake_compute_get_all(*args, **kwargs): db_list, fields) -class ExtendedStatusTest(test.TestCase): +class ExtendedStatusTestV21(test.TestCase): content_type = 'application/json' prefix = 'OS-EXT-STS:' + fake_url = '/v3' + + def _set_flags(self): + pass + + def _make_request(self, url): + req = webob.Request.blank(url) + req.headers['Accept'] = self.content_type + res = req.get_response(fakes.wsgi_app_v3( + init_only=('servers', + 'os-extended-status'))) + return res def setUp(self): - super(ExtendedStatusTest, self).setUp() + super(ExtendedStatusTestV21, self).setUp() fakes.stub_out_nw_api(self.stubs) self.stubs.Set(compute.api.API, 'get', fake_compute_get) self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all) - self.flags( - osapi_compute_extension=[ - 
'nova.api.openstack.compute.contrib.select_extensions'], - osapi_compute_ext_list=['Extended_status']) + self._set_flags() return_server = fakes.fake_instance_get() self.stubs.Set(db, 'instance_get_by_uuid', return_server) - def _make_request(self, url): - req = webob.Request.blank(url) - req.headers['Accept'] = self.content_type - res = req.get_response(fakes.wsgi_app(init_only=('servers',))) - return res - def _get_server(self, body): return jsonutils.loads(body).get('server') @@ -87,7 +90,7 @@ def assertServerStates(self, server, vm_state, power_state, task_state): self.assertEqual(server.get('%stask_state' % self.prefix), task_state) def test_show(self): - url = '/v2/fake/servers/%s' % UUID3 + url = self.fake_url + '/servers/%s' % UUID3 res = self._make_request(url) self.assertEqual(res.status_int, 200) @@ -97,7 +100,7 @@ def test_show(self): task_state='kayaking') def test_detail(self): - url = '/v2/fake/servers/detail' + url = self.fake_url + '/servers/detail' res = self._make_request(url) self.assertEqual(res.status_int, 200) @@ -113,13 +116,29 @@ def fake_compute_get(*args, **kwargs): raise exception.InstanceNotFound(instance_id='fake') self.stubs.Set(compute.api.API, 'get', fake_compute_get) - url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115' + url = self.fake_url + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115' res = self._make_request(url) self.assertEqual(res.status_int, 404) -class ExtendedStatusXmlTest(ExtendedStatusTest): +class ExtendedStatusTestV2(ExtendedStatusTestV21): + fake_url = '/v2/fake' + + def _set_flags(self): + self.flags( + osapi_compute_extension=[ + 'nova.api.openstack.compute.contrib.select_extensions'], + osapi_compute_ext_list=['Extended_status']) + + def _make_request(self, url): + req = webob.Request.blank(url) + req.headers['Accept'] = self.content_type + res = req.get_response(fakes.wsgi_app(init_only=('servers',))) + return res + + +class ExtendedStatusXmlTest(ExtendedStatusTestV2): content_type = 
'application/xml' prefix = '{%s}' % extended_status.Extended_status.namespace diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_extended_status.py b/nova/tests/api/openstack/compute/plugins/v3/test_extended_status.py deleted file mode 100644 index 32053abd9d..0000000000 --- a/nova/tests/api/openstack/compute/plugins/v3/test_extended_status.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import webob - -from nova import compute -from nova import db -from nova import exception -from nova import objects -from nova.objects import instance as instance_obj -from nova.openstack.common import jsonutils -from nova import test -from nova.tests.api.openstack import fakes -from nova.tests import fake_instance - -UUID1 = '00000000-0000-0000-0000-000000000001' -UUID2 = '00000000-0000-0000-0000-000000000002' -UUID3 = '00000000-0000-0000-0000-000000000003' - - -def fake_compute_get(*args, **kwargs): - inst = fakes.stub_instance(1, uuid=UUID3, task_state="kayaking", - vm_state="slightly crunchy", power_state=1, locked_by='owner') - return fake_instance.fake_instance_obj(args[1], **inst) - - -def fake_compute_get_all(*args, **kwargs): - db_list = [ - fakes.stub_instance(1, uuid=UUID1, task_state="task-1", - vm_state="vm-1", power_state=1, locked_by=None), - fakes.stub_instance(2, uuid=UUID2, task_state="task-2", - vm_state="vm-2", power_state=2, locked_by='admin'), - ] - fields = instance_obj.INSTANCE_DEFAULT_FIELDS - return instance_obj._make_instance_list(args[1], - objects.InstanceList(), - db_list, fields) - - -class ExtendedStatusTest(test.TestCase): - content_type = 'application/json' - prefix = 'os-extended-status:' - - def setUp(self): - super(ExtendedStatusTest, self).setUp() - fakes.stub_out_nw_api(self.stubs) - self.stubs.Set(compute.api.API, 'get', fake_compute_get) - self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all) - return_server = fakes.fake_instance_get() - self.stubs.Set(db, 'instance_get_by_uuid', return_server) - - def _make_request(self, url): - req = webob.Request.blank(url) - req.headers['Accept'] = self.content_type - res = req.get_response(fakes.wsgi_app_v3( - init_only=('servers', - 'os-extended-status'))) - return res - - def _get_server(self, body): - return jsonutils.loads(body).get('server') - - def _get_servers(self, body): - return jsonutils.loads(body).get('servers') - - def assertServerStates(self, server, 
vm_state, power_state, task_state, - locked_by): - self.assertEqual(server.get('%svm_state' % self.prefix), vm_state) - self.assertEqual(int(server.get('%spower_state' % self.prefix)), - power_state) - self.assertEqual(server.get('%stask_state' % self.prefix), task_state) - self.assertEqual(str(server.get('%slocked_by' % self.prefix)), - locked_by) - - def test_show(self): - url = '/v3/servers/%s' % UUID3 - res = self._make_request(url) - - self.assertEqual(res.status_int, 200) - self.assertServerStates(self._get_server(res.body), - vm_state='slightly crunchy', - power_state=1, - task_state='kayaking', - locked_by='owner') - - def test_detail(self): - url = '/v3/servers/detail' - res = self._make_request(url) - - self.assertEqual(res.status_int, 200) - for i, server in enumerate(self._get_servers(res.body)): - self.assertServerStates(server, - vm_state='vm-%s' % (i + 1), - power_state=(i + 1), - task_state='task-%s' % (i + 1), - locked_by=['None', 'admin'][i]) - - def test_no_instance_passthrough_404(self): - - def fake_compute_get(*args, **kwargs): - raise exception.InstanceNotFound(instance_id='fake') - - self.stubs.Set(compute.api.API, 'get', fake_compute_get) - url = '/v3/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115' - res = self._make_request(url) - - self.assertEqual(res.status_int, 404) diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl index 064724d04b..b33bf8786f 100644 --- a/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl @@ -53,10 +53,10 @@ "OS-EXT-SRV-ATTR:host": "%(compute_host)s", "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", - "os-extended-status:locked_by": null, - "os-extended-status:power_state": 1, - "os-extended-status:task_state": null, - 
"os-extended-status:vm_state": "active", + "OS-EXT-STS:locked_by": null, + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [], "os-pci:pci_devices": [{"id": 1}], "os-server-usage:launched_at": "%(strtime)s", diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl index 1c61308718..cf7cb6897e 100644 --- a/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl @@ -54,10 +54,10 @@ "OS-EXT-SRV-ATTR:host": "%(compute_host)s", "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", - "os-extended-status:locked_by": null, - "os-extended-status:power_state": 1, - "os-extended-status:task_state": null, - "os-extended-status:vm_state": "active", + "OS-EXT-STS:locked_by": null, + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [], "os-pci:pci_devices": [{"id": 1}], "os-server-usage:launched_at": "%(strtime)s", diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl index a416cc6fc5..17e5d594a4 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl @@ -1,9 +1,9 @@ { "server": { - "os-extended-status:task_state": null, - "os-extended-status:vm_state": "active", - "os-extended-status:power_state": 1, - "os-extended-status:locked_by": null, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:locked_by": 
null, "updated": "%(isotime)s", "created": "%(isotime)s", "addresses": { diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl index 06eb488262..10283a2c58 100644 --- a/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl +++ b/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl @@ -1,10 +1,10 @@ { "servers": [ { - "os-extended-status:task_state": null, - "os-extended-status:vm_state": "active", - "os-extended-status:power_state": 1, - "os-extended-status:locked_by": null, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:locked_by": null, "updated": "%(isotime)s", "created": "%(isotime)s", "addresses": { From 674954f731bf4b66356fadaa5baaeb58279c5832 Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Fri, 8 Aug 2014 11:48:17 +0930 Subject: [PATCH 481/486] Allow attaching external networks based on configurable policy Commit da66d50010d5b1ba1d7fc9c3d59d81b6c01bb0b0 restricted attaching external networks to admin clients. This patch changes it to a policy based check instead with the default setting being admin only. 
This allows operators to more precisely configure who they wish to allow to attach external networks without having to give them admin access Change-Id: I59e71f117f889f2abffddc36c1870ef1e0fe3711 DocImpact: Adds network:attach_external_network policy Closes-Bug: #1352102 --- etc/nova/policy.json | 3 ++- nova/api/openstack/extensions.py | 12 ++++++++++-- nova/network/neutronv2/api.py | 6 +++++- nova/tests/fake_policy.py | 3 ++- nova/tests/network/test_neutronv2.py | 9 +++++++++ 5 files changed, 28 insertions(+), 5 deletions(-) diff --git a/etc/nova/policy.json b/etc/nova/policy.json index b3948c495e..89544a8009 100644 --- a/etc/nova/policy.json +++ b/etc/nova/policy.json @@ -329,5 +329,6 @@ "network:get_dns_entries_by_name": "", "network:create_private_dns_domain": "", "network:create_public_dns_domain": "", - "network:delete_dns_domain": "" + "network:delete_dns_domain": "", + "network:attach_external_network": "rule:admin_api" } diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index f475a82ebf..cb23a67605 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -395,8 +395,8 @@ def extension_authorizer(api_name, extension_name): return core_authorizer('%s_extension' % api_name, extension_name) -def soft_extension_authorizer(api_name, extension_name): - hard_authorize = extension_authorizer(api_name, extension_name) +def soft_authorizer(hard_authorizer, api_name, extension_name): + hard_authorize = hard_authorizer(api_name, extension_name) def authorize(context, action=None): try: @@ -407,6 +407,14 @@ def authorize(context, action=None): return authorize +def soft_extension_authorizer(api_name, extension_name): + return soft_authorizer(extension_authorizer, api_name, extension_name) + + +def soft_core_authorizer(api_name, extension_name): + return soft_authorizer(core_authorizer, api_name, extension_name) + + def check_compute_policy(context, action, target, scope='compute'): _action = '%s:%s' % (scope, 
action) nova.policy.enforce(context, _action, target) diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index 78e6590f7d..eb0d40e070 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -20,6 +20,7 @@ from neutronclient.common import exceptions as neutron_client_exc from oslo.config import cfg +from nova.api.openstack import extensions from nova.compute import flavors from nova.compute import utils as compute_utils from nova import conductor @@ -120,6 +121,9 @@ CONF.import_opt('flat_injected', 'nova.network.manager') LOG = logging.getLogger(__name__) +soft_external_network_attach_authorize = extensions.soft_core_authorizer( + 'network', 'attach_external_network') + class API(base_api.NetworkAPI): """API for interacting with the neutron 2.x API.""" @@ -164,7 +168,7 @@ def _get_available_networks(self, context, project_id, nets, net_ids) - if not context.is_admin: + if not soft_external_network_attach_authorize(context): for net in nets: # Perform this check here rather than in validate_networks to # ensure the check is performed every time diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py index f4d74218ba..29e1b3ed63 100644 --- a/nova/tests/fake_policy.py +++ b/nova/tests/fake_policy.py @@ -385,6 +385,7 @@ "network:get_dns_entries_by_name": "", "network:create_private_dns_domain": "", "network:create_public_dns_domain": "", - "network:delete_dns_domain": "" + "network:delete_dns_domain": "", + "network:attach_external_network": "rule:admin_api" } """ diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py index a6f2b44ed4..2b4d2a6e90 100644 --- a/nova/tests/network/test_neutronv2.py +++ b/nova/tests/network/test_neutronv2.py @@ -33,6 +33,8 @@ from nova.network.neutronv2 import api as neutronapi from nova.network.neutronv2 import constants from nova.openstack.common import jsonutils +from nova.openstack.common import policy as common_policy +from nova import policy 
from nova import test from nova.tests import fake_instance from nova import utils @@ -1748,6 +1750,13 @@ def test_get_available_networks_with_externalnet_admin_ctx(self): self._get_available_networks(self.nets5, pub_nets=[], req_ids=req_ids, context=admin_ctx) + def test_get_available_networks_with_custom_policy(self): + rules = {'network:attach_external_network': + common_policy.parse_rule('')} + policy.set_rules(rules) + req_ids = [net['id'] for net in self.nets5] + self._get_available_networks(self.nets5, pub_nets=[], req_ids=req_ids) + def test_get_floating_ip_pools(self): api = neutronapi.API() search_opts = {'router:external': True} From 56a9512f1443c8df3cfb0153d358c8cd8da50336 Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Tue, 12 Aug 2014 09:49:29 +0200 Subject: [PATCH 482/486] filter: add per-aggregate filter to configure disk_allocation_ratio Adds a filter AggregateDiskFilter which provides the ability to read from aggregates metadata the "disk_allocation_ratio". DocImpact Implements: blueprint per-aggregate-disk-allocation-ratio Change-Id: I79d59fcdfb09e67ed6f12113615c673624b24a19 --- doc/source/devref/filter_scheduler.rst | 6 ++++ nova/scheduler/filters/disk_filter.py | 35 +++++++++++++++++- nova/tests/scheduler/test_host_filters.py | 43 +++++++++++++++++++++++ 3 files changed, 83 insertions(+), 1 deletion(-) diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst index 98620a9279..479324fef1 100644 --- a/doc/source/devref/filter_scheduler.rst +++ b/doc/source/devref/filter_scheduler.rst @@ -94,6 +94,11 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`): ``disk_allocation_ration`` setting. It's virtual disk to physical disk allocation ratio and it's 1.0 by default. The total allow allocated disk size will be physical disk multiplied this ratio. +* |AggregateDiskFilter| - filters hosts by disk allocation with per-aggregate + ``disk_allocation_ratio`` setting. 
If no per-aggregate value is found, it will + fall back to the global default ``disk_allocation_ratio``. If more than one value + is found for a host (meaning the host is in two or more different aggregates with + different ratio settings), the minimum value will be used. * |NumInstancesFilter| - filters hosts by number of running instances on it. hosts with too many instances will be filtered. ``max_instances_per_host`` setting. Maximum number of instances allowed to run on @@ -360,6 +365,7 @@ in :mod:``nova.tests.scheduler``. .. |RamFilter| replace:: :class:`RamFilter ` .. |AggregateRamFilter| replace:: :class:`AggregateRamFilter ` .. |DiskFilter| replace:: :class:`DiskFilter ` +.. |AggregateDiskFilter| replace:: :class:`AggregateDiskFilter ` .. |NumInstancesFilter| replace:: :class:`NumInstancesFilter ` .. |IoOpsFilter| replace:: :class:`IoOpsFilter ` .. |AggregateIoOpsFilter| replace:: :class:`AggregateIoOpsFilter ` diff --git a/nova/scheduler/filters/disk_filter.py b/nova/scheduler/filters/disk_filter.py index b9c4004013..a16a3d2094 100644 --- a/nova/scheduler/filters/disk_filter.py +++ b/nova/scheduler/filters/disk_filter.py @@ -15,8 +15,10 @@ from oslo.config import cfg +from nova.i18n import _LW from nova.openstack.common import log as logging from nova.scheduler import filters +from nova.scheduler.filters import utils LOG = logging.getLogger(__name__) @@ -30,6 +32,9 @@ class DiskFilter(filters.BaseHostFilter): """Disk Filter with over subscription flag.""" + def _get_disk_allocation_ratio(self, host_state, filter_properties): + return CONF.disk_allocation_ratio + def host_passes(self, host_state, filter_properties): """Filter based on disk usage.""" instance_type = filter_properties.get('instance_type') @@ -40,7 +45,10 @@ def host_passes(self, host_state, filter_properties): free_disk_mb = host_state.free_disk_mb total_usable_disk_mb = host_state.total_usable_disk_gb * 1024 - disk_mb_limit = total_usable_disk_mb * CONF.disk_allocation_ratio + 
disk_allocation_ratio = self._get_disk_allocation_ratio( + host_state, filter_properties) + + disk_mb_limit = total_usable_disk_mb * disk_allocation_ratio used_disk_mb = total_usable_disk_mb - free_disk_mb usable_disk_mb = disk_mb_limit - used_disk_mb @@ -55,3 +63,28 @@ def host_passes(self, host_state, filter_properties): disk_gb_limit = disk_mb_limit / 1024 host_state.limits['disk_gb'] = disk_gb_limit return True + + +class AggregateDiskFilter(DiskFilter): + """AggregateDiskFilter with per-aggregate disk allocation ratio flag. + + Fall back to global disk_allocation_ratio if no per-aggregate setting + found. + """ + + def _get_disk_allocation_ratio(self, host_state, filter_properties): + # TODO(uni): DB query in filter is a performance hit, especially for + # system with lots of hosts. Will need a general solution here to fix + # all filters with aggregate DB call things. + aggregate_vals = utils.aggregate_values_from_db( + filter_properties['context'], + host_state.host, + 'disk_allocation_ratio') + try: + ratio = utils.validate_num_values( + aggregate_vals, CONF.disk_allocation_ratio, cast_to=float) + except ValueError as e: + LOG.warn(_LW("Could not decode disk_allocation_ratio: '%s'"), e) + ratio = CONF.disk_allocation_ratio + + return ratio diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py index 62f00068c3..2b1fdb09f2 100644 --- a/nova/tests/scheduler/test_host_filters.py +++ b/nova/tests/scheduler/test_host_filters.py @@ -1927,3 +1927,46 @@ def test_aggregate_filter_num_iops_value_error(self): metadata={'max_io_ops_per_host': 'XXX'}) filter_properties = {'context': self.context} self.assertTrue(filt_cls.host_passes(host, filter_properties)) + + def test_aggregate_disk_filter_value_error(self): + self._stub_service_is_up(True) + filt_cls = self.class_map['AggregateDiskFilter']() + self.flags(disk_allocation_ratio=1.0) + filter_properties = { + 'context': self.context, + 'instance_type': {'root_gb': 1, + 
'ephemeral_gb': 1, + 'swap': 1024}} + service = {'disabled': False} + host = fakes.FakeHostState('host1', 'node1', + {'free_disk_mb': 3 * 1024, + 'total_usable_disk_gb': 1, + 'service': service}) + self._create_aggregate_with_host(name='fake_aggregate', + hosts=['host1'], + metadata={'disk_allocation_ratio': 'XXX'}) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) + + def test_aggregate_disk_filter_default_value(self): + self._stub_service_is_up(True) + filt_cls = self.class_map['AggregateDiskFilter']() + self.flags(disk_allocation_ratio=1.0) + filter_properties = { + 'context': self.context, + 'instance_type': {'root_gb': 2, + 'ephemeral_gb': 1, + 'swap': 1024}} + service = {'disabled': False} + host = fakes.FakeHostState('host1', 'node1', + {'free_disk_mb': 3 * 1024, + 'total_usable_disk_gb': 1, + 'service': service}) + # Uses global conf. + self.assertFalse(filt_cls.host_passes(host, filter_properties)) + + # Uses an aggregate with ratio + self._create_aggregate_with_host( + name='fake_aggregate', + hosts=['host1'], + metadata={'disk_allocation_ratio': '2'}) + self.assertTrue(filt_cls.host_passes(host, filter_properties)) From 661dde000224b3336bc0012564645d4a0d7a6826 Mon Sep 17 00:00:00 2001 From: Yunhong Jiang Date: Tue, 26 Aug 2014 09:45:46 -0700 Subject: [PATCH 483/486] A minor change to a comments The service list version information does not match, fix it. 
Change-Id: Ibc88aedcf63bbcc5a36fd3a3bd92d0a0f0424fb2 --- nova/objects/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/objects/service.py b/nova/objects/service.py index 23bab6fa10..6d53930c43 100644 --- a/nova/objects/service.py +++ b/nova/objects/service.py @@ -137,7 +137,7 @@ def destroy(self, context): class ServiceList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Service <= version 1.2 - # Version 1.2 Service version 1.3 + # Version 1.1 Service version 1.3 VERSION = '1.1' fields = { From e1a32b0b7551facde7188b24830358bd98c383e1 Mon Sep 17 00:00:00 2001 From: Michele Paolino Date: Fri, 1 Aug 2014 18:38:39 +0200 Subject: [PATCH 484/486] Add VIF_VHOSTUSER This is based on a new QEMU feature called vhost-user for connecting to a user-space vswitch via a unix socket. Change-Id: I0a11e89d3a0bbdfccac569c09009e576ea2e1173 Signed-off-by: Michele Paolino --- nova/network/model.py | 6 +++- nova/tests/virt/libvirt/test_config.py | 17 +++++++++++ nova/tests/virt/libvirt/test_vif.py | 40 ++++++++++++++++++++++++++ nova/virt/libvirt/config.py | 7 +++++ nova/virt/libvirt/designer.py | 12 ++++++++ nova/virt/libvirt/vif.py | 14 +++++++++ 6 files changed, 95 insertions(+), 1 deletion(-) diff --git a/nova/network/model.py b/nova/network/model.py index 2829d7bda8..441dbf9cb8 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -38,6 +38,7 @@ def ensure_string_keys(d): VIF_TYPE_802_QBH = '802.1qbh' VIF_TYPE_MLNX_DIRECT = 'mlnx_direct' VIF_TYPE_MIDONET = 'midonet' +VIF_TYPE_VHOSTUSER = 'vhostuser' VIF_TYPE_OTHER = 'other' # Constants for dictionary keys in the 'vif_details' field in the VIF @@ -273,6 +274,7 @@ class VIF(Model): def __init__(self, id=None, address=None, network=None, type=None, details=None, devname=None, ovs_interfaceid=None, qbh_params=None, qbg_params=None, active=False, + vhostuser_mode=None, vhostuser_path=None, **kwargs): super(VIF, self).__init__() @@ -287,13 +289,15 @@ def __init__(self, 
id=None, address=None, network=None, type=None, self['qbh_params'] = qbh_params self['qbg_params'] = qbg_params self['active'] = active + self['vhostuser_path'] = vhostuser_path + self['vhostuser_mode'] = vhostuser_mode self._set_meta(kwargs) def __eq__(self, other): keys = ['id', 'address', 'network', 'type', 'details', 'devname', 'ovs_interfaceid', 'qbh_params', 'qbg_params', - 'active'] + 'active', 'vhostuser_path', 'vhostuser_mode'] return all(self[k] == other[k] for k in keys) def __ne__(self, other): diff --git a/nova/tests/virt/libvirt/test_config.py b/nova/tests/virt/libvirt/test_config.py index a95a460ea9..a45088c1cb 100644 --- a/nova/tests/virt/libvirt/test_config.py +++ b/nova/tests/virt/libvirt/test_config.py @@ -1129,6 +1129,23 @@ def test_config_direct(self): """) + def test_config_vhostuser(self): + obj = config.LibvirtConfigGuestInterface() + obj.net_type = "vhostuser" + obj.vhostuser_type = "unix" + obj.vhostuser_path = "/tmp/vhostuser.sock" + obj.vhostuser_mode = "server" + obj.mac_addr = "DE:AD:BE:EF:CA:FE" + obj.model = "virtio" + + xml = obj.to_xml() + self.assertXmlEqual(xml, """ + + + + + """) + class LibvirtConfigGuestTest(LibvirtConfigBaseTest): diff --git a/nova/tests/virt/libvirt/test_vif.py b/nova/tests/virt/libvirt/test_vif.py index 3c4e409c5c..2d33f618d2 100644 --- a/nova/tests/virt/libvirt/test_vif.py +++ b/nova/tests/virt/libvirt/test_vif.py @@ -228,6 +228,20 @@ class LibvirtVifTestCase(test.TestCase): type=network_model.VIF_TYPE_MIDONET, devname='tap-xxx-yyy-zzz') + vif_vhostuser_defpath = network_model.VIF(id='vif-xxx-yyy-zzz', + address='ca:fe:de:ad:be:ef', + type=network_model. + VIF_TYPE_VHOSTUSER, + vhostuser_mode='server') + + vif_vhostuser_custpath = network_model.VIF(id='vif-xxx-yyy-zzz', + address='ca:fe:de:ad:be:ef', + type=network_model. 
+ VIF_TYPE_VHOSTUSER, + vhostuser_path= + '/tmp/custompath.sock', + vhostuser_mode='server') + vif_iovisor = network_model.VIF(id='vif-xxx-yyy-zzz', address='ca:fe:de:ad:be:ef', network=network_bridge, @@ -847,6 +861,32 @@ def test_midonet_ethernet_vif_driver(self): self._assertTypeAndMacEquals(node, "ethernet", "target", "dev", self.vif_midonet, br_want) + def test_vhostuser_defpath_vif_driver(self): + d = vif.LibvirtGenericVIFDriver(self._get_conn()) + xml = self._get_instance_xml(d, self.vif_vhostuser_defpath) + node = self._get_node(xml) + self._assertTypeEquals(node, "vhostuser", "source", "type", + "unix") + self._assertTypeEquals(node, "vhostuser", "source", "path", + "/var/lib/libvirt/qemu/vhostuser") + self._assertTypeEquals(node, "vhostuser", "source", "mode", + "server") + self._assertMacEquals(node, self.vif_vhostuser_defpath) + self._assertModel(xml, network_model.VIF_MODEL_VIRTIO) + + def test_vhostuser_custpath_vif_driver(self): + d = vif.LibvirtGenericVIFDriver(self._get_conn()) + xml = self._get_instance_xml(d, self.vif_vhostuser_custpath) + node = self._get_node(xml) + self._assertTypeEquals(node, "vhostuser", "source", "type", + "unix") + self._assertTypeEquals(node, "vhostuser", "source", "path", + "/tmp/custompath.sock") + self._assertTypeEquals(node, "vhostuser", "source", "mode", + "server") + self._assertMacEquals(node, self.vif_vhostuser_custpath) + self._assertModel(xml, network_model.VIF_MODEL_VIRTIO) + def test_generic_8021qbh_driver(self): d = vif.LibvirtGenericVIFDriver(self._get_conn()) xml = self._get_instance_xml(d, self.vif_8021qbh) diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py index adc5c80196..98db0942e0 100644 --- a/nova/virt/libvirt/config.py +++ b/nova/virt/libvirt/config.py @@ -1023,6 +1023,9 @@ def __init__(self, **kwargs): **kwargs) self.net_type = None + self.vhostuser_type = None + self.vhostuser_path = None + self.vhostuser_mode = None self.target_dev = None self.model = None self.mac_addr = None 
@@ -1058,6 +1061,10 @@ def format_dom(self): elif self.net_type == "direct": dev.append(etree.Element("source", dev=self.source_dev, mode=self.source_mode)) + elif self.net_type == "vhostuser": + dev.append(etree.Element("source", type=self.vhostuser_type, + path=self.vhostuser_path, + mode=self.vhostuser_mode)) else: dev.append(etree.Element("source", bridge=self.source_dev)) diff --git a/nova/virt/libvirt/designer.py b/nova/virt/libvirt/designer.py index 25e2a1c401..62c82aa591 100644 --- a/nova/virt/libvirt/designer.py +++ b/nova/virt/libvirt/designer.py @@ -56,6 +56,18 @@ def set_vif_host_backend_ethernet_config(conf, tapname): conf.script = "" +def set_vif_host_backend_vhostuser_config(conf, mode, path=None): + """Populate a LibvirtConfigGuestInterface instance + with vhostuser socket details + """ + + conf.net_type = "vhostuser" + # unix is the only supported type in libvirt + conf.vhostuser_type = "unix" + conf.vhostuser_path = path or "/var/lib/libvirt/qemu/vhostuser" + conf.vhostuser_mode = mode + + def set_vif_host_backend_ovs_config(conf, brname, interfaceid, tapname=None): """Populate a LibvirtConfigGuestInterface instance with host backend details for an OpenVSwitch bridge. 
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 3abe5e8f57..45e52e924b 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -308,6 +308,14 @@ def get_config_mlnx_direct(self, instance, vif, image_meta, return conf + def get_config_vhostuser(self, instance, vif, image_meta, + inst_type, virt_type): + conf = self.get_base_config(instance, vif, image_meta, + inst_type, virt_type) + designer.set_vif_host_backend_vhostuser_config(conf, + vif['vhostuser_mode'], vif['vhostuser_path']) + return conf + def get_config(self, instance, vif, image_meta, inst_type, virt_type): vif_type = vif['type'] @@ -495,6 +503,9 @@ def plug_iovisor(self, instance, vif): except processutils.ProcessExecutionError: LOG.exception(_LE("Failed while plugging vif"), instance=instance) + def plug_vhostuser(self, instance, vif): + pass + def plug(self, instance, vif): vif_type = vif['type'] @@ -639,6 +650,9 @@ def unplug_iovisor(self, instance, vif): LOG.exception(_LE("Failed while unplugging vif"), instance=instance) + def unplug_vhostuser(self, instance, vif): + pass + def unplug(self, instance, vif): vif_type = vif['type'] From 3154f2cb0eb0d8c447c6d18ee0bab5e7dbeba60b Mon Sep 17 00:00:00 2001 From: sraho Date: Thu, 18 Apr 2019 11:47:40 +0200 Subject: [PATCH 485/486] Known moderate severity security vulnerability detected in SQLAlchemy < 1.3.0 Known moderate severity security vulnerability detected in SQLAlchemy < 1.3.0 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 72f9737287..e94e633eb2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ pbr>=0.6,!=0.7,<1.0 -SQLAlchemy>=0.8.4,<=0.8.99,>=0.9.7,<=0.9.99 +SQLAlchemy ~> 1.3.0 anyjson>=0.3.3 argparse boto>=2.12.0,!=2.13.0 From 90bc9e2ad2d38a9c4dcb1aea1a45af41015da560 Mon Sep 17 00:00:00 2001 From: Bogdan Popescu <68062990+bopopescu@users.noreply.github.com> Date: Fri, 24 Jul 2020 09:42:01 +0300 Subject: [PATCH 
486/486] Removing the master-slave language --- doc/source/conf.py | 4 +- .../openstack/compute/contrib/extended_ips.py | 8 +- .../compute/contrib/extended_ips_mac.py | 8 +- .../contrib/extended_server_attributes.py | 8 +- .../compute/contrib/extended_status.py | 8 +- .../extended_virtual_interfaces_net.py | 4 +- .../compute/contrib/extended_volumes.py | 8 +- .../compute/contrib/flavor_access.py | 12 +- .../openstack/compute/contrib/image_size.py | 8 +- .../openstack/compute/contrib/server_usage.py | 8 +- nova/api/openstack/compute/servers.py | 16 +- nova/api/openstack/wsgi.py | 2 +- nova/api/openstack/xmlutil.py | 120 +++++----- nova/compute/manager.py | 64 +++--- nova/compute/rpcapi.py | 10 +- nova/conductor/manager.py | 4 +- nova/conductor/rpcapi.py | 2 +- nova/console/xvp.py | 2 +- nova/db/api.py | 30 +-- nova/db/sqlalchemy/api.py | 102 ++++----- nova/network/ldapdns.py | 8 +- nova/network/linux_net.py | 2 +- nova/network/manager.py | 4 +- nova/network/neutronv2/api.py | 8 +- nova/objects/block_device.py | 6 +- nova/objects/instance.py | 32 +-- nova/objects/migration.py | 6 +- nova/objects/virtual_interface.py | 4 +- .../common/db/sqlalchemy/migration.py | 2 +- nova/openstack/common/gettextutils.py | 4 +- nova/tests/api/ec2/test_cloud.py | 6 +- .../compute/contrib/test_disk_config.py | 2 +- .../compute/contrib/test_instance_actions.py | 6 +- .../compute/contrib/test_security_groups.py | 10 +- .../compute/contrib/test_server_start_stop.py | 2 +- .../openstack/compute/contrib/test_shelve.py | 2 +- .../openstack/compute/contrib/test_volumes.py | 2 +- .../plugins/v3/test_instance_actions.py | 6 +- .../compute/plugins/v3/test_server_actions.py | 6 +- .../plugins/v3/test_server_metadata.py | 6 +- .../compute/plugins/v3/test_servers.py | 20 +- .../compute/plugins/v3/test_shelve.py | 2 +- .../openstack/compute/test_server_actions.py | 6 +- .../openstack/compute/test_server_metadata.py | 6 +- .../api/openstack/compute/test_servers.py | 14 +- 
nova/tests/api/openstack/fakes.py | 8 +- nova/tests/api/openstack/test_xmlutil.py | 208 +++++++++--------- nova/tests/compute/test_compute.py | 56 ++--- nova/tests/compute/test_compute_api.py | 6 +- nova/tests/compute/test_compute_mgr.py | 18 +- nova/tests/compute/test_compute_utils.py | 2 +- nova/tests/compute/test_compute_xen.py | 2 +- nova/tests/compute/test_rpcapi.py | 8 +- nova/tests/conductor/test_conductor.py | 16 +- nova/tests/db/test_db_api.py | 4 +- nova/tests/integrated/test_api_samples.py | 2 +- .../integrated/v3/test_extended_volumes.py | 2 +- nova/tests/network/test_api.py | 4 +- nova/tests/network/test_linux_net.py | 2 +- nova/tests/network/test_manager.py | 14 +- nova/tests/objects/test_instance.py | 56 ++--- nova/tests/objects/test_migration.py | 6 +- nova/tests/test_metadata.py | 2 +- nova/tests/virt/libvirt/test_driver.py | 2 +- nova/tests/virt/xenapi/test_xenapi.py | 88 ++++---- nova/virt/xenapi/client/session.py | 12 +- nova/virt/xenapi/fake.py | 4 +- nova/virt/xenapi/pool.py | 86 ++++---- nova/virt/xenapi/pool_states.py | 2 +- tools/db/schema_diff.py | 8 +- 70 files changed, 609 insertions(+), 609 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index f83589a895..66e94be986 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -55,8 +55,8 @@ # The encoding of source files. #source_encoding = 'utf-8' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. 
project = u'nova' diff --git a/nova/api/openstack/compute/contrib/extended_ips.py b/nova/api/openstack/compute/contrib/extended_ips.py index 6aadab5736..6b63b8feb8 100644 --- a/nova/api/openstack/compute/contrib/extended_ips.py +++ b/nova/api/openstack/compute/contrib/extended_ips.py @@ -47,7 +47,7 @@ def _extend_server(self, context, server, instance): def show(self, req, resp_obj, id): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedIpsServerTemplate()) server = resp_obj.obj['server'] db_instance = req.get_db_instance(server['id']) @@ -59,7 +59,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedIpsServersTemplate()) servers = list(resp_obj.obj['servers']) for server in servers: @@ -96,7 +96,7 @@ def construct(self): root = xmlutil.TemplateElement('server', selector='server') xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(root) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_ips.alias: Extended_ips.namespace}) @@ -105,5 +105,5 @@ def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_ips.alias: Extended_ips.namespace}) diff --git a/nova/api/openstack/compute/contrib/extended_ips_mac.py b/nova/api/openstack/compute/contrib/extended_ips_mac.py index 79bce3cc76..c076b5747e 100644 --- a/nova/api/openstack/compute/contrib/extended_ips_mac.py +++ b/nova/api/openstack/compute/contrib/extended_ips_mac.py @@ 
-45,7 +45,7 @@ def _extend_server(self, context, server, instance): def show(self, req, resp_obj, id): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedIpsMacServerTemplate()) server = resp_obj.obj['server'] db_instance = req.get_db_instance(server['id']) @@ -57,7 +57,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedIpsMacServersTemplate()) servers = list(resp_obj.obj['servers']) for server in servers: @@ -93,7 +93,7 @@ class ExtendedIpsMacServerTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server', selector='server') make_server(root) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_ips_mac.alias: Extended_ips_mac.namespace}) @@ -102,5 +102,5 @@ def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_ips_mac.alias: Extended_ips_mac.namespace}) diff --git a/nova/api/openstack/compute/contrib/extended_server_attributes.py b/nova/api/openstack/compute/contrib/extended_server_attributes.py index 9777412197..83370b660d 100644 --- a/nova/api/openstack/compute/contrib/extended_server_attributes.py +++ b/nova/api/openstack/compute/contrib/extended_server_attributes.py @@ -39,7 +39,7 @@ def _extend_server(self, context, server, instance): def show(self, req, resp_obj, id): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our 
subordinate template to the response object resp_obj.attach(xml=ExtendedServerAttributeTemplate()) server = resp_obj.obj['server'] db_instance = req.get_db_instance(server['id']) @@ -51,7 +51,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedServerAttributesTemplate()) servers = list(resp_obj.obj['servers']) @@ -92,7 +92,7 @@ def construct(self): make_server(root) alias = Extended_server_attributes.alias namespace = Extended_server_attributes.namespace - return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace}) class ExtendedServerAttributesTemplate(xmlutil.TemplateBuilder): @@ -102,4 +102,4 @@ def construct(self): make_server(elem) alias = Extended_server_attributes.alias namespace = Extended_server_attributes.namespace - return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace}) diff --git a/nova/api/openstack/compute/contrib/extended_status.py b/nova/api/openstack/compute/contrib/extended_status.py index 5cdd1e8d42..d4be295d36 100644 --- a/nova/api/openstack/compute/contrib/extended_status.py +++ b/nova/api/openstack/compute/contrib/extended_status.py @@ -36,7 +36,7 @@ def _extend_server(self, server, instance): def show(self, req, resp_obj, id): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedStatusTemplate()) server = resp_obj.obj['server'] db_instance = req.get_db_instance(server['id']) @@ -48,7 +48,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # 
Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedStatusesTemplate()) servers = list(resp_obj.obj['servers']) for server in servers: @@ -86,7 +86,7 @@ class ExtendedStatusTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server', selector='server') make_server(root) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_status.alias: Extended_status.namespace}) @@ -95,5 +95,5 @@ def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_status.alias: Extended_status.namespace}) diff --git a/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py b/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py index a3dd4a3b6f..1b6350eecd 100644 --- a/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py +++ b/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py @@ -33,7 +33,7 @@ def construct(self): elem = xmlutil.SubTemplateElement(root, 'virtual_interface', selector='virtual_interfaces') make_vif(elem) - return xmlutil.SlaveTemplate(root, 1, + return xmlutil.SubordinateTemplate(root, 1, nsmap={Extended_virtual_interfaces_net.alias: Extended_virtual_interfaces_net.namespace}) @@ -48,7 +48,7 @@ def index(self, req, resp_obj, server_id): key = "%s:net_id" % Extended_virtual_interfaces_net.alias context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedVirtualInterfaceNetTemplate()) for vif in resp_obj.obj['virtual_interfaces']: vif1 = self.network_api.get_vif_by_mac_address(context, diff --git 
a/nova/api/openstack/compute/contrib/extended_volumes.py b/nova/api/openstack/compute/contrib/extended_volumes.py index f4af2f3d63..f2608c1c30 100644 --- a/nova/api/openstack/compute/contrib/extended_volumes.py +++ b/nova/api/openstack/compute/contrib/extended_volumes.py @@ -39,7 +39,7 @@ def _extend_server(self, context, server, instance): def show(self, req, resp_obj, id): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedVolumesServerTemplate()) server = resp_obj.obj['server'] db_instance = req.get_db_instance(server['id']) @@ -51,7 +51,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ExtendedVolumesServersTemplate()) servers = list(resp_obj.obj['servers']) for server in servers: @@ -90,7 +90,7 @@ class ExtendedVolumesServerTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server', selector='server') make_server(root) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_volumes.alias: Extended_volumes.namespace}) @@ -99,5 +99,5 @@ def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Extended_volumes.alias: Extended_volumes.namespace}) diff --git a/nova/api/openstack/compute/contrib/flavor_access.py b/nova/api/openstack/compute/contrib/flavor_access.py index 198dfbb8e2..5932406269 100644 --- a/nova/api/openstack/compute/contrib/flavor_access.py +++ b/nova/api/openstack/compute/contrib/flavor_access.py @@ -46,7 
+46,7 @@ def construct(self): make_flavor(root) alias = Flavor_access.alias namespace = Flavor_access.namespace - return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace}) class FlavorsTemplate(xmlutil.TemplateBuilder): @@ -56,7 +56,7 @@ def construct(self): make_flavor(elem) alias = Flavor_access.alias namespace = Flavor_access.namespace - return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace}) class FlavorAccessTemplate(xmlutil.TemplateBuilder): @@ -65,7 +65,7 @@ def construct(self): elem = xmlutil.SubTemplateElement(root, 'access', selector='flavor_access') make_flavor_access(elem) - return xmlutil.MasterTemplate(root, 1) + return xmlutil.MainTemplate(root, 1) def _marshall_flavor_access(flavor): @@ -127,7 +127,7 @@ def _extend_flavor(self, flavor_rval, flavor_ref): def show(self, req, resp_obj, id): context = req.environ['nova.context'] if soft_authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=FlavorTemplate()) db_flavor = req.get_db_flavor(id) @@ -137,7 +137,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if soft_authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=FlavorsTemplate()) flavors = list(resp_obj.obj['flavors']) @@ -149,7 +149,7 @@ def detail(self, req, resp_obj): def create(self, req, body, resp_obj): context = req.environ['nova.context'] if soft_authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=FlavorTemplate()) db_flavor = req.get_db_flavor(resp_obj.obj['flavor']['id']) diff --git a/nova/api/openstack/compute/contrib/image_size.py 
b/nova/api/openstack/compute/contrib/image_size.py index c746415170..e54e461307 100644 --- a/nova/api/openstack/compute/contrib/image_size.py +++ b/nova/api/openstack/compute/contrib/image_size.py @@ -29,7 +29,7 @@ def construct(self): root = xmlutil.TemplateElement('images') elem = xmlutil.SubTemplateElement(root, 'image', selector='images') make_image(elem) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Image_size.alias: Image_size.namespace}) @@ -37,7 +37,7 @@ class ImageSizeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('image', selector='image') make_image(root) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Image_size.alias: Image_size.namespace}) @@ -51,7 +51,7 @@ def _extend_image(self, image, image_cache): def show(self, req, resp_obj, id): context = req.environ["nova.context"] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ImageSizeTemplate()) image_resp = resp_obj.obj['image'] # image guaranteed to be in the cache due to the core API adding @@ -63,7 +63,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ImagesSizeTemplate()) images_resp = list(resp_obj.obj['images']) # images guaranteed to be in the cache due to the core API adding diff --git a/nova/api/openstack/compute/contrib/server_usage.py b/nova/api/openstack/compute/contrib/server_usage.py index 4dd5aa278a..9080d1392b 100644 --- a/nova/api/openstack/compute/contrib/server_usage.py +++ b/nova/api/openstack/compute/contrib/server_usage.py @@ -41,7 +41,7 @@ def _extend_server(self, server, instance): def show(self, req, resp_obj, id): 
context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ServerUsageTemplate()) server = resp_obj.obj['server'] db_instance = req.get_db_instance(server['id']) @@ -53,7 +53,7 @@ def show(self, req, resp_obj, id): def detail(self, req, resp_obj): context = req.environ['nova.context'] if authorize(context): - # Attach our slave template to the response object + # Attach our subordinate template to the response object resp_obj.attach(xml=ServerUsagesTemplate()) servers = list(resp_obj.obj['servers']) for server in servers: @@ -89,7 +89,7 @@ class ServerUsageTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server', selector='server') make_server(root) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Server_usage.alias: Server_usage.namespace}) @@ -98,5 +98,5 @@ def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem) - return xmlutil.SlaveTemplate(root, 1, nsmap={ + return xmlutil.SubordinateTemplate(root, 1, nsmap={ Server_usage.alias: Server_usage.namespace}) diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index 5fa81d35a2..678bb6a7fd 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -124,7 +124,7 @@ class ServerTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server', selector='server') make_server(root, detailed=True) - return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap) + return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap) class MinimalServersTemplate(xmlutil.TemplateBuilder): @@ -133,7 +133,7 @@ def construct(self): elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem) 
xmlutil.make_links(root, 'servers_links') - return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap) + return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap) class ServersTemplate(xmlutil.TemplateBuilder): @@ -141,27 +141,27 @@ def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem, detailed=True) - return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap) + return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap) class ServerAdminPassTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server') root.set('adminPass') - return xmlutil.SlaveTemplate(root, 1, nsmap=server_nsmap) + return xmlutil.SubordinateTemplate(root, 1, nsmap=server_nsmap) class ServerMultipleCreateTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server') root.set('reservation_id') - return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap) + return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap) def FullServerTemplate(): - master = ServerTemplate() - master.attach(ServerAdminPassTemplate()) - return master + main = ServerTemplate() + main.attach(ServerAdminPassTemplate()) + return main class CommonDeserializer(wsgi.MetadataXMLDeserializer): diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index cadae9d5a2..9a0b892bae 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -584,7 +584,7 @@ def preserialize(self, content_type, default_serializers=None): self.serializer = serializer() def attach(self, **kwargs): - """Attach slave templates to serializers.""" + """Attach subordinate templates to serializers.""" if self.media_type in kwargs: self.serializer.attach(kwargs[self.media_type]) diff --git a/nova/api/openstack/xmlutil.py b/nova/api/openstack/xmlutil.py index 679b873a16..e2ae2acd85 100644 --- a/nova/api/openstack/xmlutil.py +++ b/nova/api/openstack/xmlutil.py @@ -672,13 +672,13 
@@ def wrap(self): # We are a template return self - def apply(self, master): - """Hook method for determining slave applicability. + def apply(self, main): + """Hook method for determining subordinate applicability. An overridable hook method used to determine if this template - is applicable as a slave to a given master template. + is applicable as a subordinate to a given main template. - :param master: The master template to test. + :param main: The main template to test. """ return True @@ -693,17 +693,17 @@ def tree(self): return "%r: %s" % (self, self.root.tree()) -class MasterTemplate(Template): - """Represent a master template. +class MainTemplate(Template): + """Represent a main template. - Master templates are versioned derivatives of templates that - additionally allow slave templates to be attached. Slave + Main templates are versioned derivatives of templates that + additionally allow subordinate templates to be attached. Subordinate templates allow modification of the serialized result without - directly changing the master. + directly changing the main. """ def __init__(self, root, version, nsmap=None): - """Initialize a master template. + """Initialize a main template. :param root: The root element of the template. :param version: The version number of the template. @@ -712,9 +712,9 @@ def __init__(self, root, version, nsmap=None): template. """ - super(MasterTemplate, self).__init__(root, nsmap) + super(MainTemplate, self).__init__(root, nsmap) self.version = version - self.slaves = [] + self.subordinates = [] def __repr__(self): """Return string representation of the template.""" @@ -728,88 +728,88 @@ def _siblings(self): An overridable hook method to return the siblings of the root element. This is the root element plus the root elements of - all the slave templates. + all the subordinate templates. 
""" - return [self.root] + [slave.root for slave in self.slaves] + return [self.root] + [subordinate.root for subordinate in self.subordinates] def _nsmap(self): """Hook method for computing the namespace dictionary. An overridable hook method to return the namespace dictionary. - The namespace dictionary is computed by taking the master + The namespace dictionary is computed by taking the main template's namespace dictionary and updating it from all the - slave templates. + subordinate templates. """ nsmap = self.nsmap.copy() - for slave in self.slaves: - nsmap.update(slave._nsmap()) + for subordinate in self.subordinates: + nsmap.update(subordinate._nsmap()) return nsmap - def attach(self, *slaves): - """Attach one or more slave templates. + def attach(self, *subordinates): + """Attach one or more subordinate templates. - Attaches one or more slave templates to the master template. - Slave templates must have a root element with the same tag as - the master template. The slave template's apply() method will - be called to determine if the slave should be applied to this - master; if it returns False, that slave will be skipped. - (This allows filtering of slaves based on the version of the - master template.) + Attaches one or more subordinate templates to the main template. + Subordinate templates must have a root element with the same tag as + the main template. The subordinate template's apply() method will + be called to determine if the subordinate should be applied to this + main; if it returns False, that subordinate will be skipped. + (This allows filtering of subordinates based on the version of the + main template.) 
""" - slave_list = [] - for slave in slaves: - slave = slave.wrap() + subordinate_list = [] + for subordinate in subordinates: + subordinate = subordinate.wrap() # Make sure we have a tree match - if slave.root.tag != self.root.tag: - msg = _("Template tree mismatch; adding slave %(slavetag)s to " - "master %(mastertag)s") % {'slavetag': slave.root.tag, - 'mastertag': self.root.tag} + if subordinate.root.tag != self.root.tag: + msg = _("Template tree mismatch; adding subordinate %(subordinatetag)s to " + "main %(maintag)s") % {'subordinatetag': subordinate.root.tag, + 'maintag': self.root.tag} raise ValueError(msg) - # Make sure slave applies to this template - if not slave.apply(self): + # Make sure subordinate applies to this template + if not subordinate.apply(self): continue - slave_list.append(slave) + subordinate_list.append(subordinate) - # Add the slaves - self.slaves.extend(slave_list) + # Add the subordinates + self.subordinates.extend(subordinate_list) def copy(self): - """Return a copy of this master template.""" + """Return a copy of this main template.""" - # Return a copy of the MasterTemplate + # Return a copy of the MainTemplate tmp = self.__class__(self.root, self.version, self.nsmap) - tmp.slaves = self.slaves[:] + tmp.subordinates = self.subordinates[:] return tmp -class SlaveTemplate(Template): - """Represent a slave template. +class SubordinateTemplate(Template): + """Represent a subordinate template. - Slave templates are versioned derivatives of templates. Each - slave has a minimum version and optional maximum version of the - master template to which they can be attached. + Subordinate templates are versioned derivatives of templates. Each + subordinate has a minimum version and optional maximum version of the + main template to which they can be attached. """ def __init__(self, root, min_vers, max_vers=None, nsmap=None): - """Initialize a slave template. + """Initialize a subordinate template. 
:param root: The root element of the template. - :param min_vers: The minimum permissible version of the master - template for this slave template to apply. - :param max_vers: An optional upper bound for the master + :param min_vers: The minimum permissible version of the main + template for this subordinate template to apply. + :param max_vers: An optional upper bound for the main template version. :param nsmap: An optional namespace dictionary to be associated with the root element of the template. """ - super(SlaveTemplate, self).__init__(root, nsmap) + super(SubordinateTemplate, self).__init__(root, nsmap) self.min_vers = min_vers self.max_vers = max_vers @@ -820,23 +820,23 @@ def __repr__(self): (self.__class__.__module__, self.__class__.__name__, self.min_vers, self.max_vers, id(self))) - def apply(self, master): - """Hook method for determining slave applicability. + def apply(self, main): + """Hook method for determining subordinate applicability. An overridable hook method used to determine if this template - is applicable as a slave to a given master template. This - version requires the master template to have a version number + is applicable as a subordinate to a given main template. This + version requires the main template to have a version number between min_vers and max_vers. - :param master: The master template to test. + :param main: The main template to test. """ - # Does the master meet our minimum version requirement? - if master.version < self.min_vers: + # Does the main meet our minimum version requirement? + if main.version < self.min_vers: return False # How about our maximum version requirement? 
- if self.max_vers is not None and master.version > self.max_vers: + if self.max_vers is not None and main.version > self.max_vers: return False return True diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 9f0ec90447..f41ee75747 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -670,7 +670,7 @@ def _get_instances_on_driver(self, context, filters=None): driver_uuids = self.driver.list_instance_uuids() filters['uuid'] = driver_uuids local_instances = objects.InstanceList.get_by_filters( - context, filters, use_slave=True) + context, filters, use_subordinate=True) return local_instances except NotImplementedError: pass @@ -679,7 +679,7 @@ def _get_instances_on_driver(self, context, filters=None): # to brute force. driver_instances = self.driver.list_instances() instances = objects.InstanceList.get_by_filters(context, filters, - use_slave=True) + use_subordinate=True) name_map = dict((instance.name, instance) for instance in instances) local_instances = [] for driver_instance in driver_instances: @@ -1162,7 +1162,7 @@ def refresh_provider_fw_rules(self, context): """This call passes straight through to the virtualization driver.""" return self.driver.refresh_provider_fw_rules() - def _get_instance_nw_info(self, context, instance, use_slave=False): + def _get_instance_nw_info(self, context, instance, use_subordinate=False): """Get a list of dictionaries of network data of an instance.""" if (not hasattr(instance, 'system_metadata') or len(instance['system_metadata']) == 0): @@ -1173,7 +1173,7 @@ def _get_instance_nw_info(self, context, instance, use_slave=False): # succeed. 
instance = objects.Instance.get_by_uuid(context, instance['uuid'], - use_slave=use_slave) + use_subordinate=use_subordinate) network_info = self.network_api.get_instance_nw_info(context, instance) @@ -1520,7 +1520,7 @@ def _check_instance_build_time(self, context): 'host': self.host} building_insts = objects.InstanceList.get_by_filters(context, - filters, expected_attrs=[], use_slave=True) + filters, expected_attrs=[], use_subordinate=True) for instance in building_insts: if timeutils.is_older_than(instance['created_at'], timeout): @@ -5094,7 +5094,7 @@ def _heal_instance_info_cache(self, context): # The list of instances to heal is empty so rebuild it LOG.debug('Rebuilding the list of instances to heal') db_instances = objects.InstanceList.get_by_host( - context, self.host, expected_attrs=[], use_slave=True) + context, self.host, expected_attrs=[], use_subordinate=True) for inst in db_instances: # We don't want to refersh the cache for instances # which are building or deleting so don't put them @@ -5124,7 +5124,7 @@ def _heal_instance_info_cache(self, context): inst = objects.Instance.get_by_uuid( context, instance_uuids.pop(0), expected_attrs=['system_metadata', 'info_cache'], - use_slave=True) + use_subordinate=True) except exception.InstanceNotFound: # Instance is gone. Try to grab another. continue @@ -5147,7 +5147,7 @@ def _heal_instance_info_cache(self, context): try: # Call to network API to get instance info.. 
this will # force an update to the instance's info_cache - self._get_instance_nw_info(context, instance, use_slave=True) + self._get_instance_nw_info(context, instance, use_subordinate=True) LOG.debug('Updated the network info_cache for instance', instance=instance) except Exception: @@ -5163,7 +5163,7 @@ def _poll_rebooting_instances(self, context): filters = {'task_state': task_states.REBOOTING, 'host': self.host} rebooting = objects.InstanceList.get_by_filters( - context, filters, expected_attrs=[], use_slave=True) + context, filters, expected_attrs=[], use_subordinate=True) to_poll = [] for instance in rebooting: @@ -5180,7 +5180,7 @@ def _poll_rescued_instances(self, context): 'host': self.host} rescued_instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=["system_metadata"], - use_slave=True) + use_subordinate=True) to_unrescue = [] for instance in rescued_instances: @@ -5198,7 +5198,7 @@ def _poll_unconfirmed_resizes(self, context): migrations = objects.MigrationList.get_unconfirmed_by_dest_compute( context, CONF.resize_confirm_window, self.host, - use_slave=True) + use_subordinate=True) migrations_info = dict(migration_count=len(migrations), confirm_window=CONF.resize_confirm_window) @@ -5226,7 +5226,7 @@ def _set_migration_to_error(migration, reason, **kwargs): try: instance = objects.Instance.get_by_uuid(context, instance_uuid, expected_attrs=expected_attrs, - use_slave=True) + use_subordinate=True) except exception.InstanceNotFound: reason = (_("Instance %s not found") % instance_uuid) @@ -5276,7 +5276,7 @@ def _poll_shelved_instances(self, context): 'host': self.host} shelved_instances = objects.InstanceList.get_by_filters( context, filters=filters, expected_attrs=['system_metadata'], - use_slave=True) + use_subordinate=True) to_gc = [] for instance in shelved_instances: @@ -5308,7 +5308,7 @@ def _instance_usage_audit(self, context): instances = objects.InstanceList.get_active_by_window_joined( context, begin, end, 
host=self.host, expected_attrs=['system_metadata', 'info_cache', 'metadata'], - use_slave=True) + use_subordinate=True) num_instances = len(instances) errors = 0 successes = 0 @@ -5373,7 +5373,7 @@ def _poll_bandwidth_usage(self, context): instances = objects.InstanceList.get_by_host(context, self.host, - use_slave=True) + use_subordinate=True) try: bw_counters = self.driver.get_all_bw_counters(instances) except NotImplementedError: @@ -5396,7 +5396,7 @@ def _poll_bandwidth_usage(self, context): last_ctr_in = None last_ctr_out = None # TODO(geekinutah): Once bw_usage_cache object is created - # need to revisit this and slaveify. + # need to revisit this and subordinateify. usage = self.conductor_api.bw_usage_get(context, bw_ctr['uuid'], start_time, @@ -5407,7 +5407,7 @@ def _poll_bandwidth_usage(self, context): last_ctr_in = usage['last_ctr_in'] last_ctr_out = usage['last_ctr_out'] else: - # TODO(geekinutah): Same here, pls slaveify + # TODO(geekinutah): Same here, pls subordinateify usage = self.conductor_api.bw_usage_get( context, bw_ctr['uuid'], prev_time, bw_ctr['mac_address']) @@ -5440,13 +5440,13 @@ def _poll_bandwidth_usage(self, context): last_refreshed=refreshed, update_cells=update_cells) - def _get_host_volume_bdms(self, context, use_slave=False): + def _get_host_volume_bdms(self, context, use_subordinate=False): """Return all block device mappings on a compute host.""" compute_host_bdms = [] instances = objects.InstanceList.get_by_host(context, self.host) for instance in instances: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid, use_slave=use_slave) + context, instance.uuid, use_subordinate=use_subordinate) instance_bdms = [bdm for bdm in bdms if bdm.is_volume] compute_host_bdms.append(dict(instance=instance, instance_bdms=instance_bdms)) @@ -5474,7 +5474,7 @@ def _poll_volume_usage(self, context, start_time=None): start_time = utils.last_completed_audit_period()[1] compute_host_bdms = 
self._get_host_volume_bdms(context, - use_slave=True) + use_subordinate=True) if not compute_host_bdms: return @@ -5501,7 +5501,7 @@ def _sync_power_states(self, context): """ db_instances = objects.InstanceList.get_by_host(context, self.host, - use_slave=True) + use_subordinate=True) num_vm_instances = self.driver.get_num_instances() num_db_instances = len(db_instances) @@ -5545,14 +5545,14 @@ def _query_driver_power_state_and_sync(self, context, db_instance): self._sync_instance_power_state(context, db_instance, vm_power_state, - use_slave=True) + use_subordinate=True) except exception.InstanceNotFound: # NOTE(hanlind): If the instance gets deleted during sync, # silently ignore. pass def _sync_instance_power_state(self, context, db_instance, vm_power_state, - use_slave=False): + use_subordinate=False): """Align instance power state between the database and hypervisor. If the instance is not found on the hypervisor, but is in the database, @@ -5561,7 +5561,7 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state, # We re-query the DB to get the latest instance info to minimize # (not eliminate) race condition. 
- db_instance.refresh(use_slave=use_slave) + db_instance.refresh(use_subordinate=use_subordinate) db_power_state = db_instance.power_state vm_state = db_instance.vm_state @@ -5711,7 +5711,7 @@ def _reclaim_queued_deletes(self, context): instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, - use_slave=True) + use_subordinate=True) for instance in instances: if self._deleted_old_enough(instance, interval): bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( @@ -5790,7 +5790,7 @@ def _cleanup_running_deleted_instances(self, context): with utils.temporary_mutation(context, read_deleted="yes"): for instance in self._running_deleted_instances(context): bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid, use_slave=True) + context, instance.uuid, use_subordinate=True) if action == "log": LOG.warning(_("Detected instance with name label " @@ -5889,11 +5889,11 @@ def _error_out_instance_on_exception(self, context, instance, @aggregate_object_compat @wrap_exception() - def add_aggregate_host(self, context, aggregate, host, slave_info): + def add_aggregate_host(self, context, aggregate, host, subordinate_info): """Notify hypervisor of change (for hypervisor pools).""" try: self.driver.add_to_aggregate(context, aggregate, host, - slave_info=slave_info) + subordinate_info=subordinate_info) except NotImplementedError: LOG.debug('Hypervisor driver does not support ' 'add_aggregate_host') @@ -5906,11 +5906,11 @@ def add_aggregate_host(self, context, aggregate, host, slave_info): @aggregate_object_compat @wrap_exception() - def remove_aggregate_host(self, context, host, slave_info, aggregate): + def remove_aggregate_host(self, context, host, subordinate_info, aggregate): """Removes a host from a physical hypervisor pool.""" try: self.driver.remove_from_aggregate(context, aggregate, host, - slave_info=slave_info) + subordinate_info=subordinate_info) except NotImplementedError: 
LOG.debug('Hypervisor driver does not support ' 'remove_aggregate_host') @@ -5968,7 +5968,7 @@ def _run_image_cache_manager_pass(self, context): 'soft_deleted': True, 'host': nodes} filtered_instances = objects.InstanceList.get_by_filters(context, - filters, expected_attrs=[], use_slave=True) + filters, expected_attrs=[], use_subordinate=True) self.driver.manage_image_cache(context, filtered_instances) @@ -5986,7 +5986,7 @@ def _run_pending_deletes(self, context): attrs = ['info_cache', 'security_groups', 'system_metadata'] with utils.temporary_mutation(context, read_deleted='yes'): instances = objects.InstanceList.get_by_filters( - context, filters, expected_attrs=attrs, use_slave=True) + context, filters, expected_attrs=attrs, use_subordinate=True) LOG.debug('There are %d instances to clean', len(instances)) for instance in instances: diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py index 7aeed520e7..9f7a602e3a 100644 --- a/nova/compute/rpcapi.py +++ b/nova/compute/rpcapi.py @@ -148,7 +148,7 @@ class ComputeAPI(object): * 2.0 - Remove 1.x backwards compat * 2.1 - Adds orig_sys_metadata to rebuild_instance() - * 2.2 - Adds slave_info parameter to add_aggregate_host() and + * 2.2 - Adds subordinate_info parameter to add_aggregate_host() and remove_aggregate_host() * 2.3 - Adds volume_id to reserve_block_device_name() * 2.4 - Add bdms to terminate_instance @@ -307,7 +307,7 @@ def _check_live_migration_api_version(self, server): raise exception.LiveMigrationWithOldNovaNotSafe(server=server) def add_aggregate_host(self, ctxt, aggregate, host_param, host, - slave_info=None): + subordinate_info=None): '''Add aggregate host. 
:param ctxt: request context @@ -328,7 +328,7 @@ def add_aggregate_host(self, ctxt, aggregate, host_param, host, cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'add_aggregate_host', aggregate=aggregate, host=host_param, - slave_info=slave_info) + subordinate_info=subordinate_info) def add_fixed_ip_to_instance(self, ctxt, instance, network_id): if self.client.can_send_version('3.12'): @@ -696,7 +696,7 @@ def refresh_provider_fw_rules(self, ctxt, host): cctxt.cast(ctxt, 'refresh_provider_fw_rules') def remove_aggregate_host(self, ctxt, aggregate, host_param, host, - slave_info=None): + subordinate_info=None): '''Remove aggregate host. :param ctxt: request context @@ -717,7 +717,7 @@ def remove_aggregate_host(self, ctxt, aggregate, host_param, host, cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'remove_aggregate_host', aggregate=aggregate, host=host_param, - slave_info=slave_info) + subordinate_info=subordinate_info) def remove_fixed_ip_from_instance(self, ctxt, instance, address): if self.client.can_send_version('3.13'): diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py index 6d360e8e21..345aa53768 100644 --- a/nova/conductor/manager.py +++ b/nova/conductor/manager.py @@ -218,10 +218,10 @@ def block_device_mapping_get_all_by_instance(self, context, instance, def instance_get_all_by_filters(self, context, filters, sort_key, sort_dir, columns_to_join, - use_slave): + use_subordinate): result = self.db.instance_get_all_by_filters( context, filters, sort_key, sort_dir, - columns_to_join=columns_to_join, use_slave=use_slave) + columns_to_join=columns_to_join, use_subordinate=use_subordinate) return jsonutils.to_primitive(result) def instance_get_active_by_window(self, context, begin, end, diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py index e121225b0b..19e6162e06 100644 --- a/nova/conductor/rpcapi.py +++ b/nova/conductor/rpcapi.py @@ -124,7 +124,7 @@ class ConductorAPI(object): * 
1.62 - Added object_backport() * 1.63 - Changed the format of values['stats'] from a dict to a JSON string in compute_node_update() - * 1.64 - Added use_slave to instance_get_all_filters() + * 1.64 - Added use_subordinate to instance_get_all_filters() - Remove instance_type_get() - Remove aggregate_get() - Remove aggregate_get_by_host() diff --git a/nova/console/xvp.py b/nova/console/xvp.py index 48d860def8..d1790391de 100644 --- a/nova/console/xvp.py +++ b/nova/console/xvp.py @@ -40,7 +40,7 @@ help='Generated XVP conf file'), cfg.StrOpt('console_xvp_pid', default='/var/run/xvp.pid', - help='XVP master process pid file'), + help='XVP main process pid file'), cfg.StrOpt('console_xvp_log', default='/var/log/xvp.log', help='XVP log file'), diff --git a/nova/db/api.py b/nova/db/api.py index 61b295e0c7..7380e41ac5 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -484,12 +484,12 @@ def migration_get_by_instance_and_status(context, instance_uuid, status): def migration_get_unconfirmed_by_dest_compute(context, confirm_window, - dest_compute, use_slave=False): + dest_compute, use_subordinate=False): """Finds all unconfirmed migrations within the confirmation window for a specific destination compute host. 
""" return IMPL.migration_get_unconfirmed_by_dest_compute(context, - confirm_window, dest_compute, use_slave=use_slave) + confirm_window, dest_compute, use_subordinate=use_subordinate) def migration_get_in_progress_by_host_and_node(context, host, node): @@ -626,10 +626,10 @@ def virtual_interface_get_by_uuid(context, vif_uuid): return IMPL.virtual_interface_get_by_uuid(context, vif_uuid) -def virtual_interface_get_by_instance(context, instance_id, use_slave=False): +def virtual_interface_get_by_instance(context, instance_id, use_subordinate=False): """Gets all virtual_interfaces for instance.""" return IMPL.virtual_interface_get_by_instance(context, instance_id, - use_slave=use_slave) + use_subordinate=use_subordinate) def virtual_interface_get_by_instance_and_network(context, instance_id, @@ -670,10 +670,10 @@ def instance_destroy(context, instance_uuid, constraint=None, return rv -def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False): +def instance_get_by_uuid(context, uuid, columns_to_join=None, use_subordinate=False): """Get an instance or raise if it does not exist.""" return IMPL.instance_get_by_uuid(context, uuid, - columns_to_join, use_slave=use_slave) + columns_to_join, use_subordinate=use_subordinate) def instance_get(context, instance_id, columns_to_join=None): @@ -689,18 +689,18 @@ def instance_get_all(context, columns_to_join=None): def instance_get_all_by_filters(context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): """Get all instances that match all filters.""" return IMPL.instance_get_all_by_filters(context, filters, sort_key, sort_dir, limit=limit, marker=marker, columns_to_join=columns_to_join, - use_slave=use_slave) + use_subordinate=use_subordinate) def instance_get_active_by_window_joined(context, begin, end=None, project_id=None, host=None, - use_slave=False): + use_subordinate=False): """Get 
instances and joins active during a certain time window. Specifying a project_id will filter for a certain project. @@ -708,15 +708,15 @@ def instance_get_active_by_window_joined(context, begin, end=None, """ return IMPL.instance_get_active_by_window_joined(context, begin, end, project_id, host, - use_slave=use_slave) + use_subordinate=use_subordinate) def instance_get_all_by_host(context, host, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): """Get all instances belonging to a host.""" return IMPL.instance_get_all_by_host(context, host, columns_to_join, - use_slave=use_slave) + use_subordinate=use_subordinate) def instance_get_all_by_host_and_node(context, host, node): @@ -1222,11 +1222,11 @@ def block_device_mapping_update_or_create(context, values, legacy=True): def block_device_mapping_get_all_by_instance(context, instance_uuid, - use_slave=False): + use_subordinate=False): """Get all block device mapping belonging to an instance.""" return IMPL.block_device_mapping_get_all_by_instance(context, instance_uuid, - use_slave) + use_subordinate) def block_device_mapping_get_by_volume_id(context, volume_id, @@ -1658,7 +1658,7 @@ def agent_build_update(context, agent_build_id, values): #################### -def bw_usage_get(context, uuid, start_period, mac, use_slave=False): +def bw_usage_get(context, uuid, start_period, mac, use_subordinate=False): """Return bw usage for instance and mac in a given audit period.""" return IMPL.bw_usage_get(context, uuid, start_period, mac) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ffc23d1c71..0a47d1465b 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -74,10 +74,10 @@ ] connection_opts = [ - cfg.StrOpt('slave_connection', + cfg.StrOpt('subordinate_connection', secret=True, help='The SQLAlchemy connection string used to connect to the ' - 'slave database'), + 'subordinate database'), ] CONF = cfg.CONF @@ -95,12 +95,12 @@ _SLAVE_FACADE = 
None -def _create_facade_lazily(use_slave=False): +def _create_facade_lazily(use_subordinate=False): global _MASTER_FACADE global _SLAVE_FACADE - return_slave = use_slave and CONF.database.slave_connection - if not return_slave: + return_subordinate = use_subordinate and CONF.database.subordinate_connection + if not return_subordinate: if _MASTER_FACADE is None: _MASTER_FACADE = db_session.EngineFacade( CONF.database.connection, @@ -110,19 +110,19 @@ def _create_facade_lazily(use_slave=False): else: if _SLAVE_FACADE is None: _SLAVE_FACADE = db_session.EngineFacade( - CONF.database.slave_connection, + CONF.database.subordinate_connection, **dict(CONF.database.iteritems()) ) return _SLAVE_FACADE -def get_engine(use_slave=False): - facade = _create_facade_lazily(use_slave) +def get_engine(use_subordinate=False): + facade = _create_facade_lazily(use_subordinate) return facade.get_engine() -def get_session(use_slave=False, **kwargs): - facade = _create_facade_lazily(use_slave) +def get_session(use_subordinate=False, **kwargs): + facade = _create_facade_lazily(use_subordinate) return facade.get_session(**kwargs) @@ -218,7 +218,7 @@ def model_query(context, model, *args, **kwargs): """Query helper that accounts for context's `read_deleted` field. :param context: context to query under - :param use_slave: If true, use slave_connection + :param use_subordinate: If true, use subordinate_connection :param session: if present, the session to use :param read_deleted: if present, overrides context's read_deleted field. :param project_only: if present and context is user-type, then restrict @@ -230,11 +230,11 @@ def model_query(context, model, *args, **kwargs): model parameter. 
""" - use_slave = kwargs.get('use_slave') or False - if CONF.database.slave_connection == '': - use_slave = False + use_subordinate = kwargs.get('use_subordinate') or False + if CONF.database.subordinate_connection == '': + use_subordinate = False - session = kwargs.get('session') or get_session(use_slave=use_slave) + session = kwargs.get('session') or get_session(use_subordinate=use_subordinate) read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only', False) @@ -1453,9 +1453,9 @@ def virtual_interface_create(context, values): return vif_ref -def _virtual_interface_query(context, session=None, use_slave=False): +def _virtual_interface_query(context, session=None, use_subordinate=False): return model_query(context, models.VirtualInterface, session=session, - read_deleted="no", use_slave=use_slave) + read_deleted="no", use_subordinate=use_subordinate) @require_context @@ -1501,12 +1501,12 @@ def virtual_interface_get_by_uuid(context, vif_uuid): @require_context @require_instance_exists_using_uuid -def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False): +def virtual_interface_get_by_instance(context, instance_uuid, use_subordinate=False): """Gets all virtual interfaces for instance. 
:param instance_uuid: = uuid of the instance to retrieve vifs for """ - vif_refs = _virtual_interface_query(context, use_slave=use_slave).\ + vif_refs = _virtual_interface_query(context, use_subordinate=use_subordinate).\ filter_by(instance_uuid=instance_uuid).\ order_by(asc("created_at"), asc("id")).\ all() @@ -1703,16 +1703,16 @@ def instance_destroy(context, instance_uuid, constraint=None): @require_context -def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False): +def instance_get_by_uuid(context, uuid, columns_to_join=None, use_subordinate=False): return _instance_get_by_uuid(context, uuid, - columns_to_join=columns_to_join, use_slave=use_slave) + columns_to_join=columns_to_join, use_subordinate=use_subordinate) def _instance_get_by_uuid(context, uuid, session=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): result = _build_instance_get(context, session=session, columns_to_join=columns_to_join, - use_slave=use_slave).\ + use_subordinate=use_subordinate).\ filter_by(uuid=uuid).\ first() @@ -1741,9 +1741,9 @@ def instance_get(context, instance_id, columns_to_join=None): def _build_instance_get(context, session=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): query = model_query(context, models.Instance, session=session, - project_only=True, use_slave=use_slave).\ + project_only=True, use_subordinate=use_subordinate).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('info_cache')) if columns_to_join is None: @@ -1761,7 +1761,7 @@ def _build_instance_get(context, session=None, def _instances_fill_metadata(context, instances, - manual_joins=None, use_slave=False): + manual_joins=None, use_subordinate=False): """Selectively fill instances with manually-joined metadata. Note that instance will be converted to a dict. 
@@ -1779,13 +1779,13 @@ def _instances_fill_metadata(context, instances, meta = collections.defaultdict(list) if 'metadata' in manual_joins: for row in _instance_metadata_get_multi(context, uuids, - use_slave=use_slave): + use_subordinate=use_subordinate): meta[row['instance_uuid']].append(row) sys_meta = collections.defaultdict(list) if 'system_metadata' in manual_joins: for row in _instance_system_metadata_get_multi(context, uuids, - use_slave=use_slave): + use_subordinate=use_subordinate): sys_meta[row['instance_uuid']].append(row) pcidevs = collections.defaultdict(list) @@ -1837,7 +1837,7 @@ def instance_get_all(context, columns_to_join=None): @require_context def instance_get_all_by_filters(context, filters, sort_key, sort_dir, limit=None, marker=None, columns_to_join=None, - use_slave=False): + use_subordinate=False): """Return instances that match all filters. Deleted instances will be returned by default, unless there's a filter that says otherwise. @@ -1879,10 +1879,10 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir, sort_fn = {'desc': desc, 'asc': asc} - if CONF.database.slave_connection == '': - use_slave = False + if CONF.database.subordinate_connection == '': + use_subordinate = False - session = get_session(use_slave=use_slave) + session = get_session(use_subordinate=use_subordinate) if columns_to_join is None: columns_to_join = ['info_cache', 'security_groups'] @@ -2071,9 +2071,9 @@ def regex_filter(query, model, filters): @require_context def instance_get_active_by_window_joined(context, begin, end=None, project_id=None, host=None, - use_slave=False): + use_subordinate=False): """Return instances and joins that were active during window.""" - session = get_session(use_slave=use_slave) + session = get_session(use_subordinate=use_subordinate) query = session.query(models.Instance) query = query.options(joinedload('info_cache')).\ @@ -2091,14 +2091,14 @@ def instance_get_active_by_window_joined(context, begin, end=None, def 
_instance_get_all_query(context, project_only=False, - joins=None, use_slave=False): + joins=None, use_subordinate=False): if joins is None: joins = ['info_cache', 'security_groups'] query = model_query(context, models.Instance, project_only=project_only, - use_slave=use_slave) + use_subordinate=use_subordinate) for join in joins: query = query.options(joinedload(join)) return query @@ -2107,12 +2107,12 @@ def _instance_get_all_query(context, project_only=False, @require_admin_context def instance_get_all_by_host(context, host, columns_to_join=None, - use_slave=False): + use_subordinate=False): return _instances_fill_metadata(context, _instance_get_all_query(context, - use_slave=use_slave).filter_by(host=host).all(), + use_subordinate=use_subordinate).filter_by(host=host).all(), manual_joins=columns_to_join, - use_slave=use_slave) + use_subordinate=use_subordinate) def _instance_get_all_uuids_by_host(context, host, session=None): @@ -3474,12 +3474,12 @@ def ec2_snapshot_get_by_uuid(context, snapshot_uuid): def _block_device_mapping_get_query(context, session=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): if columns_to_join is None: columns_to_join = [] query = model_query(context, models.BlockDeviceMapping, - session=session, use_slave=use_slave) + session=session, use_subordinate=use_subordinate) for column in columns_to_join: query = query.options(joinedload(column)) @@ -3561,8 +3561,8 @@ def block_device_mapping_update_or_create(context, values, legacy=True): @require_context def block_device_mapping_get_all_by_instance(context, instance_uuid, - use_slave=False): - return _block_device_mapping_get_query(context, use_slave=use_slave).\ + use_subordinate=False): + return _block_device_mapping_get_query(context, use_subordinate=use_subordinate).\ filter_by(instance_uuid=instance_uuid).\ all() @@ -4067,12 +4067,12 @@ def migration_get_by_instance_and_status(context, instance_uuid, status): @require_admin_context def 
migration_get_unconfirmed_by_dest_compute(context, confirm_window, - dest_compute, use_slave=False): + dest_compute, use_subordinate=False): confirm_window = (timeutils.utcnow() - datetime.timedelta(seconds=confirm_window)) return model_query(context, models.Migration, read_deleted="yes", - use_slave=use_slave).\ + use_subordinate=use_subordinate).\ filter(models.Migration.updated_at <= confirm_window).\ filter_by(status="finished").\ filter_by(dest_compute=dest_compute).\ @@ -4591,11 +4591,11 @@ def cell_get_all(context): # User-provided metadata def _instance_metadata_get_multi(context, instance_uuids, - session=None, use_slave=False): + session=None, use_subordinate=False): if not instance_uuids: return [] return model_query(context, models.InstanceMetadata, - session=session, use_slave=use_slave).\ + session=session, use_subordinate=use_subordinate).\ filter( models.InstanceMetadata.instance_uuid.in_(instance_uuids)) @@ -4657,11 +4657,11 @@ def instance_metadata_update(context, instance_uuid, metadata, delete): def _instance_system_metadata_get_multi(context, instance_uuids, - session=None, use_slave=False): + session=None, use_subordinate=False): if not instance_uuids: return [] return model_query(context, models.InstanceSystemMetadata, - session=session, use_slave=use_slave).\ + session=session, use_subordinate=use_subordinate).\ filter( models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids)) @@ -4764,9 +4764,9 @@ def agent_build_update(context, agent_build_id, values): #################### @require_context -def bw_usage_get(context, uuid, start_period, mac, use_slave=False): +def bw_usage_get(context, uuid, start_period, mac, use_subordinate=False): return model_query(context, models.BandwidthUsage, read_deleted="yes", - use_slave=use_slave).\ + use_subordinate=use_subordinate).\ filter_by(start_period=start_period).\ filter_by(uuid=uuid).\ filter_by(mac=mac).\ diff --git a/nova/network/ldapdns.py b/nova/network/ldapdns.py index 
488467f119..3d5e4f33f9 100644 --- a/nova/network/ldapdns.py +++ b/nova/network/ldapdns.py @@ -42,9 +42,9 @@ default='password', help='Password for LDAP DNS', secret=True), - cfg.StrOpt('ldap_dns_soa_hostmaster', - default='hostmaster@example.org', - help='Hostmaster for LDAP DNS driver Statement of Authority'), + cfg.StrOpt('ldap_dns_soa_hostmain', + default='hostmain@example.org', + help='Hostmain for LDAP DNS driver Statement of Authority'), cfg.MultiStrOpt('ldap_dns_servers', default=['dns.example.org'], help='DNS Servers for LDAP DNS driver'), @@ -156,7 +156,7 @@ def _soa(cls): date = time.strftime('%Y%m%d%H%M%S') soa = '%s %s %s %s %s %s %s' % ( CONF.ldap_dns_servers[0], - CONF.ldap_dns_soa_hostmaster, + CONF.ldap_dns_soa_hostmain, date, CONF.ldap_dns_soa_refresh, CONF.ldap_dns_soa_retry, diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index c929280a00..359a4f22b4 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -1570,7 +1570,7 @@ def ensure_bridge(bridge, interface, net_attrs=None, gateway=True, out, err = _execute('brctl', 'addif', bridge, interface, check_exit_code=False, run_as_root=True) if (err and err != "device %s is already a member of a bridge; " - "can't enslave it to bridge %s.\n" % (interface, bridge)): + "can't ensubordinate it to bridge %s.\n" % (interface, bridge)): msg = _('Failed to add interface: %s') % err raise exception.NovaException(msg) diff --git a/nova/network/manager.py b/nova/network/manager.py index 857a22a415..8becdfe53c 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -568,7 +568,7 @@ def get_instance_nw_info(self, context, instance_id, rxtx_factor, where network = dict containing pertinent data from a network db object and info = dict containing pertinent networking data """ - use_slave = kwargs.get('use_slave') or False + use_subordinate = kwargs.get('use_subordinate') or False if not uuidutils.is_uuid_like(instance_id): instance_id = instance_uuid @@ -576,7 
+576,7 @@ def get_instance_nw_info(self, context, instance_id, rxtx_factor, LOG.debug('Get instance network info', instance_uuid=instance_uuid) vifs = objects.VirtualInterfaceList.get_by_instance_uuid( - context, instance_uuid, use_slave=use_slave) + context, instance_uuid, use_subordinate=use_subordinate) networks = {} for vif in vifs: diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py index 7131cef564..5566eb329a 100644 --- a/nova/network/neutronv2/api.py +++ b/nova/network/neutronv2/api.py @@ -540,13 +540,13 @@ def show_port(self, context, port_id): return neutronv2.get_client(context).show_port(port_id) def get_instance_nw_info(self, context, instance, networks=None, - port_ids=None, use_slave=False): + port_ids=None, use_subordinate=False): """Return network information for specified instance and update cache. """ - # NOTE(geekinutah): It would be nice if use_slave had us call - # special APIs that pummeled slaves instead of - # the master. For now we just ignore this arg. + # NOTE(geekinutah): It would be nice if use_subordinate had us call + # special APIs that pummeled subordinates instead of + # the main. For now we just ignore this arg. 
result = self._get_instance_nw_info(context, instance, networks, port_ids) base_api.update_instance_cache_with_nw_info(self, context, instance, diff --git a/nova/objects/block_device.py b/nova/objects/block_device.py index 18c2b6fda7..e3cb27cafa 100644 --- a/nova/objects/block_device.py +++ b/nova/objects/block_device.py @@ -188,7 +188,7 @@ def obj_load_attr(self, attrname): class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: BlockDeviceMapping <= version 1.1 - # Version 1.2: Added use_slave to get_by_instance_uuid + # Version 1.2: Added use_subordinate to get_by_instance_uuid VERSION = '1.2' fields = { @@ -201,9 +201,9 @@ class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject): } @base.remotable_classmethod - def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False): + def get_by_instance_uuid(cls, context, instance_uuid, use_subordinate=False): db_bdms = db.block_device_mapping_get_all_by_instance( - context, instance_uuid, use_slave=use_slave) + context, instance_uuid, use_subordinate=use_subordinate) return base.obj_make_list( context, cls(), objects.BlockDeviceMapping, db_bdms or []) diff --git a/nova/objects/instance.py b/nova/objects/instance.py index 2d6276205c..320a13d665 100644 --- a/nova/objects/instance.py +++ b/nova/objects/instance.py @@ -68,7 +68,7 @@ class Instance(base.NovaPersistentObject, base.NovaObject): # Version 1.7: String attributes updated to support unicode # Version 1.8: 'security_groups' and 'pci_devices' cannot be None # Version 1.9: Make uuid a non-None real string - # Version 1.10: Added use_slave to refresh and get_by_uuid + # Version 1.10: Added use_subordinate to refresh and get_by_uuid # Version 1.11: Update instance from database during destroy # Version 1.12: Added ephemeral_key_uuid # Version 1.13: Added delete_metadata_key() @@ -304,13 +304,13 @@ def _from_db_object(context, instance, db_inst, expected_attrs=None): return instance 
@base.remotable_classmethod - def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False): + def get_by_uuid(cls, context, uuid, expected_attrs=None, use_subordinate=False): if expected_attrs is None: expected_attrs = ['info_cache', 'security_groups'] columns_to_join = _expected_cols(expected_attrs) db_inst = db.instance_get_by_uuid(context, uuid, columns_to_join=columns_to_join, - use_slave=use_slave) + use_subordinate=use_subordinate) return cls._from_db_object(context, cls(), db_inst, expected_attrs) @@ -489,12 +489,12 @@ def _handle_cell_update_from_api(): self.obj_reset_changes() @base.remotable - def refresh(self, context, use_slave=False): + def refresh(self, context, use_subordinate=False): extra = [field for field in INSTANCE_OPTIONAL_ATTRS if self.obj_attr_is_set(field)] current = self.__class__.get_by_uuid(context, uuid=self.uuid, expected_attrs=extra, - use_slave=use_slave) + use_subordinate=use_subordinate) # NOTE(danms): We orphan the instance copy so we do not unexpectedly # trigger a lazy-load (which would mean we failed to calculate the # expected_attrs properly) @@ -617,14 +617,14 @@ def _make_instance_list(context, inst_list, db_inst_list, expected_attrs): class InstanceList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version - # Version 1.1: Added use_slave to get_by_host + # Version 1.1: Added use_subordinate to get_by_host # Instance <= version 1.9 # Version 1.2: Instance <= version 1.11 - # Version 1.3: Added use_slave to get_by_filters + # Version 1.3: Added use_subordinate to get_by_filters # Version 1.4: Instance <= version 1.12 # Version 1.5: Added method get_active_by_window_joined. 
# Version 1.6: Instance <= version 1.13 - # Version 1.7: Added use_slave to get_active_by_window_joined + # Version 1.7: Added use_subordinate to get_active_by_window_joined VERSION = '1.7' fields = { @@ -644,19 +644,19 @@ class InstanceList(base.ObjectListBase, base.NovaObject): @base.remotable_classmethod def get_by_filters(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, - marker=None, expected_attrs=None, use_slave=False): + marker=None, expected_attrs=None, use_subordinate=False): db_inst_list = db.instance_get_all_by_filters( context, filters, sort_key, sort_dir, limit=limit, marker=marker, columns_to_join=_expected_cols(expected_attrs), - use_slave=use_slave) + use_subordinate=use_subordinate) return _make_instance_list(context, cls(), db_inst_list, expected_attrs) @base.remotable_classmethod - def get_by_host(cls, context, host, expected_attrs=None, use_slave=False): + def get_by_host(cls, context, host, expected_attrs=None, use_subordinate=False): db_inst_list = db.instance_get_all_by_host( context, host, columns_to_join=_expected_cols(expected_attrs), - use_slave=use_slave) + use_subordinate=use_subordinate) return _make_instance_list(context, cls(), db_inst_list, expected_attrs) @@ -687,7 +687,7 @@ def get_hung_in_rebooting(cls, context, reboot_window, def _get_active_by_window_joined(cls, context, begin, end=None, project_id=None, host=None, expected_attrs=None, - use_slave=False): + use_subordinate=False): # NOTE(mriedem): We need to convert the begin/end timestamp strings # to timezone-aware datetime objects for the DB API call. begin = timeutils.parse_isotime(begin) @@ -704,7 +704,7 @@ def _get_active_by_window_joined(cls, context, begin, end=None, def get_active_by_window_joined(cls, context, begin, end=None, project_id=None, host=None, expected_attrs=None, - use_slave=False): + use_subordinate=False): """Get instances and joins active during a certain time window. 
:param:context: nova request context @@ -714,7 +714,7 @@ def get_active_by_window_joined(cls, context, begin, end=None, :param:host: used to filter instances on a given compute host :param:expected_attrs: list of related fields that can be joined in the database layer when querying for instances - :param use_slave if True, ship this query off to a DB slave + :param use_subordinate if True, ship this query off to a DB subordinate :returns: InstanceList """ @@ -725,7 +725,7 @@ def get_active_by_window_joined(cls, context, begin, end=None, return cls._get_active_by_window_joined(context, begin, end, project_id, host, expected_attrs, - use_slave=use_slave) + use_subordinate=use_subordinate) @base.remotable_classmethod def get_by_security_group_id(cls, context, security_group_id): diff --git a/nova/objects/migration.py b/nova/objects/migration.py index 3679198e4a..d9bbb3d5f6 100644 --- a/nova/objects/migration.py +++ b/nova/objects/migration.py @@ -81,7 +81,7 @@ def instance(self): class MigrationList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Migration <= 1.1 - # Version 1.1: Added use_slave to get_unconfirmed_by_dest_compute + # Version 1.1: Added use_subordinate to get_unconfirmed_by_dest_compute VERSION = '1.1' fields = { @@ -95,9 +95,9 @@ class MigrationList(base.ObjectListBase, base.NovaObject): @base.remotable_classmethod def get_unconfirmed_by_dest_compute(cls, context, confirm_window, - dest_compute, use_slave=False): + dest_compute, use_subordinate=False): db_migrations = db.migration_get_unconfirmed_by_dest_compute( - context, confirm_window, dest_compute, use_slave=use_slave) + context, confirm_window, dest_compute, use_subordinate=use_subordinate) return base.obj_make_list(context, cls(context), objects.Migration, db_migrations) diff --git a/nova/objects/virtual_interface.py b/nova/objects/virtual_interface.py index 51cd24c352..b6671b3bcf 100644 --- a/nova/objects/virtual_interface.py +++ b/nova/objects/virtual_interface.py @@ 
-95,8 +95,8 @@ def get_all(cls, context): objects.VirtualInterface, db_vifs) @base.remotable_classmethod - def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False): + def get_by_instance_uuid(cls, context, instance_uuid, use_subordinate=False): db_vifs = db.virtual_interface_get_by_instance(context, instance_uuid, - use_slave=use_slave) + use_subordinate=use_subordinate) return base.obj_make_list(context, cls(context), objects.VirtualInterface, db_vifs) diff --git a/nova/openstack/common/db/sqlalchemy/migration.py b/nova/openstack/common/db/sqlalchemy/migration.py index 1d6ac34942..b9dd2851ad 100644 --- a/nova/openstack/common/db/sqlalchemy/migration.py +++ b/nova/openstack/common/db/sqlalchemy/migration.py @@ -64,7 +64,7 @@ def _get_unique_constraints(self, table): data = table.metadata.bind.execute( """SELECT sql - FROM sqlite_master + FROM sqlite_main WHERE type='table' AND name=:table_name""", diff --git a/nova/openstack/common/gettextutils.py b/nova/openstack/common/gettextutils.py index 4dd2ac6594..b120c0b04c 100644 --- a/nova/openstack/common/gettextutils.py +++ b/nova/openstack/common/gettextutils.py @@ -331,9 +331,9 @@ def get_available_languages(domain): # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list + # renamed to locale_identifiers() in >=1.0, the requirements main list # requires >=0.9.6, uncapped, so defensively work with both. 
We can remove - # this check when the master list updates to >=1.0, and update all projects + # this check when the main list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 6b9ca05833..3406b9f7d3 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -2516,7 +2516,7 @@ def fake_show(meh, context, id, **kwargs): self.stubs.Set(fake._FakeImageService, 'show', fake_show) def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': volumes[0], 'snapshot_id': snapshots[0], @@ -2592,7 +2592,7 @@ def test_create_image_instance_store(self): ec2_instance_id = self._run_instance(**kwargs) def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': volumes[0], 'snapshot_id': snapshots[0], @@ -2613,7 +2613,7 @@ def fake_block_device_mapping_get_all_by_instance(context, inst_id, no_reboot=True) @staticmethod - def _fake_bdm_get(ctxt, id, use_slave=False): + def _fake_bdm_get(ctxt, id, use_subordinate=False): blockdms = [{'volume_id': 87654321, 'source_type': 'volume', 'destination_type': 'volume', diff --git a/nova/tests/api/openstack/compute/contrib/test_disk_config.py b/nova/tests/api/openstack/compute/contrib/test_disk_config.py index 9f60a03032..d40be0d76d 100644 --- a/nova/tests/api/openstack/compute/contrib/test_disk_config.py +++ b/nova/tests/api/openstack/compute/contrib/test_disk_config.py @@ -65,7 +65,7 @@ def fake_instance_get(context, id_): self.stubs.Set(db, 'instance_get', fake_instance_get) def fake_instance_get_by_uuid(context, uuid, - columns_to_join=None, use_slave=False): + 
columns_to_join=None, use_subordinate=False): for instance in FAKE_INSTANCES: if uuid == instance['uuid']: return instance diff --git a/nova/tests/api/openstack/compute/contrib/test_instance_actions.py b/nova/tests/api/openstack/compute/contrib/test_instance_actions.py index 0c4f0c1558..51bc6886a0 100644 --- a/nova/tests/api/openstack/compute/contrib/test_instance_actions.py +++ b/nova/tests/api/openstack/compute/contrib/test_instance_actions.py @@ -79,7 +79,7 @@ def test_list_actions_restricted_by_project(self): def fake_instance_get_by_uuid(context, instance_id, columns_to_join=None, - use_slave=False): + use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) @@ -97,7 +97,7 @@ def test_get_action_restricted_by_project(self): def fake_instance_get_by_uuid(context, instance_id, columns_to_join=None, - use_slave=False): + use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) @@ -120,7 +120,7 @@ def fake_get(self, context, instance_uuid, expected_attrs=None, want_objects=False): return {'uuid': instance_uuid} - def fake_instance_get_by_uuid(context, instance_id, use_slave=False): + def fake_instance_get_by_uuid(context, instance_id, use_subordinate=False): return {'name': 'fake', 'project_id': context.project_id} self.stubs.Set(compute_api.API, 'get', fake_get) diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py index b3ef7ebeb6..2b55f854aa 100644 --- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py +++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py @@ -82,7 +82,7 @@ def security_group_rule_db(rule, id=None): def return_server(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 
int(server_id), 'power_state': 0x01, @@ -93,7 +93,7 @@ def return_server(context, server_id, def return_server_by_uuid(context, server_uuid, columns_to_join=None, - use_slave=False): + use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'power_state': 0x01, @@ -402,7 +402,7 @@ def test_get_security_group_by_instance(self): expected = {'security_groups': groups} def return_instance(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertEqual(server_id, FAKE_UUID1) return return_server_by_uuid(context, server_id) @@ -429,7 +429,7 @@ def test_get_security_group_empty_for_instance(self, mock_sec_group, expected = {'security_groups': []} def return_instance(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertEqual(server_id, FAKE_UUID1) return return_server_by_uuid(context, server_id) mock_db_get_ins.side_effect = return_instance @@ -1706,7 +1706,7 @@ def construct(self): root.set('id') root.set('imageRef') root.set('flavorRef') - return xmlutil.MasterTemplate(root, 1, + return xmlutil.MainTemplate(root, 1, nsmap={None: xmlutil.XMLNS_V11}) def _encode_body(self, body): diff --git a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py index bc5b132258..761f29c511 100644 --- a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py +++ b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py @@ -26,7 +26,7 @@ def fake_instance_get(context, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): result = fakes.stub_instance(id=1, uuid=instance_id) result['created_at'] = None result['deleted_at'] = None diff --git a/nova/tests/api/openstack/compute/contrib/test_shelve.py b/nova/tests/api/openstack/compute/contrib/test_shelve.py index 43f852019c..8ab472c002 100644 --- 
a/nova/tests/api/openstack/compute/contrib/test_shelve.py +++ b/nova/tests/api/openstack/compute/contrib/test_shelve.py @@ -28,7 +28,7 @@ def fake_instance_get_by_uuid(context, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) diff --git a/nova/tests/api/openstack/compute/contrib/test_volumes.py b/nova/tests/api/openstack/compute/contrib/test_volumes.py index 3c37a7f419..efa15c9d46 100644 --- a/nova/tests/api/openstack/compute/contrib/test_volumes.py +++ b/nova/tests/api/openstack/compute/contrib/test_volumes.py @@ -97,7 +97,7 @@ def fake_compute_volume_snapshot_create(self, context, volume_id, pass -def fake_bdms_get_all_by_instance(context, instance_uuid, use_slave=False): +def fake_bdms_get_all_by_instance(context, instance_uuid, use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'instance_uuid': instance_uuid, diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py index 29ed96a32b..d2ea6f29e8 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py @@ -83,7 +83,7 @@ def test_list_actions_restricted_by_project(self): policy.set_rules(rules) def fake_instance_get_by_uuid(context, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) @@ -100,7 +100,7 @@ def test_get_action_restricted_by_project(self): policy.set_rules(rules) def fake_instance_get_by_uuid(context, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 
'project_id': '%s_unequal' % context.project_id}) @@ -124,7 +124,7 @@ def fake_get(self, context, instance_uuid, expected_attrs=None, return {'uuid': instance_uuid} def fake_instance_get_by_uuid(context, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py index db5447ddd0..c829e7c0d8 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py @@ -461,7 +461,7 @@ def test_rebuild_admin_password_pass_disabled(self): def test_rebuild_server_not_found(self): def server_not_found(self, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): raise exception.InstanceNotFound(instance_id=instance_id) self.stubs.Set(db, 'instance_get_by_uuid', server_not_found) @@ -870,7 +870,7 @@ def _fake_id(x): image_service.create(None, original_image) def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': _fake_id('a'), 'source_type': 'snapshot', @@ -948,7 +948,7 @@ def _fake_id(x): image_service = glance.get_default_image_service() def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': _fake_id('a'), 'source_type': 'snapshot', diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py index cb4d1a3f1c..0b514a040c 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py +++ 
b/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py @@ -90,7 +90,7 @@ def return_server(context, server_id, columns_to_join=None): def return_server_by_uuid(context, server_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', @@ -102,7 +102,7 @@ def return_server_by_uuid(context, server_uuid, def return_server_nonexistent(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): raise exception.InstanceNotFound(instance_id=server_id) @@ -576,7 +576,7 @@ def _return_server_in_build(self, context, server_id, 'vm_state': vm_states.BUILDING}) def _return_server_in_build_by_uuid(self, context, server_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py index b4fe5793ab..b3c12b809f 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py @@ -113,7 +113,7 @@ def fake_start_stop_invalid_state(self, context, instance): def fake_instance_get_by_uuid_not_found(context, uuid, - columns_to_join, use_slave=False): + columns_to_join, use_subordinate=False): raise exception.InstanceNotFound(instance_id=uuid) @@ -694,7 +694,7 @@ def fake_get_all(compute_self, context, search_opts=None, def test_tenant_id_filter_converts_to_project_id_for_admin(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=None): self.assertIsNotNone(filters) self.assertEqual(filters['project_id'], 
'newfake') @@ -714,7 +714,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_tenant_id_filter_no_admin_context(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=None): self.assertNotEqual(filters, None) self.assertEqual(filters['project_id'], 'fake') @@ -730,7 +730,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_tenant_id_filter_implies_all_tenants(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=None): self.assertNotEqual(filters, None) # The project_id assertion checks that the project_id @@ -752,7 +752,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_normal(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=None): self.assertNotIn('project_id', filters) return [fakes.stub_instance(100)] @@ -769,7 +769,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_one(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=None): self.assertNotIn('project_id', filters) return [fakes.stub_instance(100)] @@ -786,7 +786,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_zero(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=None): self.assertNotIn('all_tenants', 
filters) return [fakes.stub_instance(100)] @@ -803,7 +803,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_false(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=None): self.assertNotIn('all_tenants', filters) return [fakes.stub_instance(100)] @@ -836,7 +836,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_admin_restricted_tenant(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=None): self.assertIsNotNone(filters) self.assertEqual(filters['project_id'], 'fake') @@ -854,7 +854,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_pass_policy(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False, + columns_to_join=None, use_subordinate=False, expected_attrs=None): self.assertIsNotNone(filters) self.assertNotIn('project_id', filters) diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_shelve.py b/nova/tests/api/openstack/compute/plugins/v3/test_shelve.py index b790e45d09..d4ae62e15f 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_shelve.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_shelve.py @@ -28,7 +28,7 @@ def fake_instance_get_by_uuid(context, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py index 6d03436708..0af600c208 100644 --- 
a/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/tests/api/openstack/compute/test_server_actions.py @@ -626,7 +626,7 @@ def test_rebuild_admin_pass_pass_disabled(self): def test_rebuild_server_not_found(self): def server_not_found(self, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): raise exception.InstanceNotFound(instance_id=instance_id) self.stubs.Set(db, 'instance_get_by_uuid', server_not_found) @@ -1088,7 +1088,7 @@ def _fake_id(x): image_service.create(None, original_image) def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': _fake_id('a'), 'source_type': 'snapshot', @@ -1166,7 +1166,7 @@ def _fake_id(x): image_service = glance.get_default_image_service() def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': _fake_id('a'), 'source_type': 'snapshot', diff --git a/nova/tests/api/openstack/compute/test_server_metadata.py b/nova/tests/api/openstack/compute/test_server_metadata.py index 71974a95bf..d809c83488 100644 --- a/nova/tests/api/openstack/compute/test_server_metadata.py +++ b/nova/tests/api/openstack/compute/test_server_metadata.py @@ -91,7 +91,7 @@ def return_server(context, server_id, columns_to_join=None): def return_server_by_uuid(context, server_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', @@ -103,7 +103,7 @@ def return_server_by_uuid(context, server_uuid, def return_server_nonexistent(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): raise exception.InstanceNotFound(instance_id=server_id) @@ -569,7 +569,7 @@ def 
_return_server_in_build(self, context, server_id, 'vm_state': vm_states.BUILDING}) def _return_server_in_build_by_uuid(self, context, server_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index b491e4394f..345404002e 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -660,7 +660,7 @@ def fake_get_all(compute_self, context, search_opts=None, def test_tenant_id_filter_converts_to_project_id_for_admin(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertIsNotNone(filters) self.assertEqual(filters['project_id'], 'newfake') self.assertFalse(filters.get('tenant_id')) @@ -679,7 +679,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_normal(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertNotIn('project_id', filters) return [fakes.stub_instance(100)] @@ -695,7 +695,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_one(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertNotIn('project_id', filters) return [fakes.stub_instance(100)] @@ -711,7 +711,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_zero(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, 
marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertNotIn('all_tenants', filters) return [fakes.stub_instance(100)] @@ -727,7 +727,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_param_false(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertNotIn('all_tenants', filters) return [fakes.stub_instance(100)] @@ -758,7 +758,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_admin_restricted_tenant(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertIsNotNone(filters) self.assertEqual(filters['project_id'], 'fake') return [fakes.stub_instance(100)] @@ -775,7 +775,7 @@ def fake_get_all(context, filters=None, sort_key=None, def test_all_tenants_pass_policy(self): def fake_get_all(context, filters=None, sort_key=None, sort_dir='desc', limit=None, marker=None, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertIsNotNone(filters) self.assertNotIn('project_id', filters) return [fakes.stub_instance(100)] diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 41e76d7747..e05efa5614 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -370,7 +370,7 @@ def get_fake_uuid(token=0): def fake_instance_get(**kwargs): - def _return_server(context, uuid, columns_to_join=None, use_slave=False): + def _return_server(context, uuid, columns_to_join=None, use_subordinate=False): return stub_instance(1, **kwargs) return _return_server @@ -393,8 +393,8 @@ def _return_servers(context, *args, **kwargs): if 'columns_to_join' in kwargs: kwargs.pop('columns_to_join') - 
if 'use_slave' in kwargs: - kwargs.pop('use_slave') + if 'use_subordinate' in kwargs: + kwargs.pop('use_subordinate') for i in xrange(num_servers): uuid = get_fake_uuid(i) @@ -641,7 +641,7 @@ def stub_snapshot_get_all(self, context): stub_snapshot(102, project_id='superduperfake')] -def stub_bdm_get_all_by_instance(context, instance_uuid, use_slave=False): +def stub_bdm_get_all_by_instance(context, instance_uuid, use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'volume_id1', 'instance_uuid': instance_uuid}), diff --git a/nova/tests/api/openstack/test_xmlutil.py b/nova/tests/api/openstack/test_xmlutil.py index 903340c8d6..31c34bc2fc 100644 --- a/nova/tests/api/openstack/test_xmlutil.py +++ b/nova/tests/api/openstack/test_xmlutil.py @@ -392,17 +392,17 @@ def test__render(self): attr2=xmlutil.ConstantSelector(2), attr3=xmlutil.ConstantSelector(3)) - # Create a master template element - master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1']) + # Create a main template element + main_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1']) - # Create a couple of slave template element - slave_elems = [ + # Create a couple of subordinate template element + subordinate_elems = [ xmlutil.TemplateElement('test', attr2=attrs['attr2']), xmlutil.TemplateElement('test', attr3=attrs['attr3']), ] # Try the render - elem = master_elem._render(None, None, slave_elems, None) + elem = main_elem._render(None, None, subordinate_elems, None) # Verify the particulars of the render self.assertEqual(elem.tag, 'test') @@ -414,7 +414,7 @@ def test__render(self): parent = etree.Element('parent') # Try the render again... 
- elem = master_elem._render(parent, None, slave_elems, dict(a='foo')) + elem = main_elem._render(parent, None, subordinate_elems, dict(a='foo')) # Verify the particulars of the render self.assertEqual(len(parent), 1) @@ -551,47 +551,47 @@ def test__nsmap(self): self.assertEqual(len(nsmap), 1) self.assertEqual(nsmap['a'], 'foo') - def test_master_attach(self): - # Set up a master template + def test_main_attach(self): + # Set up a main template elem = xmlutil.TemplateElement('test') - tmpl = xmlutil.MasterTemplate(elem, 1) + tmpl = xmlutil.MainTemplate(elem, 1) - # Make sure it has a root but no slaves + # Make sure it has a root but no subordinates self.assertEqual(tmpl.root, elem) - self.assertEqual(len(tmpl.slaves), 0) + self.assertEqual(len(tmpl.subordinates), 0) self.assertTrue(repr(tmpl)) - # Try to attach an invalid slave + # Try to attach an invalid subordinate bad_elem = xmlutil.TemplateElement('test2') self.assertRaises(ValueError, tmpl.attach, bad_elem) - self.assertEqual(len(tmpl.slaves), 0) + self.assertEqual(len(tmpl.subordinates), 0) - # Try to attach an invalid and a valid slave + # Try to attach an invalid and a valid subordinate good_elem = xmlutil.TemplateElement('test') self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem) - self.assertEqual(len(tmpl.slaves), 0) + self.assertEqual(len(tmpl.subordinates), 0) # Try to attach an inapplicable template class InapplicableTemplate(xmlutil.Template): - def apply(self, master): + def apply(self, main): return False inapp_tmpl = InapplicableTemplate(good_elem) tmpl.attach(inapp_tmpl) - self.assertEqual(len(tmpl.slaves), 0) + self.assertEqual(len(tmpl.subordinates), 0) # Now try attaching an applicable template tmpl.attach(good_elem) - self.assertEqual(len(tmpl.slaves), 1) - self.assertEqual(tmpl.slaves[0].root, good_elem) + self.assertEqual(len(tmpl.subordinates), 1) + self.assertEqual(tmpl.subordinates[0].root, good_elem) - def test_master_copy(self): - # Construct a master template + def 
test_main_copy(self): + # Construct a main template elem = xmlutil.TemplateElement('test') - tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo')) + tmpl = xmlutil.MainTemplate(elem, 1, nsmap=dict(a='foo')) - # Give it a slave - slave = xmlutil.TemplateElement('test') - tmpl.attach(slave) + # Give it a subordinate + subordinate = xmlutil.TemplateElement('test') + tmpl.attach(subordinate) # Construct a copy copy = tmpl.copy() @@ -601,43 +601,43 @@ def test_master_copy(self): self.assertEqual(tmpl.root, copy.root) self.assertEqual(tmpl.version, copy.version) self.assertEqual(id(tmpl.nsmap), id(copy.nsmap)) - self.assertNotEqual(id(tmpl.slaves), id(copy.slaves)) - self.assertEqual(len(tmpl.slaves), len(copy.slaves)) - self.assertEqual(tmpl.slaves[0], copy.slaves[0]) + self.assertNotEqual(id(tmpl.subordinates), id(copy.subordinates)) + self.assertEqual(len(tmpl.subordinates), len(copy.subordinates)) + self.assertEqual(tmpl.subordinates[0], copy.subordinates[0]) - def test_slave_apply(self): - # Construct a master template + def test_subordinate_apply(self): + # Construct a main template elem = xmlutil.TemplateElement('test') - master = xmlutil.MasterTemplate(elem, 3) + main = xmlutil.MainTemplate(elem, 3) - # Construct a slave template with applicable minimum version - slave = xmlutil.SlaveTemplate(elem, 2) - self.assertEqual(slave.apply(master), True) - self.assertTrue(repr(slave)) + # Construct a subordinate template with applicable minimum version + subordinate = xmlutil.SubordinateTemplate(elem, 2) + self.assertEqual(subordinate.apply(main), True) + self.assertTrue(repr(subordinate)) - # Construct a slave template with equal minimum version - slave = xmlutil.SlaveTemplate(elem, 3) - self.assertEqual(slave.apply(master), True) + # Construct a subordinate template with equal minimum version + subordinate = xmlutil.SubordinateTemplate(elem, 3) + self.assertEqual(subordinate.apply(main), True) - # Construct a slave template with inapplicable minimum version - 
slave = xmlutil.SlaveTemplate(elem, 4) - self.assertEqual(slave.apply(master), False) + # Construct a subordinate template with inapplicable minimum version + subordinate = xmlutil.SubordinateTemplate(elem, 4) + self.assertEqual(subordinate.apply(main), False) - # Construct a slave template with applicable version range - slave = xmlutil.SlaveTemplate(elem, 2, 4) - self.assertEqual(slave.apply(master), True) + # Construct a subordinate template with applicable version range + subordinate = xmlutil.SubordinateTemplate(elem, 2, 4) + self.assertEqual(subordinate.apply(main), True) - # Construct a slave template with low version range - slave = xmlutil.SlaveTemplate(elem, 1, 2) - self.assertEqual(slave.apply(master), False) + # Construct a subordinate template with low version range + subordinate = xmlutil.SubordinateTemplate(elem, 1, 2) + self.assertEqual(subordinate.apply(main), False) - # Construct a slave template with high version range - slave = xmlutil.SlaveTemplate(elem, 4, 5) - self.assertEqual(slave.apply(master), False) + # Construct a subordinate template with high version range + subordinate = xmlutil.SubordinateTemplate(elem, 4, 5) + self.assertEqual(subordinate.apply(main), False) - # Construct a slave template with matching version range - slave = xmlutil.SlaveTemplate(elem, 3, 3) - self.assertEqual(slave.apply(master), True) + # Construct a subordinate template with matching version range + subordinate = xmlutil.SubordinateTemplate(elem, 3, 3) + self.assertEqual(subordinate.apply(main), True) def test__serialize(self): # Our test object to serialize @@ -658,7 +658,7 @@ def test__serialize(self): }, } - # Set up our master template + # Set up our main template root = xmlutil.TemplateElement('test', selector='test', name='name') value = xmlutil.SubTemplateElement(root, 'value', selector='values') @@ -666,22 +666,22 @@ def test__serialize(self): attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs') xmlutil.SubTemplateElement(attrs, 'attr', 
selector=xmlutil.get_items, key=0, value=1) - master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo')) + main = xmlutil.MainTemplate(root, 1, nsmap=dict(f='foo')) - # Set up our slave template - root_slave = xmlutil.TemplateElement('test', selector='test') - image = xmlutil.SubTemplateElement(root_slave, 'image', + # Set up our subordinate template + root_subordinate = xmlutil.TemplateElement('test', selector='test') + image = xmlutil.SubTemplateElement(root_subordinate, 'image', selector='image', id='id') image.text = xmlutil.Selector('name') - slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar')) + subordinate = xmlutil.SubordinateTemplate(root_subordinate, 1, nsmap=dict(b='bar')) - # Attach the slave to the master... - master.attach(slave) + # Attach the subordinate to the main... + main.attach(subordinate) # Try serializing our object - siblings = master._siblings() - nsmap = master._nsmap() - result = master._serialize(None, obj, siblings, nsmap) + siblings = main._siblings() + nsmap = main._nsmap() + result = main._serialize(None, obj, siblings, nsmap) # Now we get to manually walk the element tree... 
self.assertEqual(result.tag, 'test') @@ -713,14 +713,14 @@ def test_serialize_with_colon_tagname_support(self): expected_xml = (("\n" '999' '')) - # Set up our master template + # Set up our main template root = xmlutil.TemplateElement('extra_specs', selector='extra_specs', colon_ns=True) value = xmlutil.SubTemplateElement(root, 'foo:bar', selector='foo:bar', colon_ns=True) value.text = xmlutil.Selector() - master = xmlutil.MasterTemplate(root, 1) - result = master.serialize(obj) + main = xmlutil.MainTemplate(root, 1) + result = main.serialize(obj) self.assertEqual(expected_xml, result) def test__serialize_with_empty_datum_selector(self): @@ -734,76 +734,76 @@ def test__serialize_with_empty_datum_selector(self): root = xmlutil.TemplateElement('test', selector='test', name='name') - master = xmlutil.MasterTemplate(root, 1) - root_slave = xmlutil.TemplateElement('test', selector='test') - image = xmlutil.SubTemplateElement(root_slave, 'image', + main = xmlutil.MainTemplate(root, 1) + root_subordinate = xmlutil.TemplateElement('test', selector='test') + image = xmlutil.SubTemplateElement(root_subordinate, 'image', selector='image') image.set('id') xmlutil.make_links(image, 'links') - slave = xmlutil.SlaveTemplate(root_slave, 1) - master.attach(slave) + subordinate = xmlutil.SubordinateTemplate(root_subordinate, 1) + main.attach(subordinate) - siblings = master._siblings() - result = master._serialize(None, obj, siblings) + siblings = main._siblings() + result = main._serialize(None, obj, siblings) self.assertEqual(result.tag, 'test') self.assertEqual(result[0].tag, 'image') self.assertEqual(result[0].get('id'), str(obj['test']['image'])) -class MasterTemplateBuilder(xmlutil.TemplateBuilder): +class MainTemplateBuilder(xmlutil.TemplateBuilder): def construct(self): elem = xmlutil.TemplateElement('test') - return xmlutil.MasterTemplate(elem, 1) + return xmlutil.MainTemplate(elem, 1) -class SlaveTemplateBuilder(xmlutil.TemplateBuilder): +class 
SubordinateTemplateBuilder(xmlutil.TemplateBuilder): def construct(self): elem = xmlutil.TemplateElement('test') - return xmlutil.SlaveTemplate(elem, 1) + return xmlutil.SubordinateTemplate(elem, 1) class TemplateBuilderTest(test.NoDBTestCase): - def test_master_template_builder(self): + def test_main_template_builder(self): # Make sure the template hasn't been built yet - self.assertIsNone(MasterTemplateBuilder._tmpl) + self.assertIsNone(MainTemplateBuilder._tmpl) # Now, construct the template - tmpl1 = MasterTemplateBuilder() + tmpl1 = MainTemplateBuilder() # Make sure that there is a template cached... - self.assertIsNotNone(MasterTemplateBuilder._tmpl) + self.assertIsNotNone(MainTemplateBuilder._tmpl) # Make sure it wasn't what was returned... - self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1) + self.assertNotEqual(MainTemplateBuilder._tmpl, tmpl1) # Make sure it doesn't get rebuilt - cached = MasterTemplateBuilder._tmpl - tmpl2 = MasterTemplateBuilder() - self.assertEqual(MasterTemplateBuilder._tmpl, cached) + cached = MainTemplateBuilder._tmpl + tmpl2 = MainTemplateBuilder() + self.assertEqual(MainTemplateBuilder._tmpl, cached) # Make sure we're always getting fresh copies self.assertNotEqual(tmpl1, tmpl2) # Make sure we can override the copying behavior - tmpl3 = MasterTemplateBuilder(False) - self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3) + tmpl3 = MainTemplateBuilder(False) + self.assertEqual(MainTemplateBuilder._tmpl, tmpl3) - def test_slave_template_builder(self): + def test_subordinate_template_builder(self): # Make sure the template hasn't been built yet - self.assertIsNone(SlaveTemplateBuilder._tmpl) + self.assertIsNone(SubordinateTemplateBuilder._tmpl) # Now, construct the template - tmpl1 = SlaveTemplateBuilder() + tmpl1 = SubordinateTemplateBuilder() # Make sure there is a template cached... 
- self.assertIsNotNone(SlaveTemplateBuilder._tmpl) + self.assertIsNotNone(SubordinateTemplateBuilder._tmpl) # Make sure it was what was returned... - self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1) + self.assertEqual(SubordinateTemplateBuilder._tmpl, tmpl1) # Make sure it doesn't get rebuilt - tmpl2 = SlaveTemplateBuilder() - self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1) + tmpl2 = SubordinateTemplateBuilder() + self.assertEqual(SubordinateTemplateBuilder._tmpl, tmpl1) # Make sure we're always getting the cached copy self.assertEqual(tmpl1, tmpl2) @@ -829,7 +829,7 @@ def test_make_flat_dict(self): expected_xml = ("\n" 'foobar') root = xmlutil.make_flat_dict('wrapper') - tmpl = xmlutil.MasterTemplate(root, 1) + tmpl = xmlutil.MainTemplate(root, 1) result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar'))) self.assertEqual(result, expected_xml) @@ -837,7 +837,7 @@ def test_make_flat_dict(self): 'foobar' "") root = xmlutil.make_flat_dict('wrapper', ns='ns') - tmpl = xmlutil.MasterTemplate(root, 1) + tmpl = xmlutil.MainTemplate(root, 1) result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar'))) self.assertEqual(result, expected_xml) @@ -847,10 +847,10 @@ def test_make_flat_dict_with_colon_tagname_support(self): expected_xml = (("\n" '999' '')) - # Set up our master template + # Set up our main template root = xmlutil.make_flat_dict('extra_specs', colon_ns=True) - master = xmlutil.MasterTemplate(root, 1) - result = master.serialize(obj) + main = xmlutil.MainTemplate(root, 1) + result = main.serialize(obj) self.assertEqual(expected_xml, result) def test_make_flat_dict_with_parent(self): @@ -867,8 +867,8 @@ def test_make_flat_dict_with_parent(self): root.set('id') extra = xmlutil.make_flat_dict('extra_info', root=root) root.append(extra) - master = xmlutil.MasterTemplate(root, 1) - result = master.serialize(obj) + main = xmlutil.MainTemplate(root, 1) + result = main.serialize(obj) self.assertEqual(expected_xml, result) def 
test_make_flat_dict_with_dicts(self): @@ -885,8 +885,8 @@ def test_make_flat_dict_with_dicts(self): ignore_sub_dicts=True) extra = xmlutil.make_flat_dict('extra_info', selector='extra_info') root.append(extra) - master = xmlutil.MasterTemplate(root, 1) - result = master.serialize(obj) + main = xmlutil.MainTemplate(root, 1) + result = main.serialize(obj) self.assertEqual(expected_xml, result) def test_safe_parse_xml(self): diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 1fbaff2a12..d7f3f9844c 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -641,7 +641,7 @@ def test_poll_bandwidth_usage_not_implemented(self): time.time().AndReturn(20) time.time().AndReturn(21) objects.InstanceList.get_by_host(ctxt, 'fake-mini', - use_slave=True).AndReturn([]) + use_subordinate=True).AndReturn([]) self.compute.driver.get_all_bw_counters([]).AndRaise( NotImplementedError) self.mox.ReplayAll() @@ -672,7 +672,7 @@ def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host): self.compute.host) mock_get_by_inst.assert_called_once_with('fake-context', 'fake-instance-uuid', - use_slave=False) + use_subordinate=False) self.assertEqual(expected_host_bdms, got_host_bdms) def test_poll_volume_usage_disabled(self): @@ -693,7 +693,7 @@ def test_poll_volume_usage_returns_no_vols(self): self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage') # Following methods are called. 
utils.last_completed_audit_period().AndReturn((0, 0)) - self.compute._get_host_volume_bdms(ctxt, use_slave=True).AndReturn([]) + self.compute._get_host_volume_bdms(ctxt, use_subordinate=True).AndReturn([]) self.mox.ReplayAll() self.flags(volume_usage_poll_interval=10) @@ -710,7 +710,7 @@ def test_poll_volume_usage_with_data(self): # All the mocks are called utils.last_completed_audit_period().AndReturn((10, 20)) self.compute._get_host_volume_bdms(ctxt, - use_slave=True).AndReturn([1, 2]) + use_subordinate=True).AndReturn([1, 2]) self.compute._update_volume_usage_cache(ctxt, [3, 4]) self.mox.ReplayAll() self.flags(volume_usage_poll_interval=10) @@ -740,7 +740,7 @@ def test_detach_volume_usage(self): self.compute.driver.block_stats(instance['name'], 'vdb').\ AndReturn([1L, 30L, 1L, 20L, None]) self.compute._get_host_volume_bdms(self.context, - use_slave=True).AndReturn( + use_subordinate=True).AndReturn( host_volume_bdms) self.compute.driver.get_all_volume_usage( self.context, host_volume_bdms).AndReturn( @@ -5944,9 +5944,9 @@ def test_cleanup_running_deleted_instances_reap(self): self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False).\ AndRaise(test.TestingException) objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt, - inst1.uuid, use_slave=True).AndReturn(bdms) + inst1.uuid, use_subordinate=True).AndReturn(bdms) objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt, - inst2.uuid, use_slave=True).AndReturn(bdms) + inst2.uuid, use_subordinate=True).AndReturn(bdms) self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False).\ AndReturn(None) @@ -6051,7 +6051,7 @@ def test_get_instance_nw_info(self): db.instance_get_by_uuid(self.context, fake_inst['uuid'], columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.compute.network_api.get_instance_nw_info(self.context, mox.IsA(objects.Instance)).AndReturn(fake_nw_info) @@ -6082,13 +6082,13 @@ def test_heal_instance_info_cache(self): 
'get_nw_info': 0, 'expected_instance': None} def fake_instance_get_all_by_host(context, host, - columns_to_join, use_slave=False): + columns_to_join, use_subordinate=False): call_info['get_all_by_host'] += 1 self.assertEqual([], columns_to_join) return instances[:] def fake_instance_get_by_uuid(context, instance_uuid, - columns_to_join, use_slave=False): + columns_to_join, use_subordinate=False): if instance_uuid not in instance_map: raise exception.InstanceNotFound(instance_id=instance_uuid) call_info['get_by_uuid'] += 1 @@ -6097,7 +6097,7 @@ def fake_instance_get_by_uuid(context, instance_uuid, return instance_map[instance_uuid] # NOTE(comstud): Override the stub in setUp() - def fake_get_instance_nw_info(context, instance, use_slave=False): + def fake_get_instance_nw_info(context, instance, use_subordinate=False): # Note that this exception gets caught in compute/manager # and is ignored. However, the below increment of # 'get_nw_info' won't happen, and you'll get an assert @@ -6177,7 +6177,7 @@ def test_poll_rescued_instances(self, unrescue, get): def fake_instance_get_all_by_filters(context, filters, expected_attrs=None, - use_slave=False): + use_subordinate=False): self.assertEqual(["system_metadata"], expected_attrs) return instances @@ -6242,7 +6242,7 @@ def test_poll_unconfirmed_resizes(self): migrations.append(fake_mig) def fake_instance_get_by_uuid(context, instance_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertIn('metadata', columns_to_join) self.assertIn('system_metadata', columns_to_join) # raise InstanceNotFound exception for uuid 'noexist' @@ -6253,7 +6253,7 @@ def fake_instance_get_by_uuid(context, instance_uuid, return instance def fake_migration_get_unconfirmed_by_dest_compute(context, - resize_confirm_window, dest_compute, use_slave=False): + resize_confirm_window, dest_compute, use_subordinate=False): self.assertEqual(dest_compute, CONF.host) return migrations @@ -6350,7 +6350,7 @@ 
def test_instance_build_timeout_mixed_instances(self): sort_dir, marker=None, columns_to_join=[], - use_slave=True, + use_subordinate=True, limit=None) self.assertThat(conductor_instance_update.mock_calls, testtools_matchers.HasLength(len(old_instances))) @@ -6706,7 +6706,7 @@ def test_reclaim_queued_deletes_continue_on_error(self): objects.InstanceList.get_by_filters( ctxt, mox.IgnoreArg(), expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, - use_slave=True + use_subordinate=True ).AndReturn(instances) # The first instance delete fails. @@ -6747,12 +6747,12 @@ def test_sync_power_states(self): {'state': power_state.RUNNING}) self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(), power_state.RUNNING, - use_slave=True) + use_subordinate=True) self.compute.driver.get_info(mox.IgnoreArg()).AndReturn( {'state': power_state.SHUTDOWN}) self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(), power_state.SHUTDOWN, - use_slave=True) + use_subordinate=True) self.mox.ReplayAll() self.compute._sync_power_states(ctxt) @@ -7677,7 +7677,7 @@ def test_get(self): instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault'])) def fake_db_get(_context, _instance_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return exp_instance self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get) @@ -7698,7 +7698,7 @@ def test_get_with_admin_context(self): instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault'])) def fake_db_get(context, instance_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return exp_instance self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get) @@ -10062,7 +10062,7 @@ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore): fake_driver_add_to_aggregate) self.compute.add_aggregate_host(self.context, host="host", - aggregate=jsonutils.to_primitive(self.aggr), slave_info=None) + aggregate=jsonutils.to_primitive(self.aggr), subordinate_info=None) 
self.assertTrue(fake_driver_add_to_aggregate.called) def test_remove_aggregate_host(self): @@ -10076,36 +10076,36 @@ def fake_driver_remove_from_aggregate(context, aggregate, host, self.compute.remove_aggregate_host(self.context, aggregate=jsonutils.to_primitive(self.aggr), host="host", - slave_info=None) + subordinate_info=None) self.assertTrue(fake_driver_remove_from_aggregate.called) - def test_add_aggregate_host_passes_slave_info_to_driver(self): + def test_add_aggregate_host_passes_subordinate_info_to_driver(self): def driver_add_to_aggregate(context, aggregate, host, **kwargs): self.assertEqual(self.context, context) self.assertEqual(aggregate['id'], self.aggr['id']) self.assertEqual(host, "the_host") - self.assertEqual("SLAVE_INFO", kwargs.get("slave_info")) + self.assertEqual("SLAVE_INFO", kwargs.get("subordinate_info")) self.stubs.Set(self.compute.driver, "add_to_aggregate", driver_add_to_aggregate) self.compute.add_aggregate_host(self.context, host="the_host", - slave_info="SLAVE_INFO", + subordinate_info="SLAVE_INFO", aggregate=jsonutils.to_primitive(self.aggr)) - def test_remove_from_aggregate_passes_slave_info_to_driver(self): + def test_remove_from_aggregate_passes_subordinate_info_to_driver(self): def driver_remove_from_aggregate(context, aggregate, host, **kwargs): self.assertEqual(self.context, context) self.assertEqual(aggregate['id'], self.aggr['id']) self.assertEqual(host, "the_host") - self.assertEqual("SLAVE_INFO", kwargs.get("slave_info")) + self.assertEqual("SLAVE_INFO", kwargs.get("subordinate_info")) self.stubs.Set(self.compute.driver, "remove_from_aggregate", driver_remove_from_aggregate) self.compute.remove_aggregate_host(self.context, aggregate=jsonutils.to_primitive(self.aggr), host="the_host", - slave_info="SLAVE_INFO") + subordinate_info="SLAVE_INFO") class ComputePolicyTestCase(BaseTestCase): diff --git a/nova/tests/compute/test_compute_api.py b/nova/tests/compute/test_compute_api.py index cd225afe28..f084692d67 100644 --- 
a/nova/tests/compute/test_compute_api.py +++ b/nova/tests/compute/test_compute_api.py @@ -769,7 +769,7 @@ def test_delete_fast_if_host_not_set(self): db.block_device_mapping_get_all_by_instance(self.context, inst.uuid, - use_slave=False).AndReturn([]) + use_subordinate=False).AndReturn([]) inst.save() self.compute_api._create_reservations(self.context, inst, inst.task_state, @@ -872,7 +872,7 @@ def test_delete_soft_rollback(self): timeutils.set_time_override(delete_time) db.block_device_mapping_get_all_by_instance( - self.context, inst.uuid, use_slave=False).AndReturn([]) + self.context, inst.uuid, use_subordinate=False).AndReturn([]) inst.save().AndRaise(test.TestingException) self.mox.ReplayAll() @@ -1660,7 +1660,7 @@ def test_snapshot_volume_backed(self): 'is_public': False } - def fake_get_all_by_instance(context, instance, use_slave=False): + def fake_get_all_by_instance(context, instance, use_subordinate=False): return copy.deepcopy(instance_bdms) def fake_image_create(context, image_meta, data=None): diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py index 370e4efcfc..37c198bdbf 100644 --- a/nova/tests/compute/test_compute_mgr.py +++ b/nova/tests/compute/test_compute_mgr.py @@ -213,7 +213,7 @@ def _do_mock_calls(defer_iptables_apply): context.get_admin_context().AndReturn(fake_context) db.instance_get_all_by_host( fake_context, our_host, columns_to_join=['info_cache'], - use_slave=False + use_subordinate=False ).AndReturn(startup_instances) if defer_iptables_apply: self.compute.driver.filter_defer_apply_on() @@ -295,7 +295,7 @@ def test_init_host_with_deleted_migration(self): context.get_admin_context().AndReturn(fake_context) db.instance_get_all_by_host(fake_context, our_host, columns_to_join=['info_cache'], - use_slave=False + use_subordinate=False ).AndReturn([]) self.compute.init_virt_events() @@ -728,7 +728,7 @@ def test_get_instances_on_driver(self): inst in driver_instances]}, 'created_at', 'desc', 
columns_to_join=None, limit=None, marker=None, - use_slave=True).AndReturn( + use_subordinate=True).AndReturn( driver_instances) self.mox.ReplayAll() @@ -769,7 +769,7 @@ def test_get_instances_on_driver_fallback(self): fake_context, filters, 'created_at', 'desc', columns_to_join=None, limit=None, marker=None, - use_slave=True).AndReturn(all_instances) + use_subordinate=True).AndReturn(all_instances) self.mox.ReplayAll() @@ -817,7 +817,7 @@ def _get_sync_instance(self, power_state, vm_state, task_state=None, def test_sync_instance_power_state_match(self): instance = self._get_sync_instance(power_state.RUNNING, vm_states.ACTIVE) - instance.refresh(use_slave=False) + instance.refresh(use_subordinate=False) self.mox.ReplayAll() self.compute._sync_instance_power_state(self.context, instance, power_state.RUNNING) @@ -825,7 +825,7 @@ def test_sync_instance_power_state_match(self): def test_sync_instance_power_state_running_stopped(self): instance = self._get_sync_instance(power_state.RUNNING, vm_states.ACTIVE) - instance.refresh(use_slave=False) + instance.refresh(use_subordinate=False) instance.save() self.mox.ReplayAll() self.compute._sync_instance_power_state(self.context, instance, @@ -836,7 +836,7 @@ def _test_sync_to_stop(self, power_state, vm_state, driver_power_state, stop=True, force=False, shutdown_terminate=False): instance = self._get_sync_instance( power_state, vm_state, shutdown_terminate=shutdown_terminate) - instance.refresh(use_slave=False) + instance.refresh(use_subordinate=False) instance.save() self.mox.StubOutWithMock(self.compute.compute_api, 'stop') self.mox.StubOutWithMock(self.compute.compute_api, 'delete') @@ -907,7 +907,7 @@ def test_query_driver_power_state_and_sync_not_found_driver( mock_sync_power_state.assert_called_once_with(self.context, db_instance, power_state.NOSTATE, - use_slave=True) + use_subordinate=True) def test_run_pending_deletes(self): self.flags(instance_delete_interval=10) @@ -941,7 +941,7 @@ def get_by_filters(self, *args, 
**kwargs): 'cleaned': False}, expected_attrs=['info_cache', 'security_groups', 'system_metadata'], - use_slave=True).AndReturn([a, b, c]) + use_subordinate=True).AndReturn([a, b, c]) self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files') self.compute.driver.delete_instance_files( diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py index 7c84a23075..bf0e60119f 100644 --- a/nova/tests/compute/test_compute_utils.py +++ b/nova/tests/compute/test_compute_utils.py @@ -76,7 +76,7 @@ def setUp(self): self.data = [] self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', - lambda context, instance, use_slave=False: self.data) + lambda context, instance, use_subordinate=False: self.data) def _update_flavor(self, flavor_info): self.flavor = { diff --git a/nova/tests/compute/test_compute_xen.py b/nova/tests/compute/test_compute_xen.py index 4870e37c66..dfe369345c 100644 --- a/nova/tests/compute/test_compute_xen.py +++ b/nova/tests/compute/test_compute_xen.py @@ -52,7 +52,7 @@ def test_sync_power_states_instance_not_found(self): self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state') objects.InstanceList.get_by_host(ctxt, - self.compute.host, use_slave=True).AndReturn(instance_list) + self.compute.host, use_subordinate=True).AndReturn(instance_list) self.compute.driver.get_num_instances().AndReturn(1) vm_utils.lookup(self.compute.driver._session, instance['name'], False).AndReturn(None) diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py index 5e8e45c046..1aa8550b55 100644 --- a/nova/tests/compute/test_rpcapi.py +++ b/nova/tests/compute/test_rpcapi.py @@ -105,13 +105,13 @@ def _test_compute_api(self, method, rpc_method, **kwargs): def test_add_aggregate_host(self): self._test_compute_api('add_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', - slave_info={}) + subordinate_info={}) # NOTE(russellb) Havana compat 
self.flags(compute='havana', group='upgrade_levels') self._test_compute_api('add_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', - slave_info={}, version='2.14') + subordinate_info={}, version='2.14') def test_add_fixed_ip_to_instance(self): self._test_compute_api('add_fixed_ip_to_instance', 'cast', @@ -558,13 +558,13 @@ def test_refresh_security_group_members(self): def test_remove_aggregate_host(self): self._test_compute_api('remove_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', - slave_info={}) + subordinate_info={}) # NOTE(russellb) Havana compat self.flags(compute='havana', group='upgrade_levels') self._test_compute_api('remove_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', - slave_info={}, version='2.15') + subordinate_info={}, version='2.15') def test_remove_fixed_ip_from_instance(self): self._test_compute_api('remove_fixed_ip_from_instance', 'cast', diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py index 50d4b616f8..d6151ad8a2 100644 --- a/nova/tests/conductor/test_conductor.py +++ b/nova/tests/conductor/test_conductor.py @@ -408,23 +408,23 @@ def test_instance_get_all_by_filters(self): self.mox.StubOutWithMock(db, 'instance_get_all_by_filters') db.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort', - columns_to_join=None, use_slave=False) + columns_to_join=None, use_subordinate=False) self.mox.ReplayAll() self.conductor.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort', None, False) - def test_instance_get_all_by_filters_use_slave(self): + def test_instance_get_all_by_filters_use_subordinate(self): filters = {'foo': 'bar'} self.mox.StubOutWithMock(db, 'instance_get_all_by_filters') db.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort', - columns_to_join=None, use_slave=True) + columns_to_join=None, use_subordinate=True) 
self.mox.ReplayAll() self.conductor.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort', columns_to_join=None, - use_slave=True) + use_subordinate=True) def test_instance_get_all_by_host(self): self.mox.StubOutWithMock(db, 'instance_get_all_by_host') @@ -1239,10 +1239,10 @@ def test_build_instances(self): {'host': 'host2', 'nodename': 'node2', 'limits': []}]) db.instance_get_by_uuid(self.context, instances[0].uuid, columns_to_join=['system_metadata'], - use_slave=False).AndReturn( + use_subordinate=False).AndReturn( jsonutils.to_primitive(instances[0])) db.block_device_mapping_get_all_by_instance(self.context, - instances[0].uuid, use_slave=False).AndReturn([]) + instances[0].uuid, use_subordinate=False).AndReturn([]) self.conductor_manager.compute_rpcapi.build_and_run_instance( self.context, instance=mox.IgnoreArg(), @@ -1265,10 +1265,10 @@ def test_build_instances(self): node='node1', limits=[]) db.instance_get_by_uuid(self.context, instances[1].uuid, columns_to_join=['system_metadata'], - use_slave=False).AndReturn( + use_subordinate=False).AndReturn( jsonutils.to_primitive(instances[1])) db.block_device_mapping_get_all_by_instance(self.context, - instances[1].uuid, use_slave=False).AndReturn([]) + instances[1].uuid, use_subordinate=False).AndReturn([]) self.conductor_manager.compute_rpcapi.build_and_run_instance( self.context, instance=mox.IgnoreArg(), diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py index 8ad6518512..7af3d874a5 100644 --- a/nova/tests/db/test_db_api.py +++ b/nova/tests/db/test_db_api.py @@ -1224,8 +1224,8 @@ def test_security_group_get_no_instances(self): session = get_session() self.mox.StubOutWithMock(sqlalchemy_api, 'get_session') - sqlalchemy_api.get_session(use_slave=False).AndReturn(session) - sqlalchemy_api.get_session(use_slave=False).AndReturn(session) + sqlalchemy_api.get_session(use_subordinate=False).AndReturn(session) + 
sqlalchemy_api.get_session(use_subordinate=False).AndReturn(session) self.mox.ReplayAll() security_group = db.security_group_get(self.ctxt, sid, diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index d0b22008c1..13818e7e3d 100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -4047,7 +4047,7 @@ class VolumeAttachmentsSampleBase(ServersSampleBase): def _stub_db_bdms_get_all_by_instance(self, server_id): def fake_bdms_get_all_by_instance(context, instance_uuid, - use_slave=False): + use_subordinate=False): bdms = [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803', diff --git a/nova/tests/integrated/v3/test_extended_volumes.py b/nova/tests/integrated/v3/test_extended_volumes.py index 60b35d7e78..88c5da483f 100644 --- a/nova/tests/integrated/v3/test_extended_volumes.py +++ b/nova/tests/integrated/v3/test_extended_volumes.py @@ -31,7 +31,7 @@ class ExtendedVolumesSampleJsonTests(test_servers.ServersSampleBase): def _stub_compute_api_get_instance_bdms(self, server_id): def fake_bdms_get_all_by_instance(context, instance_uuid, - use_slave=False): + use_subordinate=False): bdms = [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803', diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py index ff6a3772ee..ba9b7d8778 100644 --- a/nova/tests/network/test_api.py +++ b/nova/tests/network/test_api.py @@ -115,7 +115,7 @@ def test_get_vifs_by_instance(self, mock_get_by_instance, self.assertEqual(123, vifs[0].network_id) self.assertEqual(str(mock.sentinel.network_uuid), vifs[0].net_uuid) mock_get_by_instance.assert_called_once_with( - self.context, str(mock.sentinel.inst_uuid), use_slave=False) + self.context, str(mock.sentinel.inst_uuid), use_subordinate=False) mock_get_by_id.assert_called_once_with(self.context, 123, project_only='allow_none') @@ -174,7 
+174,7 @@ def fake_associate(*args, **kwargs): def fake_instance_get_by_uuid(context, instance_uuid, columns_to_join=None, - use_slave=None): + use_subordinate=None): return fake_instance.fake_db_instance(uuid=instance_uuid) self.stubs.Set(self.network_api.db, 'instance_get_by_uuid', diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py index cfd5725540..07f23b2743 100644 --- a/nova/tests/network/test_linux_net.py +++ b/nova/tests/network/test_linux_net.py @@ -287,7 +287,7 @@ def setUp(self): self.context = context.RequestContext('testuser', 'testproject', is_admin=True) - def get_vifs(_context, instance_uuid, use_slave): + def get_vifs(_context, instance_uuid, use_subordinate): return [vif for vif in vifs if vif['instance_uuid'] == instance_uuid] diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index 7f6fe0047d..ceece4345b 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -477,7 +477,7 @@ def test_add_fixed_ip_instance_using_id_without_vpn(self, reserve): inst = fake_inst(display_name=HOST, uuid=FAKEUUID) db.instance_get_by_uuid(self.context, - mox.IgnoreArg(), use_slave=False, + mox.IgnoreArg(), use_subordinate=False, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(inst) @@ -530,7 +530,7 @@ def test_add_fixed_ip_instance_using_uuid_without_vpn(self, reserve): inst = fake_inst(display_name=HOST, uuid=FAKEUUID) db.instance_get_by_uuid(self.context, - mox.IgnoreArg(), use_slave=False, + mox.IgnoreArg(), use_subordinate=False, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(inst) @@ -627,7 +627,7 @@ def test_instance_dns(self, reserve): inst = fake_inst(display_name=HOST, uuid=FAKEUUID) db.instance_get_by_uuid(self.context, - mox.IgnoreArg(), use_slave=False, + mox.IgnoreArg(), use_subordinate=False, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(inst) @@ -871,7 +871,7 @@ def test_vpn_allocate_fixed_ip(self): 
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0]) db.instance_get_by_uuid(mox.IgnoreArg(), - mox.IgnoreArg(), use_slave=False, + mox.IgnoreArg(), use_subordinate=False, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst(display_name=HOST, @@ -919,7 +919,7 @@ def test_allocate_fixed_ip(self): db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0]) db.instance_get_by_uuid(mox.IgnoreArg(), - mox.IgnoreArg(), use_slave=False, + mox.IgnoreArg(), use_subordinate=False, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst(display_name=HOST, @@ -1529,7 +1529,7 @@ def test_add_fixed_ip_instance_without_vpn_requested_networks(self): ).AndReturn(dict(test_network.fake_network, **networks[0])) db.instance_get_by_uuid(mox.IgnoreArg(), - mox.IgnoreArg(), use_slave=False, + mox.IgnoreArg(), use_subordinate=False, columns_to_join=['info_cache', 'security_groups'] ).AndReturn(fake_inst(display_name=HOST, @@ -1846,7 +1846,7 @@ def test_get_instance_nw_info_client_exceptions(self): 'virtual_interface_get_by_instance') manager.db.virtual_interface_get_by_instance( self.context, FAKEUUID, - use_slave=False).AndRaise(exception.InstanceNotFound( + use_subordinate=False).AndRaise(exception.InstanceNotFound( instance_id=FAKEUUID)) self.mox.ReplayAll() self.assertRaises(messaging.ExpectedException, diff --git a/nova/tests/objects/test_instance.py b/nova/tests/objects/test_instance.py index cb61628640..8220e4d56a 100644 --- a/nova/tests/objects/test_instance.py +++ b/nova/tests/objects/test_instance.py @@ -104,7 +104,7 @@ def test_get_without_expected(self): self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.instance_get_by_uuid(self.context, 'uuid', columns_to_join=[], - use_slave=False + use_subordinate=False ).AndReturn(self.fake_instance) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, 
'uuid', @@ -123,7 +123,7 @@ def test_get_with_expected(self): db.instance_get_by_uuid( self.context, 'uuid', columns_to_join=exp_cols, - use_slave=False + use_subordinate=False ).AndReturn(self.fake_instance) fake_faults = test_instance_fault.fake_faults db.instance_fault_get_by_instance_uuids( @@ -155,13 +155,13 @@ def test_load(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(self.fake_instance) fake_inst2 = dict(self.fake_instance, system_metadata=[{'key': 'foo', 'value': 'bar'}]) db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['system_metadata'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst2) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid) @@ -186,7 +186,7 @@ def test_get_remote(self): db.instance_get_by_uuid(self.context, 'fake-uuid', columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_instance) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, 'fake-uuid') @@ -204,13 +204,13 @@ def test_refresh(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(dict(self.fake_instance, host='orig-host')) db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(dict(self.fake_instance, host='new-host')) self.mox.StubOutWithMock(instance_info_cache.InstanceInfoCache, @@ -232,7 +232,7 @@ def test_refresh_does_not_recurse(self): self.mox.StubOutWithMock(instance.Instance, 'get_by_uuid') instance.Instance.get_by_uuid(self.context, uuid=inst.uuid, expected_attrs=['metadata'], - use_slave=False + use_subordinate=False ).AndReturn(inst_copy) self.mox.ReplayAll() self.assertRaises(exception.OrphanedObjectError, 
inst.refresh) @@ -279,7 +279,7 @@ def _save_test_helper(self, cell_type, save_kwargs): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(old_ref) db.instance_update_and_get_original( self.context, fake_uuid, expected_updates, @@ -359,7 +359,7 @@ def test_save_rename_sends_notification(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(old_ref) db.instance_update_and_get_original( self.context, fake_uuid, expected_updates, update_cells=False, @@ -372,7 +372,7 @@ def test_save_rename_sends_notification(self): self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'], - use_slave=False) + use_subordinate=False) self.assertEqual('hello', inst.display_name) inst.display_name = 'goodbye' inst.save() @@ -402,7 +402,7 @@ def test_get_deleted(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid) @@ -416,7 +416,7 @@ def test_get_not_cleaned(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid) @@ -430,7 +430,7 @@ def test_get_cleaned(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid) @@ -454,7 +454,7 @@ def test_with_info_cache(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - 
use_slave=False + use_subordinate=False ).AndReturn(fake_inst) db.instance_info_cache_update(self.context, fake_uuid, {'network_info': nwinfo2_json}) @@ -471,7 +471,7 @@ def test_with_info_cache_none(self): self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid, @@ -497,7 +497,7 @@ def test_with_security_groups(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) db.security_group_update(self.context, 1, {'description': 'changed'} ).AndReturn(fake_inst['security_groups'][0]) @@ -522,7 +522,7 @@ def test_with_empty_security_groups(self): db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid) @@ -534,7 +534,7 @@ def test_with_empty_pci_devices(self): self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['pci_devices'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid, @@ -580,7 +580,7 @@ def test_with_pci_devices(self): self.mox.StubOutWithMock(db, 'instance_get_by_uuid') db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=['pci_devices'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) self.mox.ReplayAll() inst = instance.Instance.get_by_uuid(self.context, fake_uuid, @@ -598,7 +598,7 @@ def test_with_fault(self): self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids') db.instance_get_by_uuid(self.context, fake_uuid, columns_to_join=[], 
- use_slave=False + use_subordinate=False ).AndReturn(self.fake_instance) db.instance_fault_get_by_instance_uuids( self.context, [fake_uuid]).AndReturn({fake_uuid: fake_faults}) @@ -935,11 +935,11 @@ def test_get_all_by_filters(self): db.instance_get_all_by_filters(self.context, {'foo': 'bar'}, 'uuid', 'asc', limit=None, marker=None, columns_to_join=['metadata'], - use_slave=False).AndReturn(fakes) + use_subordinate=False).AndReturn(fakes) self.mox.ReplayAll() inst_list = instance.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, 'uuid', 'asc', - expected_attrs=['metadata'], use_slave=False) + expected_attrs=['metadata'], use_subordinate=False) for i in range(0, len(fakes)): self.assertIsInstance(inst_list.objects[i], instance.Instance) @@ -956,12 +956,12 @@ def test_get_all_by_filters_works_for_cleaned(self): {'deleted': True, 'cleaned': False}, 'uuid', 'asc', limit=None, marker=None, columns_to_join=['metadata'], - use_slave=False).AndReturn( + use_subordinate=False).AndReturn( [fakes[1]]) self.mox.ReplayAll() inst_list = instance.InstanceList.get_by_filters( self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc', - expected_attrs=['metadata'], use_slave=False) + expected_attrs=['metadata'], use_subordinate=False) self.assertEqual(1, len(inst_list)) self.assertIsInstance(inst_list.objects[0], instance.Instance) @@ -974,7 +974,7 @@ def test_get_by_host(self): self.mox.StubOutWithMock(db, 'instance_get_all_by_host') db.instance_get_all_by_host(self.context, 'foo', columns_to_join=None, - use_slave=False).AndReturn(fakes) + use_subordinate=False).AndReturn(fakes) self.mox.ReplayAll() inst_list = instance.InstanceList.get_by_host(self.context, 'foo') for i in range(0, len(fakes)): @@ -1062,7 +1062,7 @@ def test_with_fault(self): self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids') db.instance_get_all_by_host(self.context, 'host', columns_to_join=[], - use_slave=False + use_subordinate=False ).AndReturn(fake_insts) 
db.instance_fault_get_by_instance_uuids( self.context, [x['uuid'] for x in fake_insts] @@ -1070,7 +1070,7 @@ def test_with_fault(self): self.mox.ReplayAll() instances = instance.InstanceList.get_by_host(self.context, 'host', expected_attrs=['fault'], - use_slave=False) + use_subordinate=False) self.assertEqual(2, len(instances)) self.assertEqual(fake_faults['fake-uuid'][0], dict(instances[0].fault.iteritems())) diff --git a/nova/tests/objects/test_migration.py b/nova/tests/objects/test_migration.py index 23e1b857a0..ed4b4ea5af 100644 --- a/nova/tests/objects/test_migration.py +++ b/nova/tests/objects/test_migration.py @@ -116,7 +116,7 @@ def test_instance(self): db.instance_get_by_uuid(ctxt, fake_migration['instance_uuid'], columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(fake_inst) mig = migration.Migration._from_db_object(ctxt, migration.Migration(), @@ -133,11 +133,11 @@ def test_get_unconfirmed_by_dest_compute(self): db, 'migration_get_unconfirmed_by_dest_compute') db.migration_get_unconfirmed_by_dest_compute( ctxt, 'window', 'foo', - use_slave=False).AndReturn(db_migrations) + use_subordinate=False).AndReturn(db_migrations) self.mox.ReplayAll() migrations = ( migration.MigrationList.get_unconfirmed_by_dest_compute( - ctxt, 'window', 'foo', use_slave=False)) + ctxt, 'window', 'foo', use_subordinate=False)) self.assertEqual(2, len(migrations)) for index, db_migration in enumerate(db_migrations): self.compare_obj(migrations[index], db_migration) diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index 7bf28e1131..6c278ea317 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -204,7 +204,7 @@ def test_format_instance_mapping(self): 'default_ephemeral_device': None, 'default_swap_device': None}) - def fake_bdm_get(ctxt, uuid, use_slave=False): + def fake_bdm_get(ctxt, uuid, use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': 
87654321, 'snapshot_id': None, diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py index cd0ba4687c..5dea0ff36c 100644 --- a/nova/tests/virt/libvirt/test_driver.py +++ b/nova/tests/virt/libvirt/test_driver.py @@ -5899,7 +5899,7 @@ def _test_destroy_removes_disk(self, volume_fail=False): db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(), columns_to_join=['info_cache', 'security_groups'], - use_slave=False + use_subordinate=False ).AndReturn(instance) self.mox.StubOutWithMock(driver, "block_device_info_get_mapping") driver.block_device_info_get_mapping(vol diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py index 0453d222db..6d98783ca0 100644 --- a/nova/tests/virt/xenapi/test_xenapi.py +++ b/nova/tests/virt/xenapi/test_xenapi.py @@ -1431,7 +1431,7 @@ def test_uuid_find(self): fake_inst2 = fake_instance.fake_db_instance(id=456) db.instance_get_all_by_host(self.context, fake_inst['host'], columns_to_join=None, - use_slave=False + use_subordinate=False ).AndReturn([fake_inst, fake_inst2]) self.mox.ReplayAll() expected_name = CONF.instance_name_template % fake_inst['id'] @@ -1448,7 +1448,7 @@ def fake_aggregate_get_by_host(self, *args, **kwargs): self.stubs.Set(db, "aggregate_get_by_host", fake_aggregate_get_by_host) - self.stubs.Set(self.conn._session, "is_slave", True) + self.stubs.Set(self.conn._session, "is_subordinate", True) self.assertRaises(test.TestingException, self.conn._session._get_host_uuid) @@ -2903,7 +2903,7 @@ def setUp(self): pool_states.POOL_FLAG: 'XenAPI'}} self.aggr = db.aggregate_create(self.context, values) self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI', - 'master_compute': 'host', + 'main_compute': 'host', 'availability_zone': 'fake_zone', pool_states.KEY: pool_states.ACTIVE, 'host': xenapi_fake.get_record('host', @@ -2913,18 +2913,18 @@ def test_pool_add_to_aggregate_called_by_driver(self): calls = [] - def pool_add_to_aggregate(context, aggregate, host, 
slave_info=None): + def pool_add_to_aggregate(context, aggregate, host, subordinate_info=None): self.assertEqual("CONTEXT", context) self.assertEqual("AGGREGATE", aggregate) self.assertEqual("HOST", host) - self.assertEqual("SLAVEINFO", slave_info) + self.assertEqual("SLAVEINFO", subordinate_info) calls.append(pool_add_to_aggregate) self.stubs.Set(self.conn._pool, "add_to_aggregate", pool_add_to_aggregate) self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST", - slave_info="SLAVEINFO") + subordinate_info="SLAVEINFO") self.assertIn(pool_add_to_aggregate, calls) @@ -2933,18 +2933,18 @@ def test_pool_remove_from_aggregate_called_by_driver(self): calls = [] def pool_remove_from_aggregate(context, aggregate, host, - slave_info=None): + subordinate_info=None): self.assertEqual("CONTEXT", context) self.assertEqual("AGGREGATE", aggregate) self.assertEqual("HOST", host) - self.assertEqual("SLAVEINFO", slave_info) + self.assertEqual("SLAVEINFO", subordinate_info) calls.append(pool_remove_from_aggregate) self.stubs.Set(self.conn._pool, "remove_from_aggregate", pool_remove_from_aggregate) self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST", - slave_info="SLAVEINFO") + subordinate_info="SLAVEINFO") self.assertIn(pool_remove_from_aggregate, calls) @@ -2960,11 +2960,11 @@ def fake_init_pool(id, name): self.assertThat(self.fake_metadata, matchers.DictMatches(result['metadetails'])) - def test_join_slave(self): - # Ensure join_slave gets called when the request gets to master. - def fake_join_slave(id, compute_uuid, host, url, user, password): - fake_join_slave.called = True - self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave) + def test_join_subordinate(self): + # Ensure join_subordinate gets called when the request gets to main. 
+ def fake_join_subordinate(id, compute_uuid, host, url, user, password): + fake_join_subordinate.called = True + self.stubs.Set(self.conn._pool, "_join_subordinate", fake_join_subordinate) aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata) @@ -2974,7 +2974,7 @@ def fake_join_slave(id, compute_uuid, host, url, user, password): user='fake_user', passwd='fake_pass', xenhost_uuid='fake_uuid')) - self.assertTrue(fake_join_slave.called) + self.assertTrue(fake_join_subordinate.called) def test_add_to_aggregate_first_host(self): def fake_pool_set_name_label(self, session, pool_ref, name): @@ -3014,19 +3014,19 @@ def test_remove_from_empty_aggregate(self): self.conn._pool.remove_from_aggregate, self.context, result, "test_host") - def test_remove_slave(self): - # Ensure eject slave gets called. - def fake_eject_slave(id, compute_uuid, host_uuid): - fake_eject_slave.called = True - self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave) + def test_remove_subordinate(self): + # Ensure eject subordinate gets called. + def fake_eject_subordinate(id, compute_uuid, host_uuid): + fake_eject_subordinate.called = True + self.stubs.Set(self.conn._pool, "_eject_subordinate", fake_eject_subordinate) self.fake_metadata['host2'] = 'fake_host2_uuid' aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE) self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2") - self.assertTrue(fake_eject_slave.called) + self.assertTrue(fake_eject_subordinate.called) - def test_remove_master_solo(self): + def test_remove_main_solo(self): # Ensure metadata are cleared after removal. def fake_clear_pool(id): fake_clear_pool.called = True @@ -3041,8 +3041,8 @@ def fake_clear_pool(id): pool_states.KEY: pool_states.ACTIVE}, matchers.DictMatches(result['metadetails'])) - def test_remote_master_non_empty_pool(self): - # Ensure AggregateError is raised if removing the master. 
+ def test_remote_main_non_empty_pool(self): + # Ensure AggregateError is raised if removing the main. aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata) @@ -3152,7 +3152,7 @@ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore): self.compute.add_aggregate_host, self.context, host="fake_host", aggregate=jsonutils.to_primitive(self.aggr), - slave_info=None) + subordinate_info=None) excepted = db.aggregate_get(self.context, self.aggr['id']) self.assertEqual(excepted['metadetails'][pool_states.KEY], pool_states.ERROR) @@ -3164,16 +3164,16 @@ def __init__(self): self._mock_calls = [] def add_aggregate_host(self, ctxt, aggregate, - host_param, host, slave_info): + host_param, host, subordinate_info): self._mock_calls.append(( self.add_aggregate_host, ctxt, aggregate, - host_param, host, slave_info)) + host_param, host, subordinate_info)) def remove_aggregate_host(self, ctxt, aggregate_id, host_param, - host, slave_info): + host, subordinate_info): self._mock_calls.append(( self.remove_aggregate_host, ctxt, aggregate_id, - host_param, host, slave_info)) + host_param, host, subordinate_info)) class StubDependencies(object): @@ -3188,10 +3188,10 @@ def _is_hv_pool(self, *_ignore): def _get_metadata(self, *_ignore): return { pool_states.KEY: {}, - 'master_compute': 'master' + 'main_compute': 'main' } - def _create_slave_info(self, *ignore): + def _create_subordinate_info(self, *ignore): return "SLAVE_INFO" @@ -3205,32 +3205,32 @@ class HypervisorPoolTestCase(test.NoDBTestCase): 'id': 98, 'hosts': [], 'metadata': { - 'master_compute': 'master', + 'main_compute': 'main', pool_states.POOL_FLAG: {}, pool_states.KEY: {} } } - def test_slave_asks_master_to_add_slave_to_pool(self): - slave = ResourcePoolWithStubs() + def test_subordinate_asks_main_to_add_subordinate_to_pool(self): + subordinate = ResourcePoolWithStubs() - slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave") + subordinate.add_to_aggregate("CONTEXT", 
self.fake_aggregate, "subordinate") self.assertIn( - (slave.compute_rpcapi.add_aggregate_host, + (subordinate.compute_rpcapi.add_aggregate_host, "CONTEXT", jsonutils.to_primitive(self.fake_aggregate), - "slave", "master", "SLAVE_INFO"), - slave.compute_rpcapi._mock_calls) + "subordinate", "main", "SLAVE_INFO"), + subordinate.compute_rpcapi._mock_calls) - def test_slave_asks_master_to_remove_slave_from_pool(self): - slave = ResourcePoolWithStubs() + def test_subordinate_asks_main_to_remove_subordinate_from_pool(self): + subordinate = ResourcePoolWithStubs() - slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave") + subordinate.remove_from_aggregate("CONTEXT", self.fake_aggregate, "subordinate") self.assertIn( - (slave.compute_rpcapi.remove_aggregate_host, - "CONTEXT", 98, "slave", "master", "SLAVE_INFO"), - slave.compute_rpcapi._mock_calls) + (subordinate.compute_rpcapi.remove_aggregate_host, + "CONTEXT", 98, "subordinate", "main", "SLAVE_INFO"), + subordinate.compute_rpcapi._mock_calls) class SwapXapiHostTestCase(test.NoDBTestCase): diff --git a/nova/virt/xenapi/client/session.py b/nova/virt/xenapi/client/session.py index 1dc5b4446e..eae76f5ded 100644 --- a/nova/virt/xenapi/client/session.py +++ b/nova/virt/xenapi/client/session.py @@ -77,7 +77,7 @@ def __init__(self, url, user, pw): import XenAPI self.XenAPI = XenAPI self._sessions = queue.Queue() - self.is_slave = False + self.is_subordinate = False exception = self.XenAPI.Failure(_("Unable to log in to XenAPI " "(is the Dom0 disk full?)")) url = self._create_first_session(url, user, pw, exception) @@ -107,13 +107,13 @@ def _create_first_session(self, url, user, pw, exception): with timeout.Timeout(CONF.xenserver.login_timeout, exception): session.login_with_password(user, pw) except self.XenAPI.Failure as e: - # if user and pw of the master are different, we're doomed! + # if user and pw of the main are different, we're doomed! 
if e.details[0] == 'HOST_IS_SLAVE': - master = e.details[1] - url = pool.swap_xapi_host(url, master) + main = e.details[1] + url = pool.swap_xapi_host(url, main) session = self.XenAPI.Session(url) session.login_with_password(user, pw) - self.is_slave = True + self.is_subordinate = True else: raise self._sessions.put(session) @@ -127,7 +127,7 @@ def _populate_session_pool(self, url, user, pw, exception): self._sessions.put(session) def _get_host_uuid(self): - if self.is_slave: + if self.is_subordinate: aggr = objects.AggregateList.get_by_host( context.get_admin_context(), CONF.host, key=pool_states.POOL_FLAG)[0] diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 03b8b269eb..780a663386 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -113,7 +113,7 @@ def create_host(name_label, hostname='fake_name', address='fake_addr'): # Create a pool if we don't have one already if len(_db_content['pool']) == 0: pool_ref = _create_pool('') - _db_content['pool'][pool_ref]['master'] = host_ref + _db_content['pool'][pool_ref]['main'] = host_ref _db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref _db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref @@ -841,7 +841,7 @@ def __getattr__(self, name): return self._session elif name == 'xenapi': return _Dispatcher(self.xenapi_request, None) - elif name.startswith('login') or name.startswith('slave_local'): + elif name.startswith('login') or name.startswith('subordinate_local'): return lambda *params: self._login(name, params) elif name.startswith('Async'): return lambda *params: self._async(name, params) diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py index 8cdcb37adb..4f51e80c8d 100644 --- a/nova/virt/xenapi/pool.py +++ b/nova/virt/xenapi/pool.py @@ -65,7 +65,7 @@ def undo_aggregate_operation(self, context, op, aggregate, 'during operation on %(host)s'), {'aggregate_id': aggregate['id'], 'host': host}) - def add_to_aggregate(self, context, 
aggregate, host, slave_info=None): + def add_to_aggregate(self, context, aggregate, host, subordinate_info=None): """Add a compute host to an aggregate.""" if not pool_states.is_hv_pool(aggregate['metadata']): return @@ -83,38 +83,38 @@ def add_to_aggregate(self, context, aggregate, host, slave_info=None): if (aggregate['metadata'][pool_states.KEY] == pool_states.CREATED): aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING}) if len(aggregate['hosts']) == 1: - # this is the first host of the pool -> make it master + # this is the first host of the pool -> make it main self._init_pool(aggregate['id'], aggregate['name']) - # save metadata so that we can find the master again - metadata = {'master_compute': host, + # save metadata so that we can find the main again + metadata = {'main_compute': host, host: self._host_uuid, pool_states.KEY: pool_states.ACTIVE} aggregate.update_metadata(metadata) else: # the pool is already up and running, we need to figure out # whether we can serve the request from this host or not. - master_compute = aggregate['metadata']['master_compute'] - if master_compute == CONF.host and master_compute != host: - # this is the master -> do a pool-join - # To this aim, nova compute on the slave has to go down. + main_compute = aggregate['metadata']['main_compute'] + if main_compute == CONF.host and main_compute != host: + # this is the main -> do a pool-join + # To this aim, nova compute on the subordinate has to go down. 
# NOTE: it is assumed that ONLY nova compute is running now - self._join_slave(aggregate['id'], host, - slave_info.get('compute_uuid'), - slave_info.get('url'), slave_info.get('user'), - slave_info.get('passwd')) - metadata = {host: slave_info.get('xenhost_uuid'), } + self._join_subordinate(aggregate['id'], host, + subordinate_info.get('compute_uuid'), + subordinate_info.get('url'), subordinate_info.get('user'), + subordinate_info.get('passwd')) + metadata = {host: subordinate_info.get('xenhost_uuid'), } aggregate.update_metadata(metadata) - elif master_compute and master_compute != host: - # send rpc cast to master, asking to add the following + elif main_compute and main_compute != host: + # send rpc cast to main, asking to add the following # host with specified credentials. - slave_info = self._create_slave_info() + subordinate_info = self._create_subordinate_info() self.compute_rpcapi.add_aggregate_host( - context, aggregate, host, master_compute, slave_info) + context, aggregate, host, main_compute, subordinate_info) - def remove_from_aggregate(self, context, aggregate, host, slave_info=None): + def remove_from_aggregate(self, context, aggregate, host, subordinate_info=None): """Remove a compute host from an aggregate.""" - slave_info = slave_info or dict() + subordinate_info = subordinate_info or dict() if not pool_states.is_hv_pool(aggregate['metadata']): return @@ -127,19 +127,19 @@ def remove_from_aggregate(self, context, aggregate, host, slave_info=None): aggregate_id=aggregate['id'], reason=invalid[aggregate['metadata'][pool_states.KEY]]) - master_compute = aggregate['metadata']['master_compute'] - if master_compute == CONF.host and master_compute != host: - # this is the master -> instruct it to eject a host from the pool + main_compute = aggregate['metadata']['main_compute'] + if main_compute == CONF.host and main_compute != host: + # this is the main -> instruct it to eject a host from the pool host_uuid = aggregate['metadata'][host] - 
self._eject_slave(aggregate['id'], - slave_info.get('compute_uuid'), host_uuid) + self._eject_subordinate(aggregate['id'], + subordinate_info.get('compute_uuid'), host_uuid) aggregate.update_metadata({host: None}) - elif master_compute == host: - # Remove master from its own pool -> destroy pool only if the - # master is on its own, otherwise raise fault. Destroying a - # pool made only by master is fictional + elif main_compute == host: + # Remove main from its own pool -> destroy pool only if the + # main is on its own, otherwise raise fault. Destroying a + # pool made only by main is fictional if len(aggregate['hosts']) > 1: - # NOTE: this could be avoided by doing a master + # NOTE: this could be avoided by doing a main # re-election, but this is simpler for now. raise exception.InvalidAggregateAction( aggregate_id=aggregate['id'], @@ -148,32 +148,32 @@ def remove_from_aggregate(self, context, aggregate, host, slave_info=None): 'from the pool; pool not empty') % host) self._clear_pool(aggregate['id']) - aggregate.update_metadata({'master_compute': None, host: None}) - elif master_compute and master_compute != host: - # A master exists -> forward pool-eject request to master - slave_info = self._create_slave_info() + aggregate.update_metadata({'main_compute': None, host: None}) + elif main_compute and main_compute != host: + # A main exists -> forward pool-eject request to main + subordinate_info = self._create_subordinate_info() self.compute_rpcapi.remove_aggregate_host( - context, aggregate['id'], host, master_compute, slave_info) + context, aggregate['id'], host, main_compute, subordinate_info) else: # this shouldn't have happened raise exception.AggregateError(aggregate_id=aggregate['id'], action='remove_from_aggregate', reason=_('Unable to eject %s ' - 'from the pool; No master found') + 'from the pool; No main found') % host) - def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd): - """Joins a slave into a XenServer resource pool.""" 
+ def _join_subordinate(self, aggregate_id, host, compute_uuid, url, user, passwd): + """Joins a subordinate into a XenServer resource pool.""" try: args = {'compute_uuid': compute_uuid, 'url': url, 'user': user, 'password': passwd, 'force': jsonutils.dumps(CONF.xenserver.use_join_force), - 'master_addr': self._host_addr, - 'master_user': CONF.xenserver.connection_username, - 'master_pass': CONF.xenserver.connection_password, } + 'main_addr': self._host_addr, + 'main_user': CONF.xenserver.connection_username, + 'main_pass': CONF.xenserver.connection_password, } self._session.call_plugin('xenhost', 'host_join', args) except self._session.XenAPI.Failure as e: LOG.error(_("Pool-Join failed: %s"), e) @@ -182,8 +182,8 @@ def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd): reason=_('Unable to join %s ' 'in the pool') % host) - def _eject_slave(self, aggregate_id, compute_uuid, host_uuid): - """Eject a slave from a XenServer resource pool.""" + def _eject_subordinate(self, aggregate_id, compute_uuid, host_uuid): + """Eject a subordinate from a XenServer resource pool.""" try: # shutdown nova-compute; if there are other VMs running, e.g. # guest instances, the eject will fail. That's a precaution @@ -222,7 +222,7 @@ def _clear_pool(self, aggregate_id): action='remove_from_aggregate', reason=str(e.details)) - def _create_slave_info(self): + def _create_subordinate_info(self): """XenServer specific info needed to join the hypervisor pool.""" # replace the address from the xenapi connection url # because this might be 169.254.0.1, i.e. xenapi diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py index ae431ddecb..f4acdf541b 100644 --- a/nova/virt/xenapi/pool_states.py +++ b/nova/virt/xenapi/pool_states.py @@ -25,7 +25,7 @@ A 'created' pool becomes 'changing' during the first request of adding a host. 
During a 'changing' status no other requests will be accepted; this is to allow the hypervisor layer to instantiate the underlying pool -without any potential race condition that may incur in master/slave-based +without any potential race condition that may incur in main/subordinate-based configurations. The pool goes into the 'active' state when the underlying pool has been correctly instantiated. All other operations (e.g. add/remove hosts) that succeed will keep the diff --git a/tools/db/schema_diff.py b/tools/db/schema_diff.py index 9e441f4834..009db6580b 100755 --- a/tools/db/schema_diff.py +++ b/tools/db/schema_diff.py @@ -33,12 +33,12 @@ MYSQL: ./tools/db/schema_diff.py mysql://root@localhost \ - master:latest my_branch:82 + main:latest my_branch:82 POSTGRESQL: ./tools/db/schema_diff.py postgresql://localhost \ - master:latest my_branch:82 + main:latest my_branch:82 """ from __future__ import print_function @@ -225,12 +225,12 @@ def parse_options(): try: orig_branch, orig_version = sys.argv[2].split(':') except IndexError: - usage('original branch and version required (e.g. master:82)') + usage('original branch and version required (e.g. main:82)') try: new_branch, new_version = sys.argv[3].split(':') except IndexError: - usage('new branch and version required (e.g. master:82)') + usage('new branch and version required (e.g. main:82)') return db_url, orig_branch, orig_version, new_branch, new_version